Mailing List Archive

query the page type of a hvm page from within a hvm guest
Now that kdump works with pv-on-hvm guests, one issue remains:
The balloon driver in the guest frees guest pages and marks them as
mmio. When the kernel crashes and the crash kernel attempts to read the
oldmem via /proc/vmcore, a read from ballooned pages generates 100%
load in dom0 because Xen asks qemu-dm for the page content. Since the
reads come in as 8-byte requests, each ballooned page is tried 512 times.
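
To make the arithmetic concrete: a 4096-byte page read in 8-byte
chunks means 4096 / 8 = 512 accesses, and for a ballooned (mmio-typed)
gfn every one of them is forwarded to qemu-dm. A minimal sketch of
such a read loop (illustrative only, not the actual /proc/vmcore code
path; oldmem_read_qword() is a hypothetical stand-in for one 8-byte
oldmem access):

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096
#define CHUNK     8

/* Hypothetical stand-in for one 8-byte oldmem access; for a ballooned
 * page, each such access traps to the device model. */
static uint64_t oldmem_read_qword(uint64_t paddr)
{
    (void)paddr;
    return 0;
}

/* Copying one page in 8-byte chunks issues 512 requests. */
static void oldmem_read_page(uint64_t paddr, uint8_t *buf)
{
    unsigned int off;
    uint64_t q;

    for (off = 0; off < PAGE_SIZE; off += CHUNK) {
        q = oldmem_read_qword(paddr + off);
        memcpy(buf + off, &q, CHUNK);
    }
}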

If the crash kernel had a way to ask the hypervisor whether a specific
guest gfn is ballooned and thus backed by ram, the load issue would not
happen. There seems to be no interface to query the type of a guest gfn
from within the hvm guest.

Any ideas how to implement that?
I see HVMOP_set_mem_type, but no HVMOP_get_mem_type.
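
For illustration, a sketch of the sort of guest-side check such an
interface would enable (the names anticipate the HVMOP_get_mem_type
patch that appears later in this thread; it assumes a Xen guest
environment providing HYPERVISOR_hvm_op(), DOMID_SELF and the
xen_hvm_get_mem_type structure):

/* Sketch only: "is this gfn backed by ram?" from inside the guest. */
static int gfn_is_ram(unsigned long pfn)
{
    struct xen_hvm_get_mem_type a = {
        .domid = DOMID_SELF,
        .pfn   = pfn,
    };

    if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
        return -1;                       /* hypercall unavailable */

    return a.mem_type != HVMMEM_mmio_dm; /* ballooned pages report mmio_dm */
}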


Olaf

Re: query the page type of a hvm page from within a hvm guest
Hi,

At 11:12 +0100 on 07 Apr (1302174722), Olaf Hering wrote:
> If the crash kernel had a way to ask the hypervisor whether a specific
> guest gfn is ballooned and thus backed by ram, the load issue would not
> happen. There seems to be no interface to query the type of a guest gfn
> from within the hvm guest.
>
> Any ideas how to implement that?
> I see HVMOP_set_mem_type, but no HVMOP_get_mem_type.

Feel free to add HVMOP_get_mem_type. I don't think any great harm can
come from allowing the guest to query its own memory status.

Tim.

--
Tim Deegan <Tim.Deegan@citrix.com>
Principal Software Engineer, Xen Platform Team
Citrix Systems UK Ltd. (Company #02937203, SL9 0BG)

Re: query the page type of a hvm page from within a hvm guest
On Thu, Apr 07, Tim Deegan wrote:

> Hi,
>
> At 11:12 +0100 on 07 Apr (1302174722), Olaf Hering wrote:
> > If the crash kernel had a way to ask the hypervisor whether a specific
> > guest gfn is ballooned and thus backed by ram, the load issue would not
> > happen. There seems to be no interface to query the type of a guest gfn
> > from within the hvm guest.
> >
> > Any ideas how to implement that?
> > I see HVMOP_set_mem_type, but no HVMOP_get_mem_type.
>
> Feel free to add HVMOP_get_mem_type. I don't think any great harm can
> come from allowing the guest to query its own memory status.

This version works for me, tested with xen 4.0.1 and SLES11 SP1 kernel.
The actual kernel interface needs to be sent to lkml.

Is a u8 of padding required after mem_type in xen_hvm_get_mem_type?
Should HVMOP_get_mem_type just check for p2m_is_mmio() or return every
possible hvmmem_type_t value?
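
For reference, a minimal standalone sketch of the layout behind the
padding question (assuming domid_t is uint16_t, as in Xen's public
headers): even without an explicit pad member, the compiler inserts
5 bytes of padding after mem_type so that the uint64_t pfn lands at
an 8-byte-aligned offset.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t domid_t;

struct xen_hvm_get_mem_type {
    domid_t  domid;     /* bytes 0-1 */
    uint8_t  mem_type;  /* byte 2; implicit padding fills bytes 3-7 */
    uint64_t pfn;       /* bytes 8-15 */
};

int main(void)
{
    /* Typical x86 output: mem_type at 2, pfn at 8, sizeof 16 */
    printf("mem_type at %zu, pfn at %zu, sizeof %zu\n",
           offsetof(struct xen_hvm_get_mem_type, mem_type),
           offsetof(struct xen_hvm_get_mem_type, pfn),
           sizeof(struct xen_hvm_get_mem_type));
    return 0;
}

An explicit uint8_t pad[5] would not change this layout, only make it
part of the declared interface.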

Olaf

---
unmodified_drivers/linux-2.6/platform-pci/platform-pci.c | 36 +++++++++++++++
xen/arch/ia64/vmx/vmx_hypercall.c | 1
xen/arch/x86/hvm/hvm.c | 27 +++++++++++
xen/include/public/hvm/hvm_op.h | 25 ++++++++--
4 files changed, 84 insertions(+), 5 deletions(-)

diff -r 2f08c89b767d unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
--- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c Wed Apr 20 17:13:08 2011 +0100
+++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c Mon May 02 11:10:13 2011 +0200
@@ -349,6 +349,39 @@ static int check_platform_magic(struct d
return -ENODEV;
}

+#ifdef HAVE_OLDMEM_PFN_IS_RAM
+static int get_mem_type_supported;
+static unsigned long prev_pfn;
+static unsigned long prev_mem_type;
+
+static int xen_oldmem_pfn_is_ram(unsigned long pfn)
+{
+ struct xen_hvm_get_mem_type a;
+ int ret;
+
+ if (get_mem_type_supported)
+ return -ENXIO;
+
+ if (pfn == prev_pfn)
+ return prev_mem_type == HVMMEM_ram_rw;
+
+ a.domid = DOMID_SELF;
+ a.pfn = pfn;
+ a.mem_type = 0;
+ ret = HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a);
+ if (ret) {
+ get_mem_type_supported = ret;
+ ret = -ENXIO;
+ } else {
+ ret = a.mem_type == HVMMEM_ram_rw;
+ prev_pfn = pfn;
+ prev_mem_type = a.mem_type;
+ }
+
+ return ret;
+}
+#endif
+
static int __devinit platform_pci_init(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -417,6 +450,9 @@ static int __devinit platform_pci_init(s
if ((ret = xen_panic_handler_init()))
goto out;

+#ifdef HAVE_OLDMEM_PFN_IS_RAM
+ register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
+#endif
out:
if (ret) {
pci_release_region(pdev, 0);
diff -r 2f08c89b767d xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Wed Apr 20 17:13:08 2011 +0100
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Mon May 02 11:10:13 2011 +0200
@@ -217,6 +217,7 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
break;
}

+ case HVMOP_get_mem_type:
case HVMOP_set_mem_type:
case HVMOP_set_mem_access:
case HVMOP_get_mem_access:
diff -r 2f08c89b767d xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Apr 20 17:13:08 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c Mon May 02 11:10:13 2011 +0200
@@ -3676,6 +3676,33 @@ long do_hvm_op(unsigned long op, XEN_GUE
break;
}

+ case HVMOP_get_mem_type:
+ {
+ struct xen_hvm_get_mem_type a;
+ struct domain *d;
+ p2m_type_t t;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = rcu_lock_remote_target_domain_by_id(a.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) )
+ {
+ gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t, 0);
+ if ( p2m_is_mmio(t) )
+ a.mem_type = HVMMEM_mmio_dm;
+ else
+ a.mem_type = HVMMEM_ram_rw;
+ rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ }
+ rcu_unlock_domain(d);
+ break;
+ }
+
case HVMOP_set_mem_type:
{
struct xen_hvm_set_mem_type a;
diff -r 2f08c89b767d xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h Wed Apr 20 17:13:08 2011 +0100
+++ b/xen/include/public/hvm/hvm_op.h Mon May 02 11:10:13 2011 +0200
@@ -76,6 +76,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_
/* Flushes all VCPU TLBs: @arg must be NULL. */
#define HVMOP_flush_tlbs 5

+typedef enum {
+ HVMMEM_ram_rw, /* Normal read/write guest RAM */
+ HVMMEM_ram_ro, /* Read-only; writes are discarded */
+ HVMMEM_mmio_dm, /* Reads and write go to the device model */
+} hvmmem_type_t;
+
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)

@@ -109,11 +115,6 @@ typedef struct xen_hvm_modified_memory x
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);

#define HVMOP_set_mem_type 8
-typedef enum {
- HVMMEM_ram_rw, /* Normal read/write guest RAM */
- HVMMEM_ram_ro, /* Read-only; writes are discarded */
- HVMMEM_mmio_dm, /* Reads and write go to the device model */
-} hvmmem_type_t;
/* Notify that a region of memory is to be treated in a specific way. */
struct xen_hvm_set_mem_type {
/* Domain to be updated. */
@@ -223,6 +224,20 @@ struct xen_hvm_inject_trap {
typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);

+#define HVMOP_get_mem_type 15
+/* Return hvmmem_type_t for the specified pfn. */
+struct xen_hvm_get_mem_type {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* OUT variable. */
+ uint8_t mem_type;
+ /* IN variable. */
+ uint64_t pfn;
+};
+typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
+
+
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */

Re: query the page type of a hvm page from within a hvm guest
>>> On 02.05.11 at 11:17, Olaf Hering <olaf@aepfle.de> wrote:
> On Thu, Apr 07, Tim Deegan wrote:
>
>> Hi,
>>
>> At 11:12 +0100 on 07 Apr (1302174722), Olaf Hering wrote:
>> > If the crash kernel had a way to ask the hypervisor whether a specific
>> > guest gfn is ballooned and thus backed by ram, the load issue would not
>> > happen. There seems to be no interface to query the type of a guest gfn
>> > from within the hvm guest.
>> >
>> > Any ideas how to implement that?
>> > I see HVMOP_set_mem_type, but no HVMOP_get_mem_type.
>>
>> Feel free to add HVMOP_get_mem_type. I don't think any great harm can
>> come from allowing the guest to query its own memory status.
>
> This version works for me, tested with xen 4.0.1 and SLES11 SP1 kernel.
> The actual kernel interface needs to be sent to lkml.
>
> Is a u8 of padding required after mem_type in xen_hvm_get_mem_type?
> Should HVMOP_get_mem_type just check for p2m_is_mmio() or return every
> possible hvmmem_type_t value?

I'd say the latter, even if you don't use it at present.

> Olaf
>
> ---
> unmodified_drivers/linux-2.6/platform-pci/platform-pci.c | 36 +++++++++++++++
> xen/arch/ia64/vmx/vmx_hypercall.c | 1
> xen/arch/x86/hvm/hvm.c | 27 +++++++++++
> xen/include/public/hvm/hvm_op.h | 25 ++++++++--
> 4 files changed, 84 insertions(+), 5 deletions(-)
>
> diff -r 2f08c89b767d unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
> --- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c Wed Apr 20 17:13:08 2011 +0100
> +++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c Mon May 02 11:10:13 2011 +0200
> @@ -349,6 +349,39 @@ static int check_platform_magic(struct d
> return -ENODEV;
> }
>
> +#ifdef HAVE_OLDMEM_PFN_IS_RAM
> +static int get_mem_type_supported;
> +static unsigned long prev_pfn;
> +static unsigned long prev_mem_type;
> +
> +static int xen_oldmem_pfn_is_ram(unsigned long pfn)
> +{
> + struct xen_hvm_get_mem_type a;
> + int ret;
> +
> + if (get_mem_type_supported)

The name of the variable seems badly chosen, and the way you
coded this, the code wouldn't be able to use the new interface
after migrating from an incapable hypervisor to a capable one.

> + return -ENXIO;
> +
> + if (pfn == prev_pfn)
> + return prev_mem_type == HVMMEM_ram_rw;

Did you in fact observe many immediately subsequent calls with
the same input (i.e. is the caching really worthwhile)?

> +
> + a.domid = DOMID_SELF;
> + a.pfn = pfn;
> + a.mem_type = 0;
> + ret = HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a);
> + if (ret) {
> + get_mem_type_supported = ret;
> + ret = -ENXIO;
> + } else {
> + ret = a.mem_type == HVMMEM_ram_rw;
> + prev_pfn = pfn;
> + prev_mem_type = a.mem_type;
> + }
> +
> + return ret;
> +}
> +#endif
> +
> static int __devinit platform_pci_init(struct pci_dev *pdev,
> const struct pci_device_id *ent)
> {
> @@ -417,6 +450,9 @@ static int __devinit platform_pci_init(s
> if ((ret = xen_panic_handler_init()))
> goto out;
>
> +#ifdef HAVE_OLDMEM_PFN_IS_RAM
> + register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
> +#endif
> out:
> if (ret) {
> pci_release_region(pdev, 0);
> diff -r 2f08c89b767d xen/arch/ia64/vmx/vmx_hypercall.c
> --- a/xen/arch/ia64/vmx/vmx_hypercall.c Wed Apr 20 17:13:08 2011 +0100
> +++ b/xen/arch/ia64/vmx/vmx_hypercall.c Mon May 02 11:10:13 2011 +0200
> @@ -217,6 +217,7 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
> break;
> }
>
> + case HVMOP_get_mem_type:
> case HVMOP_set_mem_type:
> case HVMOP_set_mem_access:
> case HVMOP_get_mem_access:
> diff -r 2f08c89b767d xen/arch/x86/hvm/hvm.c
> --- a/xen/arch/x86/hvm/hvm.c Wed Apr 20 17:13:08 2011 +0100
> +++ b/xen/arch/x86/hvm/hvm.c Mon May 02 11:10:13 2011 +0200
> @@ -3676,6 +3676,33 @@ long do_hvm_op(unsigned long op, XEN_GUE
> break;
> }
>
> + case HVMOP_get_mem_type:
> + {
> + struct xen_hvm_get_mem_type a;
> + struct domain *d;
> + p2m_type_t t;
> +
> + if ( copy_from_guest(&a, arg, 1) )
> + return -EFAULT;
> +
> + rc = rcu_lock_remote_target_domain_by_id(a.domid, &d);
> + if ( rc != 0 )
> + return rc;
> +
> + rc = -EINVAL;
> + if ( is_hvm_domain(d) )
> + {
> + gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t, 0);
> + if ( p2m_is_mmio(t) )
> + a.mem_type = HVMMEM_mmio_dm;
> + else
> + a.mem_type = HVMMEM_ram_rw;
> + rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
> + }
> + rcu_unlock_domain(d);
> + break;
> + }
> +
> case HVMOP_set_mem_type:
> {
> struct xen_hvm_set_mem_type a;
> diff -r 2f08c89b767d xen/include/public/hvm/hvm_op.h
> --- a/xen/include/public/hvm/hvm_op.h Wed Apr 20 17:13:08 2011 +0100
> +++ b/xen/include/public/hvm/hvm_op.h Mon May 02 11:10:13 2011 +0200
> @@ -76,6 +76,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_
> /* Flushes all VCPU TLBs: @arg must be NULL. */
> #define HVMOP_flush_tlbs 5
>
> +typedef enum {
> + HVMMEM_ram_rw, /* Normal read/write guest RAM */
> + HVMMEM_ram_ro, /* Read-only; writes are discarded */
> + HVMMEM_mmio_dm, /* Reads and write go to the device model */
> +} hvmmem_type_t;
> +
> /* Following tools-only interfaces may change in future. */
> #if defined(__XEN__) || defined(__XEN_TOOLS__)
>
> @@ -109,11 +115,6 @@ typedef struct xen_hvm_modified_memory x
> DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
>
> #define HVMOP_set_mem_type 8
> -typedef enum {
> - HVMMEM_ram_rw, /* Normal read/write guest RAM */
> - HVMMEM_ram_ro, /* Read-only; writes are discarded */
> - HVMMEM_mmio_dm, /* Reads and write go to the device model */
> -} hvmmem_type_t;
> /* Notify that a region of memory is to be treated in a specific way. */
> struct xen_hvm_set_mem_type {
> /* Domain to be updated. */
> @@ -223,6 +224,20 @@ struct xen_hvm_inject_trap {
> typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
> DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
>
> +#define HVMOP_get_mem_type 15
> +/* Return hvmmem_type_t for the specified pfn. */
> +struct xen_hvm_get_mem_type {
> + /* Domain to be updated. */

... queried ...

Jan

> + domid_t domid;
> + /* OUT variable. */
> + uint8_t mem_type;
> + /* IN variable. */
> + uint64_t pfn;
> +};
> +typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
> +
> +
> #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
>
> #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
>



Re: query the page type of a hvm page from within a hvm guest
On Mon, May 02, Jan Beulich wrote:

> > + if (get_mem_type_supported)
>
> The name of the variable seems badly chosen, and the way you
> coded this the code wouldn't be able to use the new interface
> after migrating from an incapable hypervisor to a capable one.

For the plain kdump usage that's probably ok. I think ballooned-out
oldmem pages will turn into ram pages during migration, but I'm not sure.

> > + return -ENXIO;
> > +
> > + if (pfn == prev_pfn)
> > + return prev_mem_type == HVMMEM_ram_rw;
>
> Did you in fact observe many immediately subsequent calls with
> the same input (i.e. is the caching really worthwhile)?

It depends on how expensive a hypercall is.
read_from_oldmem() did read full pages in my testing.
I think removing the caching and doing the hypercall unconditionally is ok.

Olaf


Re: query the page type of a hvm page from within a hvm guest
On Mon, May 02, Jan Beulich wrote:

> >>> On 02.05.11 at 11:17, Olaf Hering <olaf@aepfle.de> wrote:
> > On Thu, Apr 07, Tim Deegan wrote:
> >
> >> Hi,
> >>
> >> At 11:12 +0100 on 07 Apr (1302174722), Olaf Hering wrote:
> >> > If the crash kernel had a way to ask the hypervisor whether a specific
> >> > guest gfn is ballooned and thus backed by ram, the load issue would not
> >> > happen. There seems to be no interface to query the type of a guest gfn
> >> > from within the hvm guest.
> >> >
> >> > Any ideas how to implement that?
> >> > I see HVMOP_set_mem_type, but no HVMOP_get_mem_type.
> >>
> >> Feel free to add HVMOP_get_mem_type. I don't think any great harm can
> >> come from allowing the guest to query its own memory status.
> >
> > This version works for me, tested with xen 4.0.1 and SLES11 SP1 kernel.
> > The actual kernel interface needs to be sent to lkml.
> >
> > Is a u8 of padding required after mem_type in xen_hvm_get_mem_type?
> > Should HVMOP_get_mem_type just check for p2m_is_mmio() or return every
> > possible hvmmem_type_t value?
>
> I'd say the latter, even if you don't use it at present.

So what about this version then?



---
unmodified_drivers/linux-2.6/platform-pci/platform-pci.c | 36 +++++++++++++++
xen/arch/ia64/vmx/vmx_hypercall.c | 1
xen/arch/x86/hvm/hvm.c | 27 +++++++++++
xen/include/public/hvm/hvm_op.h | 25 ++++++++--
4 files changed, 84 insertions(+), 5 deletions(-)

diff -r 2f08c89b767d unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
--- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c Wed Apr 20 17:13:08 2011 +0100
+++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c Mon May 02 17:42:57 2011 +0200
@@ -349,6 +349,32 @@ static int check_platform_magic(struct d
return -ENODEV;
}

+#ifdef HAVE_OLDMEM_PFN_IS_RAM
+static int xen_oldmem_pfn_is_ram(unsigned long pfn)
+{
+ struct xen_hvm_get_mem_type a;
+ int ret;
+
+ a.domid = DOMID_SELF;
+ a.pfn = pfn;
+ if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
+ return -ENXIO;
+
+ switch (a.mem_type) {
+ case HVMMEM_mmio_dm:
+ ret = 0;
+ break;
+ case HVMMEM_ram_rw:
+ case HVMMEM_ram_ro:
+ default:
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+#endif
+
static int __devinit platform_pci_init(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -417,6 +443,9 @@ static int __devinit platform_pci_init(s
if ((ret = xen_panic_handler_init()))
goto out;

+#ifdef HAVE_OLDMEM_PFN_IS_RAM
+ register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
+#endif
out:
if (ret) {
pci_release_region(pdev, 0);
diff -r 2f08c89b767d xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Wed Apr 20 17:13:08 2011 +0100
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Mon May 02 17:42:57 2011 +0200
@@ -217,6 +217,7 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
break;
}

+ case HVMOP_get_mem_type:
case HVMOP_set_mem_type:
case HVMOP_set_mem_access:
case HVMOP_get_mem_access:
diff -r 2f08c89b767d xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Apr 20 17:13:08 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c Mon May 02 17:42:57 2011 +0200
@@ -3676,6 +3676,37 @@ long do_hvm_op(unsigned long op, XEN_GUE
break;
}

+ case HVMOP_get_mem_type:
+ {
+ struct xen_hvm_get_mem_type a;
+ struct domain *d;
+ p2m_type_t t;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = rcu_lock_remote_target_domain_by_id(a.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) )
+ {
+ gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t, 0);
+ if ( p2m_is_mmio(t) )
+ a.mem_type = HVMMEM_mmio_dm;
+ else if ( p2m_is_readonly(t) )
+ a.mem_type = HVMMEM_ram_ro;
+ else if ( p2m_is_ram(t) )
+ a.mem_type = HVMMEM_ram_rw;
+ else
+ a.mem_type = HVMMEM_mmio_dm;
+ rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ }
+ rcu_unlock_domain(d);
+ break;
+ }
+
case HVMOP_set_mem_type:
{
struct xen_hvm_set_mem_type a;
diff -r 2f08c89b767d xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h Wed Apr 20 17:13:08 2011 +0100
+++ b/xen/include/public/hvm/hvm_op.h Mon May 02 17:42:57 2011 +0200
@@ -76,6 +76,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_
/* Flushes all VCPU TLBs: @arg must be NULL. */
#define HVMOP_flush_tlbs 5

+typedef enum {
+ HVMMEM_ram_rw, /* Normal read/write guest RAM */
+ HVMMEM_ram_ro, /* Read-only; writes are discarded */
+ HVMMEM_mmio_dm, /* Reads and write go to the device model */
+} hvmmem_type_t;
+
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)

@@ -109,11 +115,6 @@ typedef struct xen_hvm_modified_memory x
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);

#define HVMOP_set_mem_type 8
-typedef enum {
- HVMMEM_ram_rw, /* Normal read/write guest RAM */
- HVMMEM_ram_ro, /* Read-only; writes are discarded */
- HVMMEM_mmio_dm, /* Reads and write go to the device model */
-} hvmmem_type_t;
/* Notify that a region of memory is to be treated in a specific way. */
struct xen_hvm_set_mem_type {
/* Domain to be updated. */
@@ -223,6 +224,20 @@ struct xen_hvm_inject_trap {
typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);

+#define HVMOP_get_mem_type 15
+/* Return hvmmem_type_t for the specified pfn. */
+struct xen_hvm_get_mem_type {
+ /* Domain to be queried. */
+ domid_t domid;
+ /* OUT variable. */
+ uint8_t mem_type;
+ /* IN variable. */
+ uint64_t pfn;
+};
+typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
+
+
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */

Re: query the page type of a hvm page from within a hvm guest
>>> On 02.05.11 at 18:27, Olaf Hering <olaf@aepfle.de> wrote:
> So what about this version then?

Looks good to me (you may want to replace a few tabs with spaces
in the hypervisor portion of the patch, though).

Jan
