[PATCH 3 of 5] xenpaging: add need_populate and paged_no_mfn checks
# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1323189148 -3600
# Node ID 7f2cfd9bd113c40a49cc7e036fd07eb706a22f15
# Parent 96d3292797d861592a7d2d3840f371ec719775a9
xenpaging: add need_populate and paged_no_mfn checks

There is currently a mix of p2mt checks for the various paging types.
Some of them check whether a gfn needs to be populated, others whether a
gfn is not backed by an mfn.

Add a new p2m_do_populate() helper which covers the p2m_ram_paged and
p2m_ram_paging_out types. If a gfn is no longer in one of these states,
another populate request for the pager is not needed. This avoids a call
to p2m_mem_paging_populate(), which in turn reduces the pressure on the
ring buffer because no temporary slot needs to be claimed. As such, this
helper is an optimization.
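
At each call site this turns the unconditional populate request into a
guarded one. The resulting pattern, as in the hvm_set_ioreq_page() hunk
below (the error code and put_gfn() argument vary per caller; only the
guard around p2m_mem_paging_populate() is new):

    if ( p2m_is_paging(p2mt) )
    {
        if ( p2m_do_populate(p2mt) )
            p2m_mem_paging_populate(d, gmfn);
        put_gfn(d, gmfn);
        return -ENOENT;
    }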

Modify the existing p2m_is_paged() helper so that it also covers
p2m_ram_paging_in_start in addition to the current p2m_ram_paged type. A
gfn in either of these two states is not backed by an mfn.
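
For reference, the two checks map onto the following type masks, as
defined in the asm-x86/p2m.h hunk at the end of this patch:

    #define P2M_POPULATE_TYPES     (p2m_to_mask(p2m_ram_paged) \
                                    | p2m_to_mask(p2m_ram_paging_out) )

    #define P2M_PAGED_NO_MFN_TYPES (p2m_to_mask(p2m_ram_paged) \
                                    | p2m_to_mask(p2m_ram_paging_in_start) )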

Signed-off-by: Olaf Hering <olaf@aepfle.de>

diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -67,7 +67,8 @@ static int hvmemul_do_io(
ram_mfn = get_gfn_unshare(curr->domain, ram_gfn, &p2mt);
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(curr->domain, ram_gfn);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(curr->domain, ram_gfn);
put_gfn(curr->domain, ram_gfn);
return X86EMUL_RETRY;
}
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -363,7 +363,8 @@ static int hvm_set_ioreq_page(
}
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(d, gmfn);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(d, gmfn);
put_gfn(d, gmfn);
return -ENOENT;
}
@@ -1298,7 +1299,7 @@ int hvm_hap_nested_page_fault(unsigned l

#ifdef __x86_64__
/* Check if the page has been paged out */
- if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
+ if ( p2m_do_populate(p2mt) )
p2m_mem_paging_populate(v->domain, gfn);

/* Mem sharing: unshare the page and try again */
@@ -1844,7 +1845,8 @@ static void *__hvm_map_guest_frame(unsig
}
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(d, gfn);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(d, gfn);
put_gfn(d, gfn);
return NULL;
}
@@ -2320,7 +2322,8 @@ static enum hvm_copy_result __hvm_copy(

if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(curr->domain, gfn);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(curr->domain, gfn);
put_gfn(curr->domain, gfn);
return HVMCOPY_gfn_paged_out;
}
@@ -3808,7 +3811,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
mfn_t mfn = get_gfn_unshare(d, pfn, &t);
if ( p2m_is_paging(t) )
{
- p2m_mem_paging_populate(d, pfn);
+ if ( p2m_do_populate(t) )
+ p2m_mem_paging_populate(d, pfn);
put_gfn(d, pfn);
rc = -EINVAL;
goto param_fail3;
@@ -3912,7 +3916,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
mfn = get_gfn_unshare(d, pfn, &t);
if ( p2m_is_paging(t) )
{
- p2m_mem_paging_populate(d, pfn);
+ if ( p2m_do_populate(t) )
+ p2m_mem_paging_populate(d, pfn);
put_gfn(d, pfn);
rc = -EINVAL;
goto param_fail4;
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3528,9 +3528,10 @@ int do_mmu_update(
if ( !p2m_is_valid(p2mt) )
mfn = INVALID_MFN;

- if ( p2m_is_paged(p2mt) )
+ if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(pg_owner, gmfn);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(pg_owner, gmfn);
put_gfn(pt_owner, gmfn);
rc = -ENOENT;
break;
@@ -3560,21 +3561,15 @@ int do_mmu_update(

l1emfn = mfn_x(get_gfn(pg_owner, l1egfn, &l1e_p2mt));

- if ( p2m_is_paged(l1e_p2mt) )
+#ifdef __x86_64__
+ if ( p2m_is_paging(l1e_p2mt) )
{
- p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
+ if ( p2m_do_populate(l1e_p2mt) )
+ p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
put_gfn(pg_owner, l1egfn);
rc = -ENOENT;
break;
}
- else if ( p2m_ram_paging_in_start == l1e_p2mt &&
- !mfn_valid(l1emfn) )
- {
- put_gfn(pg_owner, l1egfn);
- rc = -ENOENT;
- break;
- }
-#ifdef __x86_64__
/* XXX: Ugly: pull all the checks into a separate function.
* Don't want to do it now, not to interfere with mem_paging
* patches */
@@ -3609,16 +3604,10 @@ int do_mmu_update(

l2emfn = mfn_x(get_gfn(pg_owner, l2egfn, &l2e_p2mt));

- if ( p2m_is_paged(l2e_p2mt) )
+ if ( p2m_is_paging(l2e_p2mt) )
{
- p2m_mem_paging_populate(pg_owner, l2egfn);
- put_gfn(pg_owner, l2egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_paging_in_start == l2e_p2mt &&
- !mfn_valid(l2emfn) )
- {
+ if ( p2m_do_populate(l2e_p2mt) )
+ p2m_mem_paging_populate(pg_owner, l2egfn);
put_gfn(pg_owner, l2egfn);
rc = -ENOENT;
break;
@@ -3644,16 +3633,10 @@ int do_mmu_update(

l3emfn = mfn_x(get_gfn(pg_owner, l3egfn, &l3e_p2mt));

- if ( p2m_is_paged(l3e_p2mt) )
+ if ( p2m_is_paging(l3e_p2mt) )
{
- p2m_mem_paging_populate(pg_owner, l3egfn);
- put_gfn(pg_owner, l3egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_paging_in_start == l3e_p2mt &&
- !mfn_valid(l3emfn) )
- {
+ if ( p2m_do_populate(l3e_p2mt) )
+ p2m_mem_paging_populate(pg_owner, l3egfn);
put_gfn(pg_owner, l3egfn);
rc = -ENOENT;
break;
@@ -3679,16 +3662,10 @@ int do_mmu_update(

l4emfn = mfn_x(get_gfn(pg_owner, l4egfn, &l4e_p2mt));

- if ( p2m_is_paged(l4e_p2mt) )
+ if ( p2m_is_paging(l4e_p2mt) )
{
- p2m_mem_paging_populate(pg_owner, l4egfn);
- put_gfn(pg_owner, l4egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_paging_in_start == l4e_p2mt &&
- !mfn_valid(l4emfn) )
- {
+ if ( p2m_do_populate(l4e_p2mt) )
+ p2m_mem_paging_populate(pg_owner, l4egfn);
put_gfn(pg_owner, l4egfn);
rc = -ENOENT;
break;
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -102,7 +102,8 @@ static inline void *map_domain_gfn(struc
if ( p2m_is_paging(*p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
- p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
+ if ( p2m_do_populate(*p2mt) )
+ p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
__put_gfn(p2m, gfn_x(gfn));
*rc = _PAGE_PAGED;
return NULL;
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -64,7 +64,8 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
if ( p2m_is_paging(p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
- p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);

pfec[0] = PFEC_page_paged;
__put_gfn(p2m, top_gfn);
@@ -101,7 +102,8 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
if ( p2m_is_paging(p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
- p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));

pfec[0] = PFEC_page_paged;
__put_gfn(p2m, gfn_x(gfn));
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -375,8 +375,7 @@ ept_set_entry(struct p2m_domain *p2m, un
* Read-then-write is OK because we hold the p2m lock. */
old_entry = *ept_entry;

- if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) ||
- (p2mt == p2m_ram_paging_in_start) )
+ if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) )
{
/* Construct the new entry, and then write it once */
new_entry.emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat,
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -918,7 +918,7 @@ void p2m_mem_paging_populate(struct doma
p2m_lock(p2m);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
/* Allow only nominated or evicted pages to enter page-in path */
- if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
+ if ( p2m_do_populate(p2mt) )
{
/* Evict will fail now, tag this request for pager */
if ( p2mt == p2m_ram_paging_out )
@@ -935,7 +935,7 @@ void p2m_mem_paging_populate(struct doma
req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
}
/* No need to inform pager if the gfn is not in the page-out path */
- else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
+ else if ( !p2m_do_populate(p2mt) )
{
/* gfn is already on its way back and vcpu is not paused */
mem_event_put_req_producers(&d->mem_event->paging);
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/common/grant_table.c
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -163,7 +163,8 @@ static int __get_paged_frame(unsigned lo
*frame = mfn_x(mfn);
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(rd, gfn);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(rd, gfn);
put_gfn(rd, gfn);
rc = GNTST_eagain;
}
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -158,7 +158,11 @@ typedef enum {
| p2m_to_mask(p2m_ram_paging_in_start) \
| p2m_to_mask(p2m_ram_paging_in))

-#define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged))
+#define P2M_POPULATE_TYPES (p2m_to_mask(p2m_ram_paged) \
+ | p2m_to_mask(p2m_ram_paging_out) )
+
+#define P2M_PAGED_NO_MFN_TYPES (p2m_to_mask(p2m_ram_paged) \
+ | p2m_to_mask(p2m_ram_paging_in_start) )

/* Shared types */
/* XXX: Sharable types could include p2m_ram_ro too, but we would need to
@@ -184,7 +188,8 @@ typedef enum {
#define p2m_has_emt(_t) (p2m_to_mask(_t) & (P2M_RAM_TYPES | p2m_to_mask(p2m_mmio_direct)))
#define p2m_is_pageable(_t) (p2m_to_mask(_t) & P2M_PAGEABLE_TYPES)
#define p2m_is_paging(_t) (p2m_to_mask(_t) & P2M_PAGING_TYPES)
-#define p2m_is_paged(_t) (p2m_to_mask(_t) & P2M_PAGED_TYPES)
+#define p2m_is_paged(_t) (p2m_to_mask(_t) & P2M_PAGED_NO_MFN_TYPES)
+#define p2m_do_populate(_t) (p2m_to_mask(_t) & P2M_POPULATE_TYPES)
#define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
#define p2m_is_shared(_t) (p2m_to_mask(_t) & P2M_SHARED_TYPES)
#define p2m_is_broken(_t) (p2m_to_mask(_t) & P2M_BROKEN_TYPES)
