[PATCH 09 of 18] x86/mm: Check how many mfns are shared, in addition to how many are saved
xen/arch/x86/mm.c | 6 ------
xen/arch/x86/mm/mem_sharing.c | 31 ++++++++++++++++++++++++++++---
xen/arch/x86/x86_64/compat/mm.c | 6 ++++++
xen/arch/x86/x86_64/mm.c | 7 +++++++
xen/include/asm-x86/mem_sharing.h | 1 +
xen/include/public/memory.h | 1 +
6 files changed, 43 insertions(+), 9 deletions(-)


This patch also moves the existing sharing-related memory op to the
correct location, and adds logic to the audit() method that uses the
new information.
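
For illustration only (this is not Xen code and not part of the patch), the check added to audit() is a plain counter-vs-recount consistency test: walk the audit list, count what is actually there, and compare against the live counter. The stand-alone sketch below shows the same pattern with purely illustrative names; the real implementation walks shr_audit_list under the sharing lock and compares against atomic_read(&nr_shared_mfns).

/* Stand-alone sketch of the counter-vs-recount check; names and data
 * structures are illustrative stand-ins, not Xen code. */
#include <stdio.h>

struct shared_page { struct shared_page *next; };

/* Three "shared MFNs" on the audit list... */
static struct shared_page pages[3] = { { &pages[1] }, { &pages[2] }, { NULL } };
static struct shared_page *audit_list = &pages[0];
/* ...but a stale counter claiming only two, so the audit flags an error. */
static long nr_shared_mfns = 2;

static int audit(void)
{
    int errors = 0;
    long count_expected = nr_shared_mfns;
    long count_found = 0;

    for ( struct shared_page *p = audit_list; p; p = p->next )
        count_found++;                 /* one hit per entry on the list */

    if ( count_found != count_expected )
    {
        printf("Expected %ld shared mfns, found %ld.\n",
               count_expected, count_found);
        errors++;
    }
    return errors;
}

int main(void)
{
    return audit() ? 1 : 0;
}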

This patch only provides the Xen implementation of the domctls.

Signed-off-by: Adin Scannell <adin@scannell.ca>
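
For context (again, not part of the patch), the sketch below shows how a caller might read the two counters once the ops are wired up. xenmem_op() is a hypothetical stand-in for whatever memory-op wrapper a given toolstack or libxc release provides, and its mock body exists only to keep the example self-contained; neither op takes an argument, and each simply returns its counter (the public header notes the call never fails).

#include <stdio.h>

#define XENMEM_get_sharing_freed_pages   18
#define XENMEM_get_sharing_shared_pages  19

/* Hypothetical hypercall wrapper: in a real toolstack this would issue
 * the XENMEM_* memory op via libxenctrl; mocked here with fixed values
 * so the sketch compiles and runs on its own. */
static long xenmem_op(unsigned int cmd, void *arg)
{
    (void)arg;   /* neither op uses an argument */
    return (cmd == XENMEM_get_sharing_freed_pages) ? 100 : 40;
}

int main(void)
{
    long freed  = xenmem_op(XENMEM_get_sharing_freed_pages, NULL);
    long shared = xenmem_op(XENMEM_get_sharing_shared_pages, NULL);

    printf("pages freed by sharing: %ld, mfns currently shared: %ld\n",
           freed, shared);
    return 0;
}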

diff -r 24d514cd4dee -r 65b32b391373 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -119,7 +119,6 @@
#include <xen/trace.h>
#include <asm/setup.h>
#include <asm/fixmap.h>
-#include <asm/mem_sharing.h>

/*
* Mapping of first 2 or 4 megabytes of memory. This is mapped with 4kB
@@ -5093,11 +5092,6 @@ long arch_memory_op(int op, XEN_GUEST_HA
return rc;
}

-#ifdef __x86_64__
- case XENMEM_get_sharing_freed_pages:
- return mem_sharing_get_nr_saved_mfns();
-#endif
-
default:
return subarch_memory_op(op, arg);
}
diff -r 24d514cd4dee -r 65b32b391373 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -75,6 +75,7 @@ static inline int mem_sharing_audit(void

static shr_handle_t next_handle = 1;
static atomic_t nr_saved_mfns = ATOMIC_INIT(0);
+static atomic_t nr_shared_mfns = ATOMIC_INIT(0);

typedef struct gfn_info
{
@@ -153,9 +154,12 @@ static struct page_info* mem_sharing_loo
static int mem_sharing_audit(void)
{
int errors = 0;
+ unsigned long count_expected;
+ unsigned long count_found = 0;
struct list_head *ae;

ASSERT(shr_locked_by_me());
+ count_expected = atomic_read(&nr_shared_mfns);

list_for_each(ae, &shr_audit_list)
{
@@ -194,6 +198,7 @@ static int mem_sharing_audit(void)
errors++;
}

+ count_found++;
/* Check if all GFNs map to the MFN, and the p2m types */
list_for_each(le, &pg->shared_info->gfns)
{
@@ -239,6 +244,13 @@ static int mem_sharing_audit(void)
}
}

+ if ( count_found != count_expected )
+ {
+ MEM_SHARING_DEBUG("Expected %ld shared mfns, found %ld.",
+ count_expected, count_found);
+ errors++;
+ }
+
return errors;
}
#endif
@@ -296,6 +308,11 @@ unsigned int mem_sharing_get_nr_saved_mf
return ((unsigned int)atomic_read(&nr_saved_mfns));
}

+unsigned int mem_sharing_get_nr_shared_mfns(void)
+{
+ return (unsigned int)atomic_read(&nr_shared_mfns);
+}
+
int mem_sharing_sharing_resume(struct domain *d)
{
mem_event_response_t rsp;
@@ -570,10 +587,11 @@ int mem_sharing_share_pages(struct domai
BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn) == 0);
if ( single_client_gfn )
{
- /* Only increase the per-domain count when we are actually
+ /* Only increase the stats counts when we are actually
* sharing. And don't increase it should we ever re-share */
atomic_inc(&d->shr_pages);
ASSERT( cd == d );
+ atomic_inc(&nr_shared_mfns);
}
put_domain(d);
}
@@ -654,10 +672,14 @@ gfn_found:
if ( flags & MEM_SHARING_DESTROY_GFN )
{
mem_sharing_gfn_destroy(gfn_info, !last_gfn);
- if ( !last_gfn )
+ if ( last_gfn )
+ {
+ atomic_dec(&nr_shared_mfns);
+ } else {
/* Even though we don't allocate a private page, we have to account
* for the MFN that originally backed this PFN. */
atomic_dec(&nr_saved_mfns);
+ }

if ( last_gfn )
{
@@ -707,8 +729,11 @@ gfn_found:
put_page_and_type(old_page);

private_page_found:
- if ( !last_gfn )
+ if ( last_gfn ) {
+ atomic_dec(&nr_shared_mfns);
+ } else {
atomic_dec(&nr_saved_mfns);
+ }

if ( p2m_change_type(d, gfn, p2m_ram_shared, p2m_ram_rw) !=
p2m_ram_shared )
diff -r 24d514cd4dee -r 65b32b391373 xen/arch/x86/x86_64/compat/mm.c
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -205,6 +205,12 @@ int compat_arch_memory_op(int op, XEN_GU
break;
}

+ case XENMEM_get_sharing_freed_pages:
+ return mem_sharing_get_nr_saved_mfns();
+
+ case XENMEM_get_sharing_shared_pages:
+ return mem_sharing_get_nr_shared_mfns();
+
default:
rc = -ENOSYS;
break;
diff -r 24d514cd4dee -r 65b32b391373 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -34,6 +34,7 @@
#include <asm/msr.h>
#include <asm/setup.h>
#include <asm/numa.h>
+#include <asm/mem_sharing.h>
#include <public/memory.h>

/* Parameters for PFN/MADDR compression. */
@@ -1090,6 +1091,12 @@ long subarch_memory_op(int op, XEN_GUEST

break;

+ case XENMEM_get_sharing_freed_pages:
+ return mem_sharing_get_nr_saved_mfns();
+
+ case XENMEM_get_sharing_shared_pages:
+ return mem_sharing_get_nr_shared_mfns();
+
default:
rc = -ENOSYS;
break;
diff -r 24d514cd4dee -r 65b32b391373 xen/include/asm-x86/mem_sharing.h
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -40,6 +40,7 @@ struct page_sharing_info
(is_hvm_domain(_d) && paging_mode_hap(_d))

unsigned int mem_sharing_get_nr_saved_mfns(void);
+unsigned int mem_sharing_get_nr_shared_mfns(void);
int mem_sharing_nominate_page(struct domain *d,
unsigned long gfn,
int expected_refcnt,
diff -r 24d514cd4dee -r 65b32b391373 xen/include/public/memory.h
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -294,6 +294,7 @@ typedef struct xen_pod_target xen_pod_ta
* The call never fails.
*/
#define XENMEM_get_sharing_freed_pages 18
+#define XENMEM_get_sharing_shared_pages 19

#endif /* __XEN_PUBLIC_MEMORY_H__ */


Re: [PATCH 09 of 18] x86/mm: Check how many mfns are shared, in addition to how many are saved
At 02:47 -0500 on 08 Dec (1323312444), Andres Lagar-Cavilla wrote:
> This patch also moves the existing sharing-related memory op to the
> correct location, and adds logic to the audit() method that uses the
> new information.
>
> This patch only provides the Xen implementation of the domctls.
>
> Signed-off-by: Adin Scannell <adin@scannell.ca>

Acked-by: Tim Deegan <tim@xen.org> (once patches #3 and #4 are in).

Tim.
