
[PATCH 9 of 9] x86/mm: use RCU in mem sharing audit list, eliminate global lock completely
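
The audit list is the last user of the global sharing lock. Convert the list to RCU: entries are added and removed with list_add_rcu()/list_del_rcu(), freeing of a removed entry is deferred via call_rcu() so concurrent walkers never touch freed memory, and the audit walk itself runs inside an RCU read-side critical section, taking each page's fine-grained lock as it inspects it. The audit build now uses the same per-page locks as the non-audit build instead of stubbing them out, so per-page locking becomes the only serialization for sharing operations and shr_lock goes away entirely. An ordinary spinlock, shr_audit_lock, is introduced for the writer side of the list, and MEM_SHARING_AUDIT now defaults to 1.
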
xen/arch/x86/mm/mem_sharing.c     |  83 +++++++++++++++-----------------------
xen/arch/x86/mm/mm-locks.h        |  18 --------
xen/include/asm-x86/mem_sharing.h |   3 +-
3 files changed, 35 insertions(+), 69 deletions(-)


Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
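
For reference, below is a minimal sketch of the RCU-protected list pattern the patch applies, written against the same primitives that appear in the diff (<xen/rcupdate.h> and friends). It is an illustration, not part of the patch: struct item and every identifier in it are hypothetical, and the writer-side locking shows one plausible use of the newly introduced shr_audit_lock, which the hunks below only declare and initialize.

#include <xen/kernel.h>    /* container_of() */
#include <xen/list.h>
#include <xen/rcupdate.h>
#include <xen/spinlock.h>
#include <xen/xmalloc.h>

struct item {
    struct list_head entry;    /* Linkage in the RCU-protected list. */
    struct rcu_head rcu_head;  /* For deferred freeing via call_rcu(). */
};

static struct list_head item_list;
static spinlock_t item_lock;          /* Serializes list writers only. */
DEFINE_RCU_READ_LOCK(item_read_lock);

static void item_sys_init(void)
{
    spin_lock_init(&item_lock);
    INIT_LIST_HEAD(&item_list);
}

/* RCU callback: runs only after every reader that could still
 * see the entry has left its read-side critical section. */
static void free_item(struct rcu_head *head)
{
    xfree(container_of(head, struct item, rcu_head));
}

static void item_add(struct item *i)
{
    spin_lock(&item_lock);
    list_add_rcu(&i->entry, &item_list);   /* Publish to readers. */
    spin_unlock(&item_lock);
}

static void item_del(struct item *i)
{
    spin_lock(&item_lock);
    list_del_rcu(&i->entry);               /* Unlink; readers may still hold it. */
    spin_unlock(&item_lock);
    call_rcu(&i->rcu_head, free_item);     /* Free only once it is safe. */
}

static void item_walk(void)
{
    struct list_head *le;

    rcu_read_lock(&item_read_lock);
    list_for_each_rcu(le, &item_list)
    {
        struct item *i = list_entry(le, struct item, entry);
        /* i cannot be freed underneath us here, but it may have been
         * concurrently unlinked; per-object state must be rechecked,
         * which is what the audit code's per-page lock does below. */
        (void)i;
    }
    rcu_read_unlock(&item_read_lock);
}

The crucial ordering is in item_del(): list_del_rcu() makes the entry unreachable to new readers, while call_rcu() defers the xfree() until all existing readers have finished. This is why the direct xfree(...->shared_info) calls disappear from the share/unshare paths below in favour of audit_del_list().
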

diff -r 5b9b36648e43 -r 835be7c121b7 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -32,6 +32,7 @@
#include <asm/p2m.h>
#include <asm/mem_event.h>
#include <asm/atomic.h>
+#include <xen/rcupdate.h>

#include "mm-locks.h"

@@ -47,44 +48,33 @@ typedef struct pg_lock_data {

#if MEM_SHARING_AUDIT

-static mm_lock_t shr_lock;
-
-#define shr_lock() _shr_lock()
-#define shr_unlock() _shr_unlock()
-#define shr_locked_by_me() _shr_locked_by_me()
-
static int mem_sharing_audit(void);

#define MEM_SHARING_DEBUG(_f, _a...) \
debugtrace_printk("mem_sharing_debug: %s(): " _f, __func__, ##_a)

static struct list_head shr_audit_list;
+static spinlock_t shr_audit_lock;
+DEFINE_RCU_READ_LOCK(shr_audit_read_lock);
+
+/* RCU delayed free of audit list entry */
+static void _free_pg_shared_info(struct rcu_head *head)
+{
+    xfree(container_of(head, struct page_sharing_info, rcu_head));
+}

static inline void audit_add_list(struct page_info *page)
{
- list_add(&page->shared_info->entry, &shr_audit_list);
+ list_add_rcu(&page->shared_info->entry, &shr_audit_list);
}

static inline void audit_del_list(struct page_info *page)
{
- list_del(&page->shared_info->entry);
+ list_del_rcu(&page->shared_info->entry);
+ call_rcu(&page->shared_info->rcu_head, _free_pg_shared_info);
}
-
-static inline int mem_sharing_page_lock(struct page_info *p,
- pg_lock_data_t *l)
-{
- return 1;
-}
-#define mem_sharing_page_unlock(p, l) ((void)0)
-
-#define get_next_handle() next_handle++;
#else

-#define shr_lock() ((void)0)
-#define shr_unlock() ((void)0)
-/* Only used inside audit code */
-//#define shr_locked_by_me() ((void)0)
-
static inline int mem_sharing_audit(void)
{
return 0;
@@ -93,6 +83,8 @@ static inline int mem_sharing_audit(void
#define audit_add_list(p) ((void)0)
#define audit_del_list(p) ((void)0)

+#endif /* MEM_SHARING_AUDIT */
+
static inline int mem_sharing_page_lock(struct page_info *pg,
pg_lock_data_t *pld)
{
@@ -127,7 +119,6 @@ static inline shr_handle_t get_next_hand
while ( (y = cmpxchg(&next_handle, x, x + 1)) != x );
return x + 1;
}
-#endif /* MEM_SHARING_AUDIT */

#define mem_sharing_enabled(d) \
(is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
@@ -220,11 +211,13 @@ static int mem_sharing_audit(void)
unsigned long count_expected;
unsigned long count_found = 0;
struct list_head *ae;
+ DECLARE_PG_LOCK_DATA(pld);

- ASSERT(shr_locked_by_me());
count_expected = atomic_read(&nr_shared_mfns);

- list_for_each(ae, &shr_audit_list)
+ rcu_read_lock(&shr_audit_read_lock);
+
+ list_for_each_rcu(ae, &shr_audit_list)
{
struct page_sharing_info *shared_info;
unsigned long nr_gfns = 0;
@@ -236,6 +229,15 @@ static int mem_sharing_audit(void)
pg = shared_info->pg;
mfn = page_to_mfn(pg);

+        /* If we can't lock it, it's definitely not a shared page */
+        if ( !mem_sharing_page_lock(pg, &pld) )
+        {
+            MEM_SHARING_DEBUG("mfn %lx in audit list, but cannot be locked (%lx)!\n",
+                              mfn_x(mfn), pg->u.inuse.type_info);
+            errors++;
+            continue;
+        }
+
/* Check if the MFN has correct type, owner and handle. */
if ( !(pg->u.inuse.type_info & PGT_shared_page) )
{
@@ -317,8 +319,12 @@ static int mem_sharing_audit(void)
(pg->u.inuse.type_info & PGT_count_mask));
errors++;
}
+
+ mem_sharing_page_unlock(pg, &pld);
}

+ rcu_read_unlock(&shr_audit_read_lock);
+
if ( count_found != count_expected )
{
MEM_SHARING_DEBUG("Expected %ld shared mfns, found %ld.",
@@ -552,14 +558,10 @@ static int page_make_private(struct doma
spin_lock(&d->page_alloc_lock);

/* We can only change the type if count is one */
- /* If we are locking pages individually, then we need to drop
+ /* Because we are locking pages individually, we need to drop
* the lock here, while the page is typed. We cannot risk the
* race of page_unlock and then put_page_type. */
-#if MEM_SHARING_AUDIT
- expected_type = (PGT_shared_page | PGT_validated | 1);
-#else
expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
-#endif
if ( page->u.inuse.type_info != expected_type )
{
put_page(page);
@@ -570,12 +572,8 @@ static int page_make_private(struct doma
/* Drop the final typecount */
put_page_and_type(page);

-#if MEM_SHARING_AUDIT
- (void)pld;
-#else
/* Now that we've dropped the type, we can unlock */
mem_sharing_page_unlock(page, pld);
-#endif

/* Change the owner */
ASSERT(page_get_owner(page) == dom_cow);
@@ -627,7 +625,6 @@ int mem_sharing_nominate_page(struct dom

*phandle = 0UL;

- shr_lock();
mfn = get_gfn(d, gfn, &p2mt);

/* Check if mfn is valid */
@@ -719,7 +716,6 @@ int mem_sharing_nominate_page(struct dom

out:
put_gfn(d, gfn);
- shr_unlock();
return ret;
}

@@ -735,8 +731,6 @@ int mem_sharing_share_pages(struct domai
p2m_type_t smfn_type, cmfn_type;
DECLARE_PG_LOCK_DATA(pld);

- shr_lock();
-
/* XXX if sd == cd handle potential deadlock by ordering
* the get_ and put_gfn's */
smfn = get_gfn(sd, sgfn, &smfn_type);
@@ -831,7 +825,6 @@ int mem_sharing_share_pages(struct domai

/* Clear the rest of the shared state */
audit_del_list(cpage);
- xfree(cpage->shared_info);
cpage->shared_info = NULL;

mem_sharing_page_unlock(secondpg, &pld);
@@ -847,7 +840,6 @@ int mem_sharing_share_pages(struct domai
err_out:
put_gfn(cd, cgfn);
put_gfn(sd, sgfn);
- shr_unlock();
return ret;
}

@@ -932,16 +924,11 @@ int mem_sharing_unshare_page(struct doma
struct list_head *le;
DECLARE_PG_LOCK_DATA(pld);

- /* This is one of the reasons why we can't enforce ordering
- * between shr_lock and p2m fine-grained locks in mm-lock.
- * Callers may walk in here already holding the lock for this gfn */
- shr_lock();
mfn = get_gfn(d, gfn, &p2mt);

/* Has someone already unshared it? */
if ( !p2m_is_shared(p2mt) ) {
put_gfn(d, gfn);
- shr_unlock();
return 0;
}

@@ -986,7 +973,6 @@ gfn_found:
} else {
/* Clean up shared state */
audit_del_list(page);
- xfree(page->shared_info);
page->shared_info = NULL;
}

@@ -1000,7 +986,6 @@ gfn_found:
test_and_clear_bit(_PGC_allocated, &page->count_info))
put_page(page);
put_gfn(d, gfn);
- shr_unlock();

return 0;
}
@@ -1018,7 +1003,6 @@ gfn_found:
{
mem_sharing_page_unlock(old_page, &pld);
put_gfn(d, gfn);
- shr_unlock();
return -ENOMEM;
}

@@ -1049,7 +1033,6 @@ private_page_found:
paging_mark_dirty(d, mfn_x(page_to_mfn(page)));
/* We do not need to unlock a private page */
put_gfn(d, gfn);
- shr_unlock();
return 0;
}

@@ -1247,7 +1230,7 @@ void __init mem_sharing_init(void)
{
printk("Initing memory sharing.\n");
#if MEM_SHARING_AUDIT
- mm_lock_init(&shr_lock);
+ spin_lock_init(&shr_audit_lock);
INIT_LIST_HEAD(&shr_audit_list);
#endif
}
diff -r 5b9b36648e43 -r 835be7c121b7 xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -143,19 +143,6 @@ static inline void mm_enforce_order_unlo
* *
************************************************************************/

-#if MEM_SHARING_AUDIT
-/* Page-sharing lock (global)
- *
- * A single global lock that protects the memory-sharing code's
- * hash tables. */
-
-declare_mm_lock(shr)
-#define _shr_lock() mm_lock(shr, &shr_lock)
-#define _shr_unlock() mm_unlock(&shr_lock)
-#define _shr_locked_by_me() mm_locked_by_me(&shr_lock)
-
-#else
-
/* Sharing per page lock
*
* This is an external lock, not represented by an mm_lock_t. The memory
@@ -163,9 +150,6 @@ declare_mm_lock(shr)
* tuples to a shared page. We enforce order here against the p2m lock,
* which is taken after the page_lock to change the gfn's p2m entry.
*
- * Note that in sharing audit mode, we use the global page lock above,
- * instead.
- *
* The lock is recursive because during share we lock two pages. */

declare_mm_order_constraint(per_page_sharing)
@@ -174,8 +158,6 @@ declare_mm_order_constraint(per_page_sha
mm_enforce_order_lock_post_per_page_sharing((l), (r))
#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))

-#endif /* MEM_SHARING_AUDIT */
-
/* Nested P2M lock (per-domain)
*
* A per-domain lock that protects the mapping from nested-CR3 to
diff -r 5b9b36648e43 -r 835be7c121b7 xen/include/asm-x86/mem_sharing.h
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -25,7 +25,7 @@
#include <public/domctl.h>

/* Auditing of memory sharing code? */
-#define MEM_SHARING_AUDIT 0
+#define MEM_SHARING_AUDIT 1

typedef uint64_t shr_handle_t;

@@ -35,6 +35,7 @@ struct page_sharing_info
shr_handle_t handle; /* Globally unique version / handle. */
#if MEM_SHARING_AUDIT
struct list_head entry; /* List of all shared pages (entry). */
+ struct rcu_head rcu_head; /* For RCU-delayed freeing of this entry. */
#endif
struct list_head gfns; /* List of domains and gfns for this page (head). */
};

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel