Mailing List Archive

[xen-unstable] X86: offline/broken page handler for pod cache
# HG changeset patch
# User Liu, Jinsong <jinsong.liu@intel.com>
# Date 1302174758 -3600
# Node ID fd8b81db422ddd80f036fc800691d3225f5fb2eb
# Parent d8bb2de119de41d8a62174963aadc0950dea5557
X86: offline/broken page handler for pod cache

When a page is offlined, or when a broken page occurs, the page may be
populated, or it may be sitting in the PoD cache. This patch handles an
offline/broken page found in the PoD cache: it scans the cache and, on a
hit, removes the page and replaces it, then puts the offline/broken page
on page_offlined_list/page_broken_list.

Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
---


diff -r d8bb2de119de -r fd8b81db422d xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Thu Apr 07 12:12:01 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c Thu Apr 07 12:12:38 2011 +0100
@@ -678,6 +678,78 @@
spin_unlock(&d->page_alloc_lock);
}

+int
+p2m_pod_offline_or_broken_hit(struct page_info *p)
+{
+ struct domain *d;
+ struct p2m_domain *p2m;
+ struct page_info *q, *tmp;
+ unsigned long mfn, bmfn;
+
+ if ( !(d = page_get_owner(p)) || !(p2m = p2m_get_hostp2m(d)) )
+ return 0;
+
+ spin_lock(&d->page_alloc_lock);
+ bmfn = mfn_x(page_to_mfn(p));
+ page_list_for_each_safe(q, tmp, &p2m->pod.super)
+ {
+ mfn = mfn_x(page_to_mfn(q));
+ if ( (bmfn >= mfn) && ((bmfn - mfn) < SUPERPAGE_PAGES) )
+ {
+ unsigned long i;
+ page_list_del(q, &p2m->pod.super);
+ for ( i = 0; i < SUPERPAGE_PAGES; i++)
+ {
+ q = mfn_to_page(_mfn(mfn + i));
+ page_list_add_tail(q, &p2m->pod.single);
+ }
+ page_list_del(p, &p2m->pod.single);
+ p2m->pod.count--;
+ goto pod_hit;
+ }
+ }
+
+ page_list_for_each_safe(q, tmp, &p2m->pod.single)
+ {
+ mfn = mfn_x(page_to_mfn(q));
+ if ( mfn == bmfn )
+ {
+ page_list_del(p, &p2m->pod.single);
+ p2m->pod.count--;
+ goto pod_hit;
+ }
+ }
+
+ spin_unlock(&d->page_alloc_lock);
+ return 0;
+
+pod_hit:
+ page_list_add_tail(p, &d->arch.relmem_list);
+ spin_unlock(&d->page_alloc_lock);
+ return 1;
+}
+
+void
+p2m_pod_offline_or_broken_replace(struct page_info *p)
+{
+ struct domain *d;
+ struct p2m_domain *p2m;
+
+ if ( !(d = page_get_owner(p)) || !(p2m = p2m_get_hostp2m(d)) )
+ return;
+
+ free_domheap_page(p);
+
+ p = alloc_domheap_page(d, 0);
+ if ( unlikely(!p) )
+ return;
+
+ p2m_lock(p2m);
+ p2m_pod_cache_add(p2m, p, 0);
+ p2m_unlock(p2m);
+ return;
+}
+
/* This function is needed for two reasons:
* + To properly handle clearing of PoD entries
* + To "steal back" memory being freed for the PoD cache, rather than
diff -r d8bb2de119de -r fd8b81db422d xen/common/page_alloc.c
--- a/xen/common/page_alloc.c Thu Apr 07 12:12:01 2011 +0100
+++ b/xen/common/page_alloc.c Thu Apr 07 12:12:38 2011 +0100
@@ -41,6 +41,7 @@
#include <asm/page.h>
#include <asm/numa.h>
#include <asm/flushtlb.h>
+#include <asm/p2m.h>

/*
* Comma-separated list of hexadecimal page numbers containing bad bytes.
@@ -714,10 +715,15 @@
}
else if ( (owner = page_get_owner_and_reference(pg)) )
{
+ if ( p2m_pod_offline_or_broken_hit(pg) )
+ goto pod_replace;
+ else
+ {
*status = PG_OFFLINE_OWNED | PG_OFFLINE_PENDING |
(owner->domain_id << PG_OFFLINE_OWNER_SHIFT);
/* Release the reference since it will not be allocated anymore */
put_page(pg);
+ }
}
else if ( old_info & PGC_xen_heap )
{
@@ -744,6 +750,18 @@
spin_unlock(&heap_lock);

return ret;
+
+pod_replace:
+ put_page(pg);
+ spin_unlock(&heap_lock);
+
+ p2m_pod_offline_or_broken_replace(pg);
+ *status = PG_OFFLINE_OFFLINED;
+
+ if ( broken )
+ *status |= PG_OFFLINE_BROKEN;
+
+ return ret;
}

/*
diff -r d8bb2de119de -r fd8b81db422d xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Apr 07 12:12:01 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Apr 07 12:12:38 2011 +0100
@@ -530,6 +530,14 @@
unsigned int order,
p2m_query_t q);

+/* Scan pod cache when offline/broken page triggered */
+int
+p2m_pod_offline_or_broken_hit(struct page_info *p);
+
+/* Replace pod cache when offline/broken page triggered */
+void
+p2m_pod_offline_or_broken_replace(struct page_info *p);
+
/* Add a page to a domain's p2m table */
int guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
unsigned long mfn, unsigned int page_order,

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog