[xen-unstable] Implement x86 continuable domain destroy.
# HG changeset patch
# User kfraser@localhost.localdomain
# Date 1188576011 -3600
# Node ID 3b50a7e52ff2f67de6cbd2cf03427b9bbac3ec5e
# Parent 230000d3ef329aa718ed432682ab103eb805c331
Implement x86 continuable domain destroy.
This patch addresses the following bug report:
http://bugzilla.xensource.com/bugzilla/show_bug.cgi?id=1037
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
---
 xen/arch/x86/domain.c        |  116 ++++++++++++++++++++++++++++++++-----------
 xen/arch/x86/domctl.c        |   12 ++--
 xen/common/domain.c          |    2
 xen/include/asm-x86/domain.h |   13 ++++
 4 files changed, 112 insertions(+), 31 deletions(-)
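
The approach, in outline: destroying a domain means releasing every page it
owns, which for a large guest is far too much work for a single
uninterruptible hypercall. The patch therefore turns
domain_relinquish_resources() into a restartable state machine: progress is
recorded in d->arch.relmem, relinquish_memory() returns -EAGAIN as soon as
hypercall_preempt_check() fires, and the caller simply re-issues the
operation until it completes. The following stand-alone C sketch shows the
same fallthrough-switch continuation pattern in miniature; it is
illustrative only (work_state, job, do_teardown, BUDGET and RC_EAGAIN are
made-up names, not Xen's):

    #include <stdio.h>

    enum work_state { WS_NOT_STARTED, WS_PHASE1, WS_PHASE2, WS_DONE };

    struct job {
        enum work_state state;
        int phase1_left, phase2_left;   /* pages still to release, say */
    };

    #define RC_EAGAIN (-11)   /* stands in for -EAGAIN */
    #define BUDGET    100     /* work items allowed per invocation */

    static int do_teardown(struct job *j)
    {
        int budget = BUDGET;

        switch ( j->state )
        {
        case WS_NOT_STARTED:
            /* One-shot setup work would go here. */
            j->state = WS_PHASE1;
            /* fallthrough */
        case WS_PHASE1:
            while ( j->phase1_left > 0 )
            {
                j->phase1_left--;
                if ( --budget == 0 )
                    return RC_EAGAIN;       /* yield; caller re-invokes */
            }
            j->state = WS_PHASE2;
            /* fallthrough */
        case WS_PHASE2:
            while ( j->phase2_left > 0 )
            {
                j->phase2_left--;
                if ( --budget == 0 )
                    return RC_EAGAIN;
            }
            j->state = WS_DONE;
            /* fallthrough */
        case WS_DONE:
            break;
        }
        return 0;
    }

    int main(void)
    {
        struct job j = { WS_NOT_STARTED, 450, 300 };
        int preemptions = 0;

        while ( do_teardown(&j) == RC_EAGAIN )
            preemptions++;

        printf("teardown complete after %d preemption(s)\n", preemptions);
        return 0;
    }

Note how an interrupted phase simply resumes at its own case label: the
enum value only advances once a phase has genuinely finished.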

diff -r 230000d3ef32 -r 3b50a7e52ff2 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Fri Aug 31 16:50:26 2007 +0100
+++ b/xen/arch/x86/domain.c Fri Aug 31 17:00:11 2007 +0100
@@ -437,6 +437,9 @@ int arch_domain_create(struct domain *d)
     int vcpuid, pdpt_order, paging_initialised = 0;
     int rc = -ENOMEM;

+    d->arch.relmem = RELMEM_not_started;
+    INIT_LIST_HEAD(&d->arch.relmem_list);
+
     pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
     d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
     if ( d->arch.mm_perdomain_pt == NULL )
@@ -1599,12 +1602,13 @@ int hypercall_xlat_continuation(unsigned
 }
 #endif

-static void relinquish_memory(struct domain *d, struct list_head *list,
-                              unsigned long type)
+static int relinquish_memory(
+    struct domain *d, struct list_head *list, unsigned long type)
 {
     struct list_head *ent;
     struct page_info *page;
     unsigned long x, y;
+    int ret = 0;

     /* Use a recursive lock, as we may enter 'free_domheap_page'. */
     spin_lock_recursive(&d->page_alloc_lock);
@@ -1619,6 +1623,7 @@ static void relinquish_memory(struct dom
         {
             /* Couldn't get a reference -- someone is freeing this page. */
             ent = ent->next;
+            list_move_tail(&page->list, &d->arch.relmem_list);
             continue;
         }

@@ -1653,10 +1658,21 @@ static void relinquish_memory(struct dom

         /* Follow the list chain and /then/ potentially free the page. */
         ent = ent->next;
+        list_move_tail(&page->list, &d->arch.relmem_list);
         put_page(page);
-    }
-
+
+        if ( hypercall_preempt_check() )
+        {
+            ret = -EAGAIN;
+            goto out;
+        }
+    }
+
+    list_splice_init(&d->arch.relmem_list, list);
+
+ out:
     spin_unlock_recursive(&d->page_alloc_lock);
+    return ret;
 }

static void vcpu_destroy_pagetables(struct vcpu *v)
@@ -1719,35 +1735,81 @@ static void vcpu_destroy_pagetables(stru

 int domain_relinquish_resources(struct domain *d)
 {
+    int ret;
     struct vcpu *v;

     BUG_ON(!cpus_empty(d->domain_dirty_cpumask));

-    /* Tear down paging-assistance stuff. */
-    paging_teardown(d);
-
-    /* Drop the in-use references to page-table bases. */
-    for_each_vcpu ( d, v )
-        vcpu_destroy_pagetables(v);
-
-    /*
-     * Relinquish GDT mappings. No need for explicit unmapping of the LDT as
-     * it automatically gets squashed when the guest's mappings go away.
-     */
-    for_each_vcpu(d, v)
-        destroy_gdt(v);
-
-    /* Relinquish every page of memory. */
+    switch ( d->arch.relmem )
+    {
+    case RELMEM_not_started:
+        /* Tear down paging-assistance stuff. */
+        paging_teardown(d);
+
+        /* Drop the in-use references to page-table bases. */
+        for_each_vcpu ( d, v )
+            vcpu_destroy_pagetables(v);
+
+        /*
+         * Relinquish GDT mappings. No need for explicit unmapping of the LDT
+         * as it automatically gets squashed when the guest's mappings go away.
+         */
+        for_each_vcpu(d, v)
+            destroy_gdt(v);
+
+        d->arch.relmem = RELMEM_xen_l4;
+        /* fallthrough */
+
+        /* Relinquish every page of memory. */
 #if CONFIG_PAGING_LEVELS >= 4
-    relinquish_memory(d, &d->xenpage_list, PGT_l4_page_table);
-    relinquish_memory(d, &d->page_list, PGT_l4_page_table);
-#endif
+    case RELMEM_xen_l4:
+        ret = relinquish_memory(d, &d->xenpage_list, PGT_l4_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_dom_l4;
+        /* fallthrough */
+    case RELMEM_dom_l4:
+        ret = relinquish_memory(d, &d->page_list, PGT_l4_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_xen_l3;
+        /* fallthrough */
+#endif
+
 #if CONFIG_PAGING_LEVELS >= 3
-    relinquish_memory(d, &d->xenpage_list, PGT_l3_page_table);
-    relinquish_memory(d, &d->page_list, PGT_l3_page_table);
-#endif
-    relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
-    relinquish_memory(d, &d->page_list, PGT_l2_page_table);
+    case RELMEM_xen_l3:
+        ret = relinquish_memory(d, &d->xenpage_list, PGT_l3_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_dom_l3;
+        /* fallthrough */
+    case RELMEM_dom_l3:
+        ret = relinquish_memory(d, &d->page_list, PGT_l3_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_xen_l2;
+        /* fallthrough */
+#endif
+
+    case RELMEM_xen_l2:
+        ret = relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_dom_l2;
+        /* fallthrough */
+    case RELMEM_dom_l2:
+        ret = relinquish_memory(d, &d->page_list, PGT_l2_page_table);
+        if ( ret )
+            return ret;
+        d->arch.relmem = RELMEM_done;
+        /* fallthrough */
+
+    case RELMEM_done:
+        break;
+
+    default:
+        BUG();
+    }

     /* Free page used by xen oprofile buffer. */
     free_xenoprof_pages(d);
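
A detail worth noting in relinquish_memory() above: pages that have been
dealt with are moved onto d->arch.relmem_list with list_move_tail(), so a
preempted walk never revisits finished work, and only a completed walk
splices everything back with list_splice_init(). Here is a toy model of
that side-list idiom, using a minimal singly-linked list in place of Xen's
<xen/list.h>; all names are illustrative:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; };

    struct slist { struct node *head, *tail; };

    static void push_tail(struct slist *l, struct node *n)
    {
        n->next = NULL;
        if ( l->tail )
            l->tail->next = n;
        else
            l->head = n;
        l->tail = n;
    }

    /* Returns true when the walk finishes, false if it was "preempted". */
    static bool walk(struct slist *main_list, struct slist *done, int budget)
    {
        struct node *n;

        while ( (n = main_list->head) != NULL )
        {
            main_list->head = n->next;
            if ( main_list->head == NULL )
                main_list->tail = NULL;

            /* ... release the page here ... */
            push_tail(done, n);             /* park it on the side list */

            if ( --budget == 0 )
                return false;               /* resume later from main_list->head */
        }

        /* Walk complete: splice 'done' back, mirroring list_splice_init(). */
        *main_list = *done;
        done->head = done->tail = NULL;
        return true;
    }

    int main(void)
    {
        struct node nodes[10];
        struct slist pages = { NULL, NULL }, parked = { NULL, NULL };
        int i, passes = 0;

        for ( i = 0; i < 10; i++ )
            push_tail(&pages, &nodes[i]);

        while ( !walk(&pages, &parked, 3) ) /* budget of 3 items per pass */
            passes++;

        printf("walked all nodes in %d preempted pass(es) + 1\n", passes);
        return 0;
    }
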
diff -r 230000d3ef32 -r 3b50a7e52ff2 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c Fri Aug 31 16:50:26 2007 +0100
+++ b/xen/arch/x86/domctl.c Fri Aug 31 17:00:11 2007 +0100
@@ -257,10 +257,14 @@ long arch_do_domctl(
                 break;
             }

+            spin_lock(&d->page_alloc_lock);
+
+            if ( unlikely(d->is_dying) ) {
+                spin_unlock(&d->page_alloc_lock);
+                goto getmemlist_out;
+            }
+
             ret = 0;
-
-            spin_lock(&d->page_alloc_lock);
-
             list_ent = d->page_list.next;
             for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
             {
@@ -279,7 +283,7 @@ long arch_do_domctl(

             domctl->u.getmemlist.num_pfns = i;
             copy_to_guest(u_domctl, domctl, 1);
-
+        getmemlist_out:
             rcu_unlock_domain(d);
         }
     }
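
The domctl change is a safety net rather than a feature:
XEN_DOMCTL_getmemlist now takes page_alloc_lock before testing d->is_dying
and bails out if the domain is being torn down, since page_list is no
longer stable once a continuable destroy may be rearranging it via
relmem_list. A hedged user-space sketch of that check-under-lock shape,
with a pthread mutex standing in for Xen's spinlock and all names
illustrative:

    #include <pthread.h>
    #include <stdbool.h>

    struct dom {
        pthread_mutex_t page_alloc_lock;
        bool is_dying;
        /* ... page list ... */
    };

    /* Returns false (without touching the list) if teardown has begun. */
    static bool walk_page_list(struct dom *d)
    {
        pthread_mutex_lock(&d->page_alloc_lock);

        if ( d->is_dying )
        {
            pthread_mutex_unlock(&d->page_alloc_lock);
            return false;       /* list may be mid-splice; leave it alone */
        }

        /* ... iterate the page list here, still under the lock ... */

        pthread_mutex_unlock(&d->page_alloc_lock);
        return true;
    }

    int main(void)
    {
        struct dom d = { PTHREAD_MUTEX_INITIALIZER, false };
        return walk_page_list(&d) ? 0 : 1;
    }
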
diff -r 230000d3ef32 -r 3b50a7e52ff2 xen/common/domain.c
--- a/xen/common/domain.c Fri Aug 31 16:50:26 2007 +0100
+++ b/xen/common/domain.c Fri Aug 31 17:00:11 2007 +0100
@@ -318,6 +318,7 @@ int domain_kill(struct domain *d)
         d->is_dying = DOMDYING_dying;
         evtchn_destroy(d);
         gnttab_release_mappings(d);
+        /* fallthrough */
     case DOMDYING_dying:
         rc = domain_relinquish_resources(d);
         page_scrub_kick();
@@ -329,6 +330,7 @@ int domain_kill(struct domain *d)
         d->is_dying = DOMDYING_dead;
         put_domain(d);
         send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+        /* fallthrough */
     case DOMDYING_dead:
         break;
     }
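
domain_kill() was already a fallthrough switch over d->is_dying; the added
comments only make that explicit. The behavioural change is that
domain_relinquish_resources() can now return -EAGAIN, which domain_kill()
passes up, so the destroy operation must be re-invoked until it finally
succeeds. An illustrative, self-contained sketch of that retry loop
(kill_step() is a stand-in for domain_kill(); in Xen the retry is actually
driven from the domctl/toolstack path):

    #include <errno.h>
    #include <stdio.h>

    static int steps_left = 3;      /* pretend three preemptions happen */

    static int kill_step(void)      /* stand-in for domain_kill(d) */
    {
        return ( steps_left-- > 0 ) ? -EAGAIN : 0;
    }

    int main(void)
    {
        int rc;

        do {
            rc = kill_step();       /* resumes from the recorded state */
        } while ( rc == -EAGAIN );  /* preempted: just call again */

        printf("domain destroyed, rc=%d\n", rc);
        return rc;
    }
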
diff -r 230000d3ef32 -r 3b50a7e52ff2 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Fri Aug 31 16:50:26 2007 +0100
+++ b/xen/include/asm-x86/domain.h Fri Aug 31 17:00:11 2007 +0100
@@ -234,6 +234,19 @@ struct arch_domain
     bool_t is_32bit_pv;
     /* Is shared-info page in 32-bit format? */
     bool_t has_32bit_shinfo;
+
+    /* Continuable domain_relinquish_resources(). */
+    enum {
+        RELMEM_not_started,
+        RELMEM_xen_l4,
+        RELMEM_dom_l4,
+        RELMEM_xen_l3,
+        RELMEM_dom_l3,
+        RELMEM_xen_l2,
+        RELMEM_dom_l2,
+        RELMEM_done,
+    } relmem;
+    struct list_head relmem_list;
 } __cacheline_aligned;

 #ifdef CONFIG_X86_PAE
