
[xen-unstable] x86: Allow continue_hypercall_on_cpu() to be called from within an existing continuation handler
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1221657190 -3600
# Node ID 366c78ff361bafb2271c551c4976e4caedea72b2
# Parent beb28a3975bd39c93c7934dd5e7ec80c69a86c4a
x86: Allow continue_hypercall_on_cpu() to be called from within an
existing continuation handler. This fix is needed for the new method
of microcode re-programming.
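
For illustration only, not part of this changeset: with this change a
continuation handler may itself call continue_hypercall_on_cpu() to hop to
another CPU, which is how a per-CPU microcode update can walk every online
CPU in turn. The sketch below assumes a hypothetical handler name
(ucode_update_next_cpu); the calls it makes are existing interfaces.

    /* Hypothetical sketch -- not part of this patch. */
    static long ucode_update_next_cpu(void *data)
    {
        unsigned int cpu = (unsigned long)data;

        /* ... apply the microcode update on this CPU (omitted) ... */

        /* Hop to the next online CPU, if there is one. */
        cpu = next_cpu(cpu, cpu_online_map);
        if ( cpu >= NR_CPUS )
            return 0;

        /*
         * Calling continue_hypercall_on_cpu() from inside the continuation
         * handler is what this patch permits: the existing migrate_info is
         * reused, the locked affinity is re-targeted via
         * vcpu_locked_change_affinity(), and info->nest is incremented.
         */
        return continue_hypercall_on_cpu(cpu, ucode_update_next_cpu,
                                         (void *)(unsigned long)cpu);
    }

The top-level hypercall would start the chain on the first online CPU; each
invocation then reschedules itself on the next one until the mask is
exhausted. Note the BUG_ON(info->nest != 0) below: only one level of nesting
is permitted at a time.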

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
---
 xen/arch/x86/domain.c   |   63 ++++++++++++++++++++++++++++++------------------
 xen/common/schedule.c   |    5 +++
 xen/include/xen/sched.h |    1
 3 files changed, 46 insertions(+), 23 deletions(-)

diff -r beb28a3975bd -r 366c78ff361b xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Wed Sep 17 13:11:40 2008 +0100
+++ b/xen/arch/x86/domain.c Wed Sep 17 14:13:10 2008 +0100
@@ -1356,6 +1356,7 @@ struct migrate_info {
     void *data;
     void (*saved_schedule_tail)(struct vcpu *);
     cpumask_t saved_affinity;
+    unsigned int nest;
 };
 
 static void continue_hypercall_on_cpu_helper(struct vcpu *v)
@@ -1363,47 +1364,63 @@ static void continue_hypercall_on_cpu_he
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct migrate_info *info = v->arch.continue_info;
     cpumask_t mask = info->saved_affinity;
+    void (*saved_schedule_tail)(struct vcpu *) = info->saved_schedule_tail;
 
     regs->eax = info->func(info->data);
 
-    v->arch.schedule_tail = info->saved_schedule_tail;
-    v->arch.continue_info = NULL;
-
-    xfree(info);
-
-    vcpu_unlock_affinity(v, &mask);
-    schedule_tail(v);
+    if ( info->nest-- == 0 )
+    {
+        xfree(info);
+        v->arch.schedule_tail = saved_schedule_tail;
+        v->arch.continue_info = NULL;
+        vcpu_unlock_affinity(v, &mask);
+    }
+
+    (*saved_schedule_tail)(v);
 }
 
 int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data)
 {
     struct vcpu *v = current;
     struct migrate_info *info;
+    cpumask_t mask = cpumask_of_cpu(cpu);
     int rc;
 
     if ( cpu == smp_processor_id() )
         return func(data);
 
-    info = xmalloc(struct migrate_info);
+    info = v->arch.continue_info;
     if ( info == NULL )
-        return -ENOMEM;
+    {
+        info = xmalloc(struct migrate_info);
+        if ( info == NULL )
+            return -ENOMEM;
+
+        rc = vcpu_lock_affinity(v, &mask);
+        if ( rc )
+        {
+            xfree(info);
+            return rc;
+        }
+
+        info->saved_schedule_tail = v->arch.schedule_tail;
+        info->saved_affinity = mask;
+        info->nest = 0;
+
+        v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
+        v->arch.continue_info = info;
+    }
+    else
+    {
+        BUG_ON(info->nest != 0);
+        rc = vcpu_locked_change_affinity(v, &mask);
+        if ( rc )
+            return rc;
+        info->nest++;
+    }
 
     info->func = func;
     info->data = data;
-    info->saved_schedule_tail = v->arch.schedule_tail;
-    info->saved_affinity = cpumask_of_cpu(cpu);
-
-    v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
-    v->arch.continue_info = info;
-
-    rc = vcpu_lock_affinity(v, &info->saved_affinity);
-    if ( rc )
-    {
-        v->arch.schedule_tail = info->saved_schedule_tail;
-        v->arch.continue_info = NULL;
-        xfree(info);
-        return rc;
-    }
 
     /* Dummy return value will be overwritten by new schedule_tail. */
     BUG_ON(!test_bit(SCHEDULE_SOFTIRQ, &softirq_pending(smp_processor_id())));
diff -r beb28a3975bd -r 366c78ff361b xen/common/schedule.c
--- a/xen/common/schedule.c Wed Sep 17 13:11:40 2008 +0100
+++ b/xen/common/schedule.c Wed Sep 17 14:13:10 2008 +0100
@@ -380,6 +380,11 @@ int vcpu_lock_affinity(struct vcpu *v, c
     return __vcpu_set_affinity(v, affinity, 0, 1);
 }
 
+int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+    return __vcpu_set_affinity(v, affinity, 1, 1);
+}
+
 void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity)
 {
     cpumask_t online_affinity;
diff -r beb28a3975bd -r 366c78ff361b xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Wed Sep 17 13:11:40 2008 +0100
+++ b/xen/include/xen/sched.h Wed Sep 17 14:13:10 2008 +0100
@@ -527,6 +527,7 @@ void cpu_disable_scheduler(void);
 void cpu_disable_scheduler(void);
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
 int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity);
+int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity);
 void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog