Mailing List Archive

[xen-unstable] Implement tasklets as running in VCPU context (specifically, idle-VCPU context)
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1271668361 -3600
# Node ID ac9293787beb2af3f310cd80b575da4b8d426dbc
# Parent fd142087a8f6a71375a7da47ee373530fd27ab2a
Implement tasklets as running in VCPU context (specifically, idle-VCPU context)

...rather than in softirq context. This is expected to avoid a lot of
subtle deadlocks relating to the fact that softirqs can interrupt a
scheduled vcpu.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
---
xen/arch/ia64/xen/domain.c | 5 +++--
xen/arch/x86/acpi/cpu_idle.c | 3 +--
xen/arch/x86/domain.c | 4 ++--
xen/common/sched_credit.c | 14 +++++++++++++-
xen/common/sched_credit2.c | 14 ++++++++++----
xen/common/sched_sedf.c | 29 +++++++++++------------------
xen/common/tasklet.c | 36 ++++++++++++++++++++++--------------
xen/include/xen/sched.h | 7 +++++++
xen/include/xen/softirq.h | 1 -
xen/include/xen/tasklet.h | 6 ++++--
10 files changed, 73 insertions(+), 46 deletions(-)

diff -r fd142087a8f6 -r ac9293787beb xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Mon Apr 19 10:06:42 2010 +0100
+++ b/xen/arch/ia64/xen/domain.c Mon Apr 19 10:12:41 2010 +0100
@@ -338,7 +338,7 @@ static void default_idle(void)
static void default_idle(void)
{
local_irq_disable();
- if ( !softirq_pending(smp_processor_id()) ) {
+ if ( cpu_is_haltable(smp_processor_id()) ) {
if (can_do_pal_halt)
safe_halt();
else
@@ -360,9 +360,10 @@ static void continue_cpu_idle_loop(void)
#else
irq_stat[cpu].idle_timestamp = jiffies;
#endif
- while ( !softirq_pending(cpu) )
+ while ( cpu_is_haltable(cpu) )
default_idle();
raise_softirq(SCHEDULE_SOFTIRQ);
+ do_tasklet();
do_softirq();
if (!cpu_online(cpu))
play_dead();
diff -r fd142087a8f6 -r ac9293787beb xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c Mon Apr 19 10:06:42 2010 +0100
+++ b/xen/arch/x86/acpi/cpu_idle.c Mon Apr 19 10:12:41 2010 +0100
@@ -310,8 +310,7 @@ static void acpi_processor_idle(void)
*/
local_irq_disable();

- if ( softirq_pending(smp_processor_id()) ||
- cpu_is_offline(smp_processor_id()) )
+ if ( !cpu_is_haltable(smp_processor_id()) )
{
local_irq_enable();
sched_tick_resume();
diff -r fd142087a8f6 -r ac9293787beb xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Mon Apr 19 10:06:42 2010 +0100
+++ b/xen/arch/x86/domain.c Mon Apr 19 10:12:41 2010 +0100
@@ -83,8 +83,7 @@ static void default_idle(void)
static void default_idle(void)
{
local_irq_disable();
- if ( !softirq_pending(smp_processor_id()) &&
- cpu_online(smp_processor_id()) )
+ if ( cpu_is_haltable(smp_processor_id()) )
safe_halt();
else
local_irq_enable();
@@ -124,6 +123,7 @@ void idle_loop(void)
if ( cpu_is_offline(smp_processor_id()) )
play_dead();
(*pm_idle)();
+ do_tasklet();
do_softirq();
}
}
diff -r fd142087a8f6 -r ac9293787beb xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Mon Apr 19 10:06:42 2010 +0100
+++ b/xen/common/sched_credit.c Mon Apr 19 10:12:41 2010 +0100
@@ -1167,11 +1167,16 @@ csched_schedule(s_time_t now)
CSCHED_STAT_CRANK(schedule);
CSCHED_VCPU_CHECK(current);

- /* Update credits */
if ( !is_idle_vcpu(scurr->vcpu) )
{
+ /* Update credits of a non-idle VCPU. */
burn_credits(scurr, now);
scurr->start_time -= now;
+ }
+ else
+ {
+ /* Re-instate a boosted idle VCPU as normal-idle. */
+ scurr->pri = CSCHED_PRI_IDLE;
}

/*
@@ -1183,6 +1188,13 @@ csched_schedule(s_time_t now)
BUG_ON( is_idle_vcpu(current) || list_empty(runq) );

snext = __runq_elem(runq->next);
+
+ /* Tasklet work (which runs in idle VCPU context) overrides all else. */
+ if ( !tasklet_queue_empty(cpu) )
+ {
+ snext = CSCHED_VCPU(idle_vcpu[cpu]);
+ snext->pri = CSCHED_PRI_TS_BOOST;
+ }

/*
* SMP Load balance:
diff -r fd142087a8f6 -r ac9293787beb xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c Mon Apr 19 10:06:42 2010 +0100
+++ b/xen/common/sched_credit2.c Mon Apr 19 10:12:41 2010 +0100
@@ -173,7 +173,6 @@ struct csched_private {
struct csched_private {
spinlock_t lock;
uint32_t ncpus;
- struct domain *idle_domain;

struct list_head sdom; /* Used mostly for dump keyhandler. */

@@ -572,8 +571,6 @@ csched_vcpu_init(struct vcpu *vc)
BUG_ON( sdom != NULL );
svc->credit = CSCHED_IDLE_CREDIT;
svc->weight = 0;
- if ( csched_priv.idle_domain == NULL )
- csched_priv.idle_domain = dom;
}

CSCHED_VCPU_CHECK(vc);
@@ -876,6 +873,13 @@ csched_schedule(s_time_t now)

/* Update credits */
burn_credits(rqd, scurr, now);
+
+ /* Tasklet work (which runs in idle VCPU context) overrides all else. */
+ if ( !tasklet_queue_empty(cpu) )
+ {
+ snext = CSCHED_VCPU(idle_vcpu[cpu]);
+ goto out;
+ }

/*
* Select next runnable local VCPU (ie top of local runq).
@@ -891,7 +895,7 @@ csched_schedule(s_time_t now)
* vcpu for this processor.
*/
if ( list_empty(runq) )
- snext = CSCHED_VCPU(csched_priv.idle_domain->vcpu[cpu]);
+ snext = CSCHED_VCPU(idle_vcpu[cpu]);
else
snext = __runq_elem(runq->next);

@@ -946,6 +950,8 @@ csched_schedule(s_time_t now)
snext->start_time = now;
snext->vcpu->processor = cpu; /* Safe because lock for old processor is held */
}
+
+ out:
/*
* Return task to run next...
*/
diff -r fd142087a8f6 -r ac9293787beb xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c Mon Apr 19 10:06:42 2010 +0100
+++ b/xen/common/sched_sedf.c Mon Apr 19 10:12:41 2010 +0100
@@ -790,7 +790,13 @@ static struct task_slice sedf_do_schedul
/*now simply pick the first domain from the runqueue, which has the
earliest deadline, because the list is sorted*/

- if ( !list_empty(runq) )
+ /* Tasklet work (which runs in idle VCPU context) overrides all else. */
+ if ( !tasklet_queue_empty(cpu) || (list_empty(runq) && list_empty(waitq)) )
+ {
+ ret.task = IDLETASK(cpu);
+ ret.time = SECONDS(1);
+ }
+ else if ( !list_empty(runq) )
{
runinf = list_entry(runq->next,struct sedf_vcpu_info,list);
ret.task = runinf->vcpu;
@@ -808,29 +814,16 @@ static struct task_slice sedf_do_schedul
{
ret.time = runinf->slice - runinf->cputime;
}
- CHECK(ret.time > 0);
- goto sched_done;
- }
-
- if ( !list_empty(waitq) )
+ }
+ else
{
waitinf = list_entry(waitq->next,struct sedf_vcpu_info, list);
/*we could not find any suitable domain
=> look for domains that are aware of extratime*/
ret = sedf_do_extra_schedule(now, PERIOD_BEGIN(waitinf),
extraq, cpu);
- CHECK(ret.time > 0);
- }
- else
- {
- /*this could probably never happen, but one never knows...*/
- /*it can... imagine a second CPU, which is pure scifi ATM,
- but one never knows ;)*/
- ret.task = IDLETASK(cpu);
- ret.time = SECONDS(1);
- }
-
- sched_done:
+ }
+
/*TODO: Do something USEFUL when this happens and find out, why it
still can happen!!!*/
if ( ret.time < 0)
diff -r fd142087a8f6 -r ac9293787beb xen/common/tasklet.c
--- a/xen/common/tasklet.c Mon Apr 19 10:06:42 2010 +0100
+++ b/xen/common/tasklet.c Mon Apr 19 10:12:41 2010 +0100
@@ -1,8 +1,8 @@
/******************************************************************************
* tasklet.c
*
- * Dynamically-allocatable tasks run in softirq context on at most one CPU at
- * a time.
+ * Tasklets are dynamically-allocatable tasks run in VCPU context
+ * (specifically, the idle VCPU's context) on at most one CPU at a time.
*
* Copyright (c) 2010, Citrix Systems, Inc.
* Copyright (c) 1992, Linus Torvalds
@@ -17,8 +17,16 @@
#include <xen/softirq.h>
#include <xen/tasklet.h>

+/* Some subsystems call into us before we are initialised. We ignore them. */
static bool_t tasklets_initialised;
+
+/*
+ * NB. Any modification to a tasklet_list requires the scheduler to run
+ * on the related CPU so that its idle VCPU's priority is set correctly.
+ */
static DEFINE_PER_CPU(struct list_head, tasklet_list);
+
+/* Protects all lists and tasklet structures. */
static DEFINE_SPINLOCK(tasklet_lock);

void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
@@ -34,7 +42,7 @@ void tasklet_schedule_on_cpu(struct task
{
list_del(&t->list);
list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
- cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
}
}

@@ -46,7 +54,7 @@ void tasklet_schedule(struct tasklet *t)
tasklet_schedule_on_cpu(t, smp_processor_id());
}

-static void tasklet_action(void)
+void do_tasklet(void)
{
unsigned int cpu = smp_processor_id();
struct list_head *list = &per_cpu(tasklet_list, cpu);
@@ -78,17 +86,17 @@ static void tasklet_action(void)
BUG_ON(t->is_dead || !list_empty(&t->list));
list_add_tail(&t->list, &per_cpu(tasklet_list, t->scheduled_on));
if ( t->scheduled_on != cpu )
- cpu_raise_softirq(t->scheduled_on, TASKLET_SOFTIRQ);
+ cpu_raise_softirq(t->scheduled_on, SCHEDULE_SOFTIRQ);
}

- /*
- * If there is more work to do then reschedule. We don't grab more work
- * immediately as we want to allow other softirq work to happen first.
- */
- if ( !list_empty(list) )
- raise_softirq(TASKLET_SOFTIRQ);
+ raise_softirq(SCHEDULE_SOFTIRQ);

spin_unlock_irq(&tasklet_lock);
+}
+
+bool_t tasklet_queue_empty(unsigned int cpu)
+{
+ return list_empty(&per_cpu(tasklet_list, cpu));
}

void tasklet_kill(struct tasklet *t)
@@ -101,7 +109,9 @@ void tasklet_kill(struct tasklet *t)
{
BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
list_del_init(&t->list);
+ cpu_raise_softirq(t->scheduled_on, SCHEDULE_SOFTIRQ);
}
+
t->scheduled_on = -1;
t->is_dead = 1;

@@ -132,7 +142,7 @@ void migrate_tasklets_from_cpu(unsigned
list_add_tail(&t->list, &this_cpu(tasklet_list));
}

- raise_softirq(TASKLET_SOFTIRQ);
+ raise_softirq(SCHEDULE_SOFTIRQ);

spin_unlock_irqrestore(&tasklet_lock, flags);
}
@@ -154,8 +164,6 @@ void __init tasklet_subsys_init(void)
for_each_possible_cpu ( cpu )
INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));

- open_softirq(TASKLET_SOFTIRQ, tasklet_action);
-
tasklets_initialised = 1;
}

diff -r fd142087a8f6 -r ac9293787beb xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Mon Apr 19 10:06:42 2010 +0100
+++ b/xen/include/xen/sched.h Mon Apr 19 10:12:41 2010 +0100
@@ -579,6 +579,13 @@ void vcpu_runstate_get(struct vcpu *v, s
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
uint64_t get_cpu_idle_time(unsigned int cpu);

+/*
+ * Used by idle loop to decide whether there is work to do:
+ * (1) Run softirqs; or (2) Play dead; or (3) Run tasklets.
+ */
+#define cpu_is_haltable(cpu) \
+ (!softirq_pending(cpu) && cpu_online(cpu) && tasklet_queue_empty(cpu))
+
#define IS_PRIV(_d) ((_d)->is_privileged)
#define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))

diff -r fd142087a8f6 -r ac9293787beb xen/include/xen/softirq.h
--- a/xen/include/xen/softirq.h Mon Apr 19 10:06:42 2010 +0100
+++ b/xen/include/xen/softirq.h Mon Apr 19 10:12:41 2010 +0100
@@ -9,7 +9,6 @@ enum {
PAGE_SCRUB_SOFTIRQ,
RCU_SOFTIRQ,
STOPMACHINE_SOFTIRQ,
- TASKLET_SOFTIRQ,
NR_COMMON_SOFTIRQS
};

diff -r fd142087a8f6 -r ac9293787beb xen/include/xen/tasklet.h
--- a/xen/include/xen/tasklet.h Mon Apr 19 10:06:42 2010 +0100
+++ b/xen/include/xen/tasklet.h Mon Apr 19 10:12:41 2010 +0100
@@ -1,8 +1,8 @@
/******************************************************************************
* tasklet.h
*
- * Dynamically-allocatable tasks run in softirq context on at most one CPU at
- * a time.
+ * Tasklets are dynamically-allocatable tasks run in VCPU context
+ * (specifically, the idle VCPU's context) on at most one CPU at a time.
*/

#ifndef __XEN_TASKLET_H__
@@ -26,6 +26,8 @@ struct tasklet

void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu);
void tasklet_schedule(struct tasklet *t);
+void do_tasklet(void);
+bool_t tasklet_queue_empty(unsigned int cpu);
void tasklet_kill(struct tasklet *t);
void migrate_tasklets_from_cpu(unsigned int cpu);
void tasklet_init(

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog