About tickling, and PCPU selection.
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
Changes from v1:
* Dummy `struct d {}', holding only `cpu', removed
in favour of the much more readable `trace_var(..., sizeof(cpu), &cpu)',
as suggested.
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -21,6 +21,7 @@
#include <asm/atomic.h>
#include <xen/errno.h>
#include <xen/keyhandler.h>
+#include <xen/trace.h>
/*
@@ -97,6 +98,18 @@
/*
+ * Credit tracing events ("only" 512 available!). Check
+ * include/public/trace.h for more details.
+ */
+#define TRC_CSCHED_SCHED_TASKLET TRC_SCHED_CLASS_EVT(CSCHED, 1)
+#define TRC_CSCHED_ACCOUNT_START TRC_SCHED_CLASS_EVT(CSCHED, 2)
+#define TRC_CSCHED_ACCOUNT_STOP TRC_SCHED_CLASS_EVT(CSCHED, 3)
+#define TRC_CSCHED_STOLEN_VCPU TRC_SCHED_CLASS_EVT(CSCHED, 4)
+#define TRC_CSCHED_PICKED_CPU TRC_SCHED_CLASS_EVT(CSCHED, 5)
+#define TRC_CSCHED_TICKLE TRC_SCHED_CLASS_EVT(CSCHED, 6)
+
+
+/*
* Boot parameters
*/
static int __read_mostly sched_credit_tslice_ms = CSCHED_DEFAULT_TSLICE_MS;
@@ -315,9 +328,18 @@ static inline void
}
}
- /* Send scheduler interrupts to designated CPUs */
if ( !cpumask_empty(&mask) )
+ {
+ if ( unlikely(tb_init_done) )
+ {
+ /* Avoid TRACE_*: saves checking !tb_init_done each step */
+ for_each_cpu(cpu, &mask)
+ trace_var(TRC_CSCHED_TICKLE, 0, sizeof(cpu), &cpu);
+ }
+
+ /* Send scheduler interrupts to designated CPUs */
cpumask_raise_softirq(&mask, SCHEDULE_SOFTIRQ);
+ }
}
static void
@@ -554,6 +576,8 @@ static int
if ( commit && spc )
spc->idle_bias = cpu;
+ TRACE_3D(TRC_CSCHED_PICKED_CPU, vc->domain->domain_id, vc->vcpu_id, cpu);
+
return cpu;
}
@@ -586,6 +610,9 @@ static inline void
}
}
+ TRACE_3D(TRC_CSCHED_ACCOUNT_START, sdom->dom->domain_id,
+ svc->vcpu->vcpu_id, sdom->active_vcpu_count);
+
spin_unlock_irqrestore(&prv->lock, flags);
}
@@ -608,6 +635,9 @@ static inline void
{
list_del_init(&sdom->active_sdom_elem);
}
+
+ TRACE_3D(TRC_CSCHED_ACCOUNT_STOP, sdom->dom->domain_id,
+ svc->vcpu->vcpu_id, sdom->active_vcpu_count);
}
static void
@@ -1241,6 +1271,8 @@ csched_runq_steal(int peer_cpu, int cpu,
if (__csched_vcpu_is_migrateable(vc, cpu))
{
/* We got a candidate. Grab it! */
+ TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
+ vc->domain->domain_id, vc->vcpu_id);
SCHED_VCPU_STAT_CRANK(speer, migrate_q);
SCHED_STAT_CRANK(migrate_queued);
WARN_ON(vc->is_urgent);
@@ -1401,6 +1433,7 @@ csched_schedule(
/* Tasklet work (which runs in idle VCPU context) overrides all else. */
if ( tasklet_work_scheduled )
{
+ TRACE_0D(TRC_CSCHED_SCHED_TASKLET);
snext = CSCHED_VCPU(idle_vcpu[cpu]);
snext->pri = CSCHED_PRI_TS_BOOST;
}
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
Changes from v1:
* Dummy `struct d {}', holding only `cpu', removed
in favour of the much more readable `trace_var(..., sizeof(cpu), &cpu)',
as suggested.
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -21,6 +21,7 @@
#include <asm/atomic.h>
#include <xen/errno.h>
#include <xen/keyhandler.h>
+#include <xen/trace.h>
/*
@@ -97,6 +98,18 @@
/*
+ * Credit tracing events ("only" 512 available!). Check
+ * include/public/trace.h for more details.
+ */
+#define TRC_CSCHED_SCHED_TASKLET TRC_SCHED_CLASS_EVT(CSCHED, 1)
+#define TRC_CSCHED_ACCOUNT_START TRC_SCHED_CLASS_EVT(CSCHED, 2)
+#define TRC_CSCHED_ACCOUNT_STOP TRC_SCHED_CLASS_EVT(CSCHED, 3)
+#define TRC_CSCHED_STOLEN_VCPU TRC_SCHED_CLASS_EVT(CSCHED, 4)
+#define TRC_CSCHED_PICKED_CPU TRC_SCHED_CLASS_EVT(CSCHED, 5)
+#define TRC_CSCHED_TICKLE TRC_SCHED_CLASS_EVT(CSCHED, 6)
+
+
+/*
* Boot parameters
*/
static int __read_mostly sched_credit_tslice_ms = CSCHED_DEFAULT_TSLICE_MS;
@@ -315,9 +328,18 @@ static inline void
}
}
- /* Send scheduler interrupts to designated CPUs */
if ( !cpumask_empty(&mask) )
+ {
+ if ( unlikely(tb_init_done) )
+ {
+ /* Avoid TRACE_*: saves checking !tb_init_done each step */
+ for_each_cpu(cpu, &mask)
+ trace_var(TRC_CSCHED_TICKLE, 0, sizeof(cpu), &cpu);
+ }
+
+ /* Send scheduler interrupts to designated CPUs */
cpumask_raise_softirq(&mask, SCHEDULE_SOFTIRQ);
+ }
}
static void
@@ -554,6 +576,8 @@ static int
if ( commit && spc )
spc->idle_bias = cpu;
+ TRACE_3D(TRC_CSCHED_PICKED_CPU, vc->domain->domain_id, vc->vcpu_id, cpu);
+
return cpu;
}
@@ -586,6 +610,9 @@ static inline void
}
}
+ TRACE_3D(TRC_CSCHED_ACCOUNT_START, sdom->dom->domain_id,
+ svc->vcpu->vcpu_id, sdom->active_vcpu_count);
+
spin_unlock_irqrestore(&prv->lock, flags);
}
@@ -608,6 +635,9 @@ static inline void
{
list_del_init(&sdom->active_sdom_elem);
}
+
+ TRACE_3D(TRC_CSCHED_ACCOUNT_STOP, sdom->dom->domain_id,
+ svc->vcpu->vcpu_id, sdom->active_vcpu_count);
}
static void
@@ -1241,6 +1271,8 @@ csched_runq_steal(int peer_cpu, int cpu,
if (__csched_vcpu_is_migrateable(vc, cpu))
{
/* We got a candidate. Grab it! */
+ TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
+ vc->domain->domain_id, vc->vcpu_id);
SCHED_VCPU_STAT_CRANK(speer, migrate_q);
SCHED_STAT_CRANK(migrate_queued);
WARN_ON(vc->is_urgent);
@@ -1401,6 +1433,7 @@ csched_schedule(
/* Tasklet work (which runs in idle VCPU context) overrides all else. */
if ( tasklet_work_scheduled )
{
+ TRACE_0D(TRC_CSCHED_SCHED_TASKLET);
snext = CSCHED_VCPU(idle_vcpu[cpu]);
snext->pri = CSCHED_PRI_TS_BOOST;
}
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel