
[PATCH 02/18] xen: allow global VIRQ handlers to be delegated to other domains
This patch sends global VIRQs to a domain designated as the VIRQ handler
instead of sending all global VIRQ events to dom0. This is required in
order to run xenstored in a stubdom, because VIRQ_DOM_EXC must be sent
to xenstored for domain destruction to work properly.
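
For illustration only (this sketch is not part of the patch): a toolstack
delegates a global VIRQ through the new XEN_DOMCTL_set_virq_handler domctl
introduced below. The helper name delegate_dom_exc() and xs_domid are
made-up names; DECLARE_DOMCTL and do_domctl are the usual libxc internals.

    /* Sketch: make domain xs_domid the handler for VIRQ_DOM_EXC. */
    static int delegate_dom_exc(xc_interface *xch, uint32_t xs_domid)
    {
        DECLARE_DOMCTL;

        domctl.cmd = XEN_DOMCTL_set_virq_handler;      /* new domctl added below */
        domctl.domain = xs_domid;                      /* domain that will handle the VIRQ */
        domctl.u.set_virq_handler.virq = VIRQ_DOM_EXC; /* global VIRQ to delegate */
        return do_domctl(xch, &domctl);                /* 0 on success */
    }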

This patch was inspired by the xenstored stubdomain patch series sent to
xen-devel by Alex Zeffertt in 2009.

Signed-off-by: Diego Ongaro <diego.ongaro@citrix.com>
Signed-off-by: Alex Zeffertt <alex.zeffertt@eu.citrix.com>
Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
---
tools/flask/policy/policy/flask/access_vectors | 1 +
xen/arch/x86/cpu/mcheck/amd_nonfatal.c | 2 +-
xen/arch/x86/cpu/mcheck/mce.c | 2 +-
xen/arch/x86/cpu/mcheck/mce_intel.c | 6 +-
xen/arch/x86/cpu/mcheck/non-fatal.c | 2 +-
xen/common/cpu.c | 4 +-
xen/common/domain.c | 10 ++--
xen/common/domctl.c | 17 ++++++
xen/common/event_channel.c | 63 +++++++++++++++++++++++-
xen/common/trace.c | 2 +-
xen/drivers/char/console.c | 4 +-
xen/include/public/domctl.h | 8 +++
xen/include/xen/event.h | 18 ++++++-
xen/include/xsm/xsm.h | 6 ++
xen/xsm/dummy.c | 6 ++
xen/xsm/flask/hooks.c | 6 ++
xen/xsm/flask/include/av_perm_to_string.h | 1 +
xen/xsm/flask/include/av_permissions.h | 1 +
18 files changed, 140 insertions(+), 19 deletions(-)

diff --git a/tools/flask/policy/policy/flask/access_vectors b/tools/flask/policy/policy/flask/access_vectors
index 644f2e1..5901911 100644
--- a/tools/flask/policy/policy/flask/access_vectors
+++ b/tools/flask/policy/policy/flask/access_vectors
@@ -85,6 +85,7 @@ class domain
getpodtarget
setpodtarget
set_misc_info
+ set_virq_handler
}

class hvm
diff --git a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
index 50288bd..9222098 100644
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
@@ -100,7 +100,7 @@ static void mce_amd_checkregs(void *info)

if (dom0_vmce_enabled()) {
mctelem_commit(mctc);
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
} else if (++dumpcount >= 10) {
x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
mctelem_dismiss(mctc);
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index b592041..c4e4477 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -594,7 +594,7 @@ void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
if (dom0_vmce_enabled()) {
if (mctc != NULL)
mctelem_commit(mctc);
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
} else {
x86_mcinfo_dump(mci);
if (mctc != NULL)
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index 0986025..0894080 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -354,7 +354,7 @@ static void mce_softirq(void)
/* Step2: Send Log to DOM0 through vIRQ */
if (dom0_vmce_enabled()) {
mce_printk(MCE_VERBOSE, "MCE: send MCE# to DOM0 through virq\n");
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
}
}

@@ -1085,7 +1085,7 @@ static void cmci_discover(void)
if (bs.errcnt && mctc != NULL) {
if (dom0_vmce_enabled()) {
mctelem_commit(mctc);
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
} else {
x86_mcinfo_dump(mctelem_dataptr(mctc));
mctelem_dismiss(mctc);
@@ -1205,7 +1205,7 @@ fastcall void smp_cmci_interrupt(struct cpu_user_regs *regs)
if (dom0_vmce_enabled()) {
mctelem_commit(mctc);
mce_printk(MCE_VERBOSE, "CMCI: send CMCI to DOM0 through virq\n");
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
} else {
x86_mcinfo_dump(mctelem_dataptr(mctc));
mctelem_dismiss(mctc);
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
index c57688f..1dded9b 100644
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -55,7 +55,7 @@ static void mce_checkregs (void *info)

if (dom0_vmce_enabled()) {
mctelem_commit(mctc);
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
} else if (++dumpcount >= 10) {
x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
mctelem_dismiss(mctc);
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 79abdb7..630881e 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -108,7 +108,7 @@ int cpu_down(unsigned int cpu)
notifier_rc = notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu, NULL);
BUG_ON(notifier_rc != NOTIFY_DONE);

- send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+ send_global_virq(VIRQ_PCPU_STATE);
cpu_hotplug_done();
return 0;

@@ -148,7 +148,7 @@ int cpu_up(unsigned int cpu)
notifier_rc = notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu, NULL);
BUG_ON(notifier_rc != NOTIFY_DONE);

- send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+ send_global_virq(VIRQ_PCPU_STATE);

cpu_hotplug_done();
return 0;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 52a63ef..c4d98d9 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -116,7 +116,7 @@ static void __domain_finalise_shutdown(struct domain *d)
if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
evtchn_send(d, d->suspend_evtchn);
else
- send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+ send_global_virq(VIRQ_DOM_EXC);
}

static void vcpu_check_shutdown(struct vcpu *v)
@@ -492,7 +492,7 @@ int domain_kill(struct domain *d)
}
d->is_dying = DOMDYING_dead;
put_domain(d);
- send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+ send_global_virq(VIRQ_DOM_EXC);
/* fallthrough */
case DOMDYING_dead:
break;
@@ -633,7 +633,7 @@ void domain_pause_for_debugger(void)
for_each_vcpu ( d, v )
vcpu_sleep_nosync(v);

- send_guest_global_virq(dom0, VIRQ_DEBUGGER);
+ send_global_virq(VIRQ_DEBUGGER);
}

/* Complete domain destroy after RCU readers are not holding old references. */
@@ -659,6 +659,8 @@ static void complete_domain_destroy(struct rcu_head *head)

watchdog_domain_destroy(d);

+ clear_global_virq_handlers(d);
+
rangeset_domain_destroy(d);

cpupool_rm_domain(d);
@@ -690,7 +692,7 @@ static void complete_domain_destroy(struct rcu_head *head)
free_cpumask_var(d->domain_dirty_cpumask);
free_domain_struct(d);

- send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+ send_global_virq(VIRQ_DOM_EXC);
}

/* Release resources belonging to task @p. */
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index d6ae09b..a775aa3 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -994,6 +994,23 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
}
break;

+ case XEN_DOMCTL_set_virq_handler:
+ {
+ struct domain *d;
+ int virq = op->u.set_virq_handler.virq;
+
+ ret = -ESRCH;
+ d = rcu_lock_domain_by_id(op->domain);
+ if ( d != NULL )
+ {
+ ret = xsm_set_virq_handler(d, virq);
+ if ( !ret )
+ ret = set_global_virq_handler(d, virq);
+ rcu_unlock_domain(d);
+ }
+ }
+ break;
+
default:
ret = arch_do_domctl(op, u_domctl);
break;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 9212042..77c7a27 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -689,7 +689,7 @@ void send_guest_vcpu_virq(struct vcpu *v, int virq)
spin_unlock_irqrestore(&v->virq_lock, flags);
}

-void send_guest_global_virq(struct domain *d, int virq)
+static void send_guest_global_virq(struct domain *d, int virq)
{
unsigned long flags;
int port;
@@ -739,6 +739,67 @@ int send_guest_pirq(struct domain *d, const struct pirq *pirq)
return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

+static struct domain* global_virq_handlers[NR_VIRQS];
+
+spinlock_t global_virq_handlers_lock = SPIN_LOCK_UNLOCKED;
+
+static struct domain* _get_global_virq_handler(int virq)
+{
+ struct domain *d;
+
+ d = global_virq_handlers[virq];
+ return d != NULL ? d : dom0;
+}
+
+void send_global_virq(int virq)
+{
+ ASSERT(virq >= 0 && virq < NR_VIRQS);
+ ASSERT(virq_is_global(virq));
+
+ send_guest_global_virq(_get_global_virq_handler(virq), virq);
+}
+
+int set_global_virq_handler(struct domain *d, int virq)
+{
+ struct domain *old;
+
+ if (virq < 0 || virq >= NR_VIRQS)
+ return -EINVAL;
+ if (!virq_is_global(virq))
+ return -EINVAL;
+
+ if (global_virq_handlers[virq] == d)
+ return 0;
+
+ if (unlikely(!get_domain(d)))
+ return -EINVAL;
+
+ spin_lock(&global_virq_handlers_lock);
+
+ old = global_virq_handlers[virq];
+ global_virq_handlers[virq] = d;
+ if (old != NULL)
+ put_domain(old);
+ spin_unlock(&global_virq_handlers_lock);
+
+ return 0;
+}
+
+void clear_global_virq_handlers(struct domain *d)
+{
+ int virq;
+
+ spin_lock(&global_virq_handlers_lock);
+
+ for (virq = 0; virq < NR_VIRQS; virq++) {
+ if (global_virq_handlers[virq] == d) {
+ global_virq_handlers[virq] = NULL;
+ put_domain(d);
+ }
+ }
+
+ spin_unlock(&global_virq_handlers_lock);
+}

static long evtchn_status(evtchn_status_t *status)
{
diff --git a/xen/common/trace.c b/xen/common/trace.c
index 5772f24..58cbf39 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -661,7 +661,7 @@ static inline void insert_lost_records(struct t_buf *buf)
*/
static void trace_notify_dom0(unsigned long unused)
{
- send_guest_global_virq(dom0, VIRQ_TBUF);
+ send_global_virq(VIRQ_TBUF);
}
static DECLARE_SOFTIRQ_TASKLET(trace_notify_dom0_tasklet,
trace_notify_dom0, 0);
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index 8a4c684..79b266f 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -287,7 +287,7 @@ static void __serial_rx(char c, struct cpu_user_regs *regs)
if ( (serial_rx_prod-serial_rx_cons) != SERIAL_RX_SIZE )
serial_rx_ring[SERIAL_RX_MASK(serial_rx_prod++)] = c;
/* Always notify the guest: prevents receive path from getting stuck. */
- send_guest_global_virq(dom0, VIRQ_CONSOLE);
+ send_global_virq(VIRQ_CONSOLE);
}

static void serial_rx(char c, struct cpu_user_regs *regs)
@@ -314,7 +314,7 @@ static void serial_rx(char c, struct cpu_user_regs *regs)

static void notify_dom0_con_ring(unsigned long unused)
{
- send_guest_global_virq(dom0, VIRQ_CON_RING);
+ send_global_virq(VIRQ_CON_RING);
}
static DECLARE_SOFTIRQ_TASKLET(notify_dom0_con_ring_tasklet,
notify_dom0_con_ring, 0);
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index c7640aa..b1eb425 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -813,6 +813,12 @@ struct xen_domctl_audit_p2m {
typedef struct xen_domctl_audit_p2m xen_domctl_audit_p2m_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_audit_p2m_t);

+struct xen_domctl_set_virq_handler {
+ uint32_t virq; /* IN */
+};
+typedef struct xen_domctl_set_virq_handler xen_domctl_set_virq_handler_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_virq_handler_t);
+
#if defined(__i386__) || defined(__x86_64__)
/* XEN_DOMCTL_setvcpuextstate */
/* XEN_DOMCTL_getvcpuextstate */
@@ -912,6 +918,7 @@ struct xen_domctl {
#define XEN_DOMCTL_getvcpuextstate 63
#define XEN_DOMCTL_set_access_required 64
#define XEN_DOMCTL_audit_p2m 65
+#define XEN_DOMCTL_set_virq_handler 71
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -966,6 +973,7 @@ struct xen_domctl {
#endif
struct xen_domctl_set_access_required access_required;
struct xen_domctl_audit_p2m audit_p2m;
+ struct xen_domctl_set_virq_handler set_virq_handler;
struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 7e5ad7b..2df65a0 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -24,11 +24,23 @@
void send_guest_vcpu_virq(struct vcpu *v, int virq);

/*
- * send_guest_global_virq: Notify guest via a global VIRQ.
- * @d: Domain to which virtual IRQ should be sent
+ * send_global_virq: Notify the domain handling a global VIRQ.
* @virq: Virtual IRQ number (VIRQ_*)
*/
-void send_guest_global_virq(struct domain *d, int virq);
+void send_global_virq(int virq);
+
+/*
+ * set_global_virq_handler: Set a global VIRQ handler.
+ * @d: New target domain for this VIRQ
+ * @virq: Virtual IRQ number (VIRQ_*), must be global
+ */
+int set_global_virq_handler(struct domain *d, int virq);
+
+/*
+ * clear_global_virq_handlers: Remove a domain as a handler for global VIRQs.
+ * @d: Domain to no longer handle global virtual IRQs
+ */
+void clear_global_virq_handlers(struct domain *d);

/*
* send_guest_pirq:
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 566c808..c89c6ed 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -64,6 +64,7 @@ struct xsm_operations {
int (*domain_settime) (struct domain *d);
int (*set_target) (struct domain *d, struct domain *e);
int (*domctl) (struct domain *d, int cmd);
+ int (*set_virq_handler) (struct domain *d, int virq);
int (*tbufcontrol) (void);
int (*readconsole) (uint32_t clear);
int (*sched_id) (void);
@@ -265,6 +266,11 @@ static inline int xsm_domctl (struct domain *d, int cmd)
return xsm_call(domctl(d, cmd));
}

+static inline int xsm_set_virq_handler (struct domain *d, int virq)
+{
+ return xsm_call(set_virq_handler(d, virq));
+}
+
static inline int xsm_tbufcontrol (void)
{
return xsm_call(tbufcontrol());
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 65daa4e..59db86d 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -94,6 +94,11 @@ static int dummy_domctl(struct domain *d, int cmd)
return 0;
}

+static int dummy_set_virq_handler(struct domain *d, int virq)
+{
+ return 0;
+}
+
static int dummy_tbufcontrol (void)
{
return 0;
@@ -596,6 +601,7 @@ void xsm_fixup_ops (struct xsm_operations *ops)
set_to_dummy_if_null(ops, domain_settime);
set_to_dummy_if_null(ops, set_target);
set_to_dummy_if_null(ops, domctl);
+ set_to_dummy_if_null(ops, set_virq_handler);
set_to_dummy_if_null(ops, tbufcontrol);
set_to_dummy_if_null(ops, readconsole);
set_to_dummy_if_null(ops, sched_id);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index a2020a9..a1feb8d 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -597,6 +597,11 @@ static int flask_domctl(struct domain *d, int cmd)
return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__SET_MISC_INFO);
}

+static int flask_set_virq_handler(struct domain *d, int virq)
+{
+ return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER);
+}
+
static int flask_tbufcontrol(void)
{
return domain_has_xen(current->domain, XEN__TBUFCONTROL);
@@ -1460,6 +1465,7 @@ static struct xsm_operations flask_ops = {
.domain_settime = flask_domain_settime,
.set_target = flask_set_target,
.domctl = flask_domctl,
+ .set_virq_handler = flask_set_virq_handler,
.tbufcontrol = flask_tbufcontrol,
.readconsole = flask_readconsole,
.sched_id = flask_sched_id,
diff --git a/xen/xsm/flask/include/av_perm_to_string.h b/xen/xsm/flask/include/av_perm_to_string.h
index 85cbffc..17a1c36 100644
--- a/xen/xsm/flask/include/av_perm_to_string.h
+++ b/xen/xsm/flask/include/av_perm_to_string.h
@@ -60,6 +60,7 @@
S_(SECCLASS_DOMAIN, DOMAIN__GETPODTARGET, "getpodtarget")
S_(SECCLASS_DOMAIN, DOMAIN__SETPODTARGET, "setpodtarget")
S_(SECCLASS_DOMAIN, DOMAIN__SET_MISC_INFO, "set_misc_info")
+ S_(SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER, "set_virq_handler")
S_(SECCLASS_HVM, HVM__SETHVMC, "sethvmc")
S_(SECCLASS_HVM, HVM__GETHVMC, "gethvmc")
S_(SECCLASS_HVM, HVM__SETPARAM, "setparam")
diff --git a/xen/xsm/flask/include/av_permissions.h b/xen/xsm/flask/include/av_permissions.h
index 9e55a86..42eaf81 100644
--- a/xen/xsm/flask/include/av_permissions.h
+++ b/xen/xsm/flask/include/av_permissions.h
@@ -61,6 +61,7 @@
#define DOMAIN__GETPODTARGET 0x10000000UL
#define DOMAIN__SETPODTARGET 0x20000000UL
#define DOMAIN__SET_MISC_INFO 0x40000000UL
+#define DOMAIN__SET_VIRQ_HANDLER 0x80000000UL

#define HVM__SETHVMC 0x00000001UL
#define HVM__GETHVMC 0x00000002UL
--
1.7.7.5


Re: [PATCH 02/18] xen: allow global VIRQ handlers to be delegated to other domains
>>> On 11.01.12 at 18:21, Daniel De Graaf <dgdegra@tycho.nsa.gov> wrote:
> @@ -659,6 +659,8 @@ static void complete_domain_destroy(struct rcu_head *head)
>
> watchdog_domain_destroy(d);
>
> + clear_global_virq_handlers(d);

This is too late, I'm afraid. The domain can't possibly service the vIRQ(s)
anymore as soon as its destruction begins.

> +
> rangeset_domain_destroy(d);
>
> cpupool_rm_domain(d);
> @@ -739,6 +739,67 @@ int send_guest_pirq(struct domain *d, const struct pirq *pirq)
> return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
> }
>
> +static struct domain* global_virq_handlers[NR_VIRQS];

__read_mostly?

Also, the formatting is wrong (also elsewhere in the patch) - the
space should be before the star, not after.

> +
> +spinlock_t global_virq_handlers_lock = SPIN_LOCK_UNLOCKED;

static? DEFINE_SPINLOCK()?

> +
> +static struct domain* _get_global_virq_handler(int virq)
> +{
> + struct domain *d;
> +
> + d = global_virq_handlers[virq];
> + return d != NULL ? d : dom0;

This can be done in a single line:

return global_virq_handlers[virq] ?: dom0;

> +}
> +
> +void send_global_virq(int virq)
> +{
> + ASSERT(virq >= 0 && virq < NR_VIRQS);
> + ASSERT(virq_is_global(virq));
> +
> + send_guest_global_virq(_get_global_virq_handler(virq), virq);
> +}
> +
> +int set_global_virq_handler(struct domain *d, int virq)

In the domctl interface structure the virq (correctly) is uint32_t,
so why is it (signed) int here (and elsewhere)?

> +{
> + struct domain *old;
> +
> + if (virq < 0 || virq >= NR_VIRQS)

The < 0 part would then become unnecessary.

> + return -EINVAL;
> + if (!virq_is_global(virq))
> + return -EINVAL;
> +
> + if (global_virq_handlers[virq] == d)
> + return 0;
> +
> + if (unlikely(!get_domain(d)))
> + return -EINVAL;
> +
> + spin_lock(&global_virq_handlers_lock);
> +
> + old = global_virq_handlers[virq];
> + global_virq_handlers[virq] = d;
> + if (old != NULL)
> + put_domain(old);

This should happen outside the lock.

> + spin_unlock(&global_virq_handlers_lock);
> +
> + return 0;
> +}
> +
> +void clear_global_virq_handlers(struct domain *d)
> +{
> + int virq;
> +
> + spin_lock(&global_virq_handlers_lock);
> +
> + for (virq = 0; virq < NR_VIRQS; virq++) {
> + if (global_virq_handlers[virq] == d) {
> + global_virq_handlers[virq] = NULL;
> + put_domain(d);

Same here (albeit resulting in some code growth).

> + }
> + }
> +
> + spin_unlock(&global_virq_handlers_lock);
> +}
>
> static long evtchn_status(evtchn_status_t *status)
> {
> @@ -912,6 +918,7 @@ struct xen_domctl {
> #define XEN_DOMCTL_getvcpuextstate 63
> #define XEN_DOMCTL_set_access_required 64
> #define XEN_DOMCTL_audit_p2m 65
> +#define XEN_DOMCTL_set_virq_handler 71

Any reason for picking a non-contiguous value here?

> #define XEN_DOMCTL_gdbsx_guestmemio 1000
> #define XEN_DOMCTL_gdbsx_pausevcpu 1001
> #define XEN_DOMCTL_gdbsx_unpausevcpu 1002

Jan


[PATCH 02/18] xen: allow global VIRQ handlers to be delegated to other domains
This patch sends global VIRQs to a domain designated as the VIRQ handler
instead of sending all global VIRQ events to dom0. This is required in
order to run xenstored in a stubdom, because VIRQ_DOM_EXC must be sent
to xenstored for domain destruction to work properly.
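
For example (illustrative only; the actual call site is not part of this
patch), after building a xenstored stub domain the toolstack could delegate
VIRQ_DOM_EXC to it with the xc_domain_set_virq_handler() helper added below,
where xs_domid and xch are assumed to be the stub domain's id and an open
xc_interface handle:

    if ( xc_domain_set_virq_handler(xch, xs_domid, VIRQ_DOM_EXC) != 0 )
        fprintf(stderr, "failed to delegate VIRQ_DOM_EXC to domain %u\n",
                xs_domid);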

This patch was inspired by the xenstored stubdomain patch series sent to
xen-devel by Alex Zeffertt in 2009.

Signed-off-by: Diego Ongaro <diego.ongaro@citrix.com>
Signed-off-by: Alex Zeffertt <alex.zeffertt@eu.citrix.com>
Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
---
tools/flask/policy/policy/flask/access_vectors | 1 +
tools/libxc/xc_domain.c | 10 ++++
tools/libxc/xenctrl.h | 9 +++
xen/arch/x86/cpu/mcheck/amd_nonfatal.c | 2 +-
xen/arch/x86/cpu/mcheck/mce.c | 2 +-
xen/arch/x86/cpu/mcheck/mce_intel.c | 6 +-
xen/arch/x86/cpu/mcheck/non-fatal.c | 2 +-
xen/common/cpu.c | 4 +-
xen/common/domain.c | 8 ++--
xen/common/domctl.c | 17 ++++++
xen/common/event_channel.c | 63 +++++++++++++++++++++++-
xen/common/trace.c | 2 +-
xen/drivers/char/console.c | 4 +-
xen/include/public/domctl.h | 8 +++
xen/include/xen/event.h | 12 +++-
xen/include/xsm/xsm.h | 6 ++
xen/xsm/dummy.c | 6 ++
xen/xsm/flask/hooks.c | 6 ++
xen/xsm/flask/include/av_perm_to_string.h | 1 +
xen/xsm/flask/include/av_permissions.h | 1 +
20 files changed, 151 insertions(+), 19 deletions(-)

diff --git a/tools/flask/policy/policy/flask/access_vectors b/tools/flask/policy/policy/flask/access_vectors
index 644f2e1..5901911 100644
--- a/tools/flask/policy/policy/flask/access_vectors
+++ b/tools/flask/policy/policy/flask/access_vectors
@@ -85,6 +85,7 @@ class domain
getpodtarget
setpodtarget
set_misc_info
+ set_virq_handler
}

class hvm
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index ab019b8..d98e68b 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1504,6 +1504,16 @@ int xc_domain_set_access_required(xc_interface *xch,
return do_domctl(xch, &domctl);
}

+int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_set_virq_handler;
+ domctl.domain = domid;
+ domctl.u.set_virq_handler.virq = virq;
+ return do_domctl(xch, &domctl);
+}
+
/*
* Local variables:
* mode: C
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 8b34769..8f3426f 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -747,6 +747,15 @@ int xc_domain_p2m_audit(xc_interface *xch,
int xc_domain_set_access_required(xc_interface *xch,
uint32_t domid,
unsigned int required);
+/**
+ * This function sets the handler of global VIRQs sent by the hypervisor
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id which will handle the VIRQ
+ * @parm virq the virq number (VIRQ_*)
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);

/*
* CPUPOOL MANAGEMENT FUNCTIONS
diff --git a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
index 50288bd..9222098 100644
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
@@ -100,7 +100,7 @@ static void mce_amd_checkregs(void *info)

if (dom0_vmce_enabled()) {
mctelem_commit(mctc);
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
} else if (++dumpcount >= 10) {
x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
mctelem_dismiss(mctc);
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index b592041..c4e4477 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -594,7 +594,7 @@ void mcheck_cmn_handler(struct cpu_user_regs *regs, long error_code,
if (dom0_vmce_enabled()) {
if (mctc != NULL)
mctelem_commit(mctc);
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
} else {
x86_mcinfo_dump(mci);
if (mctc != NULL)
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index 0986025..0894080 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -354,7 +354,7 @@ static void mce_softirq(void)
/* Step2: Send Log to DOM0 through vIRQ */
if (dom0_vmce_enabled()) {
mce_printk(MCE_VERBOSE, "MCE: send MCE# to DOM0 through virq\n");
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
}
}

@@ -1085,7 +1085,7 @@ static void cmci_discover(void)
if (bs.errcnt && mctc != NULL) {
if (dom0_vmce_enabled()) {
mctelem_commit(mctc);
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
} else {
x86_mcinfo_dump(mctelem_dataptr(mctc));
mctelem_dismiss(mctc);
@@ -1205,7 +1205,7 @@ fastcall void smp_cmci_interrupt(struct cpu_user_regs *regs)
if (dom0_vmce_enabled()) {
mctelem_commit(mctc);
mce_printk(MCE_VERBOSE, "CMCI: send CMCI to DOM0 through virq\n");
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
} else {
x86_mcinfo_dump(mctelem_dataptr(mctc));
mctelem_dismiss(mctc);
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
index c57688f..1dded9b 100644
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -55,7 +55,7 @@ static void mce_checkregs (void *info)

if (dom0_vmce_enabled()) {
mctelem_commit(mctc);
- send_guest_global_virq(dom0, VIRQ_MCA);
+ send_global_virq(VIRQ_MCA);
} else if (++dumpcount >= 10) {
x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
mctelem_dismiss(mctc);
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 79abdb7..630881e 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -108,7 +108,7 @@ int cpu_down(unsigned int cpu)
notifier_rc = notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu, NULL);
BUG_ON(notifier_rc != NOTIFY_DONE);

- send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+ send_global_virq(VIRQ_PCPU_STATE);
cpu_hotplug_done();
return 0;

@@ -148,7 +148,7 @@ int cpu_up(unsigned int cpu)
notifier_rc = notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu, NULL);
BUG_ON(notifier_rc != NOTIFY_DONE);

- send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+ send_global_virq(VIRQ_PCPU_STATE);

cpu_hotplug_done();
return 0;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 52a63ef..f1a7ede 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -116,7 +116,7 @@ static void __domain_finalise_shutdown(struct domain *d)
if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
evtchn_send(d, d->suspend_evtchn);
else
- send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+ send_global_virq(VIRQ_DOM_EXC);
}

static void vcpu_check_shutdown(struct vcpu *v)
@@ -492,7 +492,7 @@ int domain_kill(struct domain *d)
}
d->is_dying = DOMDYING_dead;
put_domain(d);
- send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+ send_global_virq(VIRQ_DOM_EXC);
/* fallthrough */
case DOMDYING_dead:
break;
@@ -633,7 +633,7 @@ void domain_pause_for_debugger(void)
for_each_vcpu ( d, v )
vcpu_sleep_nosync(v);

- send_guest_global_virq(dom0, VIRQ_DEBUGGER);
+ send_global_virq(VIRQ_DEBUGGER);
}

/* Complete domain destroy after RCU readers are not holding old references. */
@@ -690,7 +690,7 @@ static void complete_domain_destroy(struct rcu_head *head)
free_cpumask_var(d->domain_dirty_cpumask);
free_domain_struct(d);

- send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+ send_global_virq(VIRQ_DOM_EXC);
}

/* Release resources belonging to task @p. */
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index d6ae09b..97c7d53 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -994,6 +994,23 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
}
break;

+ case XEN_DOMCTL_set_virq_handler:
+ {
+ struct domain *d;
+ uint32_t virq = op->u.set_virq_handler.virq;
+
+ ret = -ESRCH;
+ d = rcu_lock_domain_by_id(op->domain);
+ if ( d != NULL )
+ {
+ ret = xsm_set_virq_handler(d, virq);
+ if ( !ret )
+ ret = set_global_virq_handler(d, virq);
+ rcu_unlock_domain(d);
+ }
+ }
+ break;
+
default:
ret = arch_do_domctl(op, u_domctl);
break;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 9212042..e507481 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -689,7 +689,7 @@ void send_guest_vcpu_virq(struct vcpu *v, int virq)
spin_unlock_irqrestore(&v->virq_lock, flags);
}

-void send_guest_global_virq(struct domain *d, int virq)
+static void send_guest_global_virq(struct domain *d, int virq)
{
unsigned long flags;
int port;
@@ -739,6 +739,65 @@ int send_guest_pirq(struct domain *d, const struct pirq *pirq)
return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}

+static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;
+
+static DEFINE_SPINLOCK(global_virq_handlers_lock);
+
+void send_global_virq(uint32_t virq)
+{
+ ASSERT(virq < NR_VIRQS);
+ ASSERT(virq_is_global(virq));
+
+ send_guest_global_virq(global_virq_handlers[virq] ?: dom0, virq);
+}
+
+int set_global_virq_handler(struct domain *d, uint32_t virq)
+{
+ struct domain *old;
+
+ if (virq >= NR_VIRQS)
+ return -EINVAL;
+ if (!virq_is_global(virq))
+ return -EINVAL;
+
+ if (global_virq_handlers[virq] == d)
+ return 0;
+
+ if (unlikely(!get_domain(d)))
+ return -EINVAL;
+
+ spin_lock(&global_virq_handlers_lock);
+ old = global_virq_handlers[virq];
+ global_virq_handlers[virq] = d;
+ spin_unlock(&global_virq_handlers_lock);
+
+ if (old != NULL)
+ put_domain(old);
+
+ return 0;
+}
+
+static void clear_global_virq_handlers(struct domain *d)
+{
+ uint32_t virq;
+ int put_count = 0;
+
+ spin_lock(&global_virq_handlers_lock);
+
+ for (virq = 0; virq < NR_VIRQS; virq++) {
+ if (global_virq_handlers[virq] == d) {
+ global_virq_handlers[virq] = NULL;
+ put_count++;
+ }
+ }
+
+ spin_unlock(&global_virq_handlers_lock);
+
+ while (put_count) {
+ put_domain(d);
+ put_count--;
+ }
+}

static long evtchn_status(evtchn_status_t *status)
{
@@ -1160,6 +1219,8 @@ void evtchn_destroy(struct domain *d)
d->evtchn[i] = NULL;
}
spin_unlock(&d->event_lock);
+
+ clear_global_virq_handlers(d);
}


diff --git a/xen/common/trace.c b/xen/common/trace.c
index 5772f24..58cbf39 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -661,7 +661,7 @@ static inline void insert_lost_records(struct t_buf *buf)
*/
static void trace_notify_dom0(unsigned long unused)
{
- send_guest_global_virq(dom0, VIRQ_TBUF);
+ send_global_virq(VIRQ_TBUF);
}
static DECLARE_SOFTIRQ_TASKLET(trace_notify_dom0_tasklet,
trace_notify_dom0, 0);
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index 8a4c684..79b266f 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -287,7 +287,7 @@ static void __serial_rx(char c, struct cpu_user_regs *regs)
if ( (serial_rx_prod-serial_rx_cons) != SERIAL_RX_SIZE )
serial_rx_ring[SERIAL_RX_MASK(serial_rx_prod++)] = c;
/* Always notify the guest: prevents receive path from getting stuck. */
- send_guest_global_virq(dom0, VIRQ_CONSOLE);
+ send_global_virq(VIRQ_CONSOLE);
}

static void serial_rx(char c, struct cpu_user_regs *regs)
@@ -314,7 +314,7 @@ static void serial_rx(char c, struct cpu_user_regs *regs)

static void notify_dom0_con_ring(unsigned long unused)
{
- send_guest_global_virq(dom0, VIRQ_CON_RING);
+ send_global_virq(VIRQ_CON_RING);
}
static DECLARE_SOFTIRQ_TASKLET(notify_dom0_con_ring_tasklet,
notify_dom0_con_ring, 0);
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index c7640aa..75be370 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -813,6 +813,12 @@ struct xen_domctl_audit_p2m {
typedef struct xen_domctl_audit_p2m xen_domctl_audit_p2m_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_audit_p2m_t);

+struct xen_domctl_set_virq_handler {
+ uint32_t virq; /* IN */
+};
+typedef struct xen_domctl_set_virq_handler xen_domctl_set_virq_handler_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_virq_handler_t);
+
#if defined(__i386__) || defined(__x86_64__)
/* XEN_DOMCTL_setvcpuextstate */
/* XEN_DOMCTL_getvcpuextstate */
@@ -912,6 +918,7 @@ struct xen_domctl {
#define XEN_DOMCTL_getvcpuextstate 63
#define XEN_DOMCTL_set_access_required 64
#define XEN_DOMCTL_audit_p2m 65
+#define XEN_DOMCTL_set_virq_handler 66
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -966,6 +973,7 @@ struct xen_domctl {
#endif
struct xen_domctl_set_access_required access_required;
struct xen_domctl_audit_p2m audit_p2m;
+ struct xen_domctl_set_virq_handler set_virq_handler;
struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 7e5ad7b..d505ee9 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -24,11 +24,17 @@
void send_guest_vcpu_virq(struct vcpu *v, int virq);

/*
- * send_guest_global_virq: Notify guest via a global VIRQ.
- * @d: Domain to which virtual IRQ should be sent
+ * send_global_virq: Notify the domain handling a global VIRQ.
* @virq: Virtual IRQ number (VIRQ_*)
*/
-void send_guest_global_virq(struct domain *d, int virq);
+void send_global_virq(uint32_t virq);
+
+/*
+ * set_global_virq_handler: Set a global VIRQ handler.
+ * @d: New target domain for this VIRQ
+ * @virq: Virtual IRQ number (VIRQ_*), must be global
+ */
+int set_global_virq_handler(struct domain *d, uint32_t virq);

/*
* send_guest_pirq:
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 566c808..c89c6ed 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -64,6 +64,7 @@ struct xsm_operations {
int (*domain_settime) (struct domain *d);
int (*set_target) (struct domain *d, struct domain *e);
int (*domctl) (struct domain *d, int cmd);
+ int (*set_virq_handler) (struct domain *d, int virq);
int (*tbufcontrol) (void);
int (*readconsole) (uint32_t clear);
int (*sched_id) (void);
@@ -265,6 +266,11 @@ static inline int xsm_domctl (struct domain *d, int cmd)
return xsm_call(domctl(d, cmd));
}

+static inline int xsm_set_virq_handler (struct domain *d, int virq)
+{
+ return xsm_call(set_virq_handler(d, virq));
+}
+
static inline int xsm_tbufcontrol (void)
{
return xsm_call(tbufcontrol());
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 65daa4e..59db86d 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -94,6 +94,11 @@ static int dummy_domctl(struct domain *d, int cmd)
return 0;
}

+static int dummy_set_virq_handler(struct domain *d, int virq)
+{
+ return 0;
+}
+
static int dummy_tbufcontrol (void)
{
return 0;
@@ -596,6 +601,7 @@ void xsm_fixup_ops (struct xsm_operations *ops)
set_to_dummy_if_null(ops, domain_settime);
set_to_dummy_if_null(ops, set_target);
set_to_dummy_if_null(ops, domctl);
+ set_to_dummy_if_null(ops, set_virq_handler);
set_to_dummy_if_null(ops, tbufcontrol);
set_to_dummy_if_null(ops, readconsole);
set_to_dummy_if_null(ops, sched_id);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index a2020a9..a1feb8d 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -597,6 +597,11 @@ static int flask_domctl(struct domain *d, int cmd)
return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__SET_MISC_INFO);
}

+static int flask_set_virq_handler(struct domain *d, int virq)
+{
+ return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER);
+}
+
static int flask_tbufcontrol(void)
{
return domain_has_xen(current->domain, XEN__TBUFCONTROL);
@@ -1460,6 +1465,7 @@ static struct xsm_operations flask_ops = {
.domain_settime = flask_domain_settime,
.set_target = flask_set_target,
.domctl = flask_domctl,
+ .set_virq_handler = flask_set_virq_handler,
.tbufcontrol = flask_tbufcontrol,
.readconsole = flask_readconsole,
.sched_id = flask_sched_id,
diff --git a/xen/xsm/flask/include/av_perm_to_string.h b/xen/xsm/flask/include/av_perm_to_string.h
index 85cbffc..17a1c36 100644
--- a/xen/xsm/flask/include/av_perm_to_string.h
+++ b/xen/xsm/flask/include/av_perm_to_string.h
@@ -60,6 +60,7 @@
S_(SECCLASS_DOMAIN, DOMAIN__GETPODTARGET, "getpodtarget")
S_(SECCLASS_DOMAIN, DOMAIN__SETPODTARGET, "setpodtarget")
S_(SECCLASS_DOMAIN, DOMAIN__SET_MISC_INFO, "set_misc_info")
+ S_(SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER, "set_virq_handler")
S_(SECCLASS_HVM, HVM__SETHVMC, "sethvmc")
S_(SECCLASS_HVM, HVM__GETHVMC, "gethvmc")
S_(SECCLASS_HVM, HVM__SETPARAM, "setparam")
diff --git a/xen/xsm/flask/include/av_permissions.h b/xen/xsm/flask/include/av_permissions.h
index 9e55a86..42eaf81 100644
--- a/xen/xsm/flask/include/av_permissions.h
+++ b/xen/xsm/flask/include/av_permissions.h
@@ -61,6 +61,7 @@
#define DOMAIN__GETPODTARGET 0x10000000UL
#define DOMAIN__SETPODTARGET 0x20000000UL
#define DOMAIN__SET_MISC_INFO 0x40000000UL
+#define DOMAIN__SET_VIRQ_HANDLER 0x80000000UL

#define HVM__SETHVMC 0x00000001UL
#define HVM__GETHVMC 0x00000002UL
--
1.7.7.5


Re: [PATCH 02/18] xen: allow global VIRQ handlers to be delegated to other domains
>>> On 13.01.12 at 00:35, Daniel De Graaf <dgdegra@tycho.nsa.gov> wrote:
> +int set_global_virq_handler(struct domain *d, uint32_t virq)
> +{
> + struct domain *old;
> +
> + if (virq >= NR_VIRQS)
> + return -EINVAL;
> + if (!virq_is_global(virq))
> + return -EINVAL;
> +
> + if (global_virq_handlers[virq] == d)
> + return 0;
> +
> + if (unlikely(!get_domain(d)))
> + return -EINVAL;
> +
> + spin_lock(&global_virq_handlers_lock);
> + old = global_virq_handlers[virq];
> + global_virq_handlers[virq] = d;
> + spin_unlock(&global_virq_handlers_lock);
> +
> + if (old != NULL)
> + put_domain(old);
> +
> + return 0;
> +}
> +
> +static void clear_global_virq_handlers(struct domain *d)
> +{
> + uint32_t virq;
> + int put_count = 0;
> +
> + spin_lock(&global_virq_handlers_lock);
> +
> + for (virq = 0; virq < NR_VIRQS; virq++) {
> + if (global_virq_handlers[virq] == d) {
> + global_virq_handlers[virq] = NULL;
> + put_count++;
> + }
> + }
> +
> + spin_unlock(&global_virq_handlers_lock);
> +
> + while (put_count) {
> + put_domain(d);
> + put_count--;
> + }
> +}

Formatting in this entire hunk should be changed to match that of the
rest of the file.
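
(Purely for illustration: restyled to match the rest of the file - 4-space
indentation, spaces inside the parentheses and braces on their own lines -
the hunk would presumably end up looking something like this.)

    static void clear_global_virq_handlers(struct domain *d)
    {
        uint32_t virq;
        int put_count = 0;

        spin_lock(&global_virq_handlers_lock);

        for ( virq = 0; virq < NR_VIRQS; virq++ )
        {
            if ( global_virq_handlers[virq] == d )
            {
                global_virq_handlers[virq] = NULL;
                put_count++;
            }
        }

        spin_unlock(&global_virq_handlers_lock);

        while ( put_count )
        {
            put_domain(d);
            put_count--;
        }
    }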

> --- a/xen/include/xsm/xsm.h
> +++ b/xen/include/xsm/xsm.h
> @@ -64,6 +64,7 @@ struct xsm_operations {
> int (*domain_settime) (struct domain *d);
> int (*set_target) (struct domain *d, struct domain *e);
> int (*domctl) (struct domain *d, int cmd);
> + int (*set_virq_handler) (struct domain *d, int virq);

Here and further down, the 'int' still survived.

Jan


Re: [PATCH 02/18] xen: allow global VIRQ handlers to be delegated to other domains
On 01/13/2012 03:03 AM, Jan Beulich wrote:
>>>> On 13.01.12 at 00:35, Daniel De Graaf <dgdegra@tycho.nsa.gov> wrote:
>> +int set_global_virq_handler(struct domain *d, uint32_t virq)
>> +{
>> + struct domain *old;
>> +
>> + if (virq >= NR_VIRQS)
>> + return -EINVAL;
>> + if (!virq_is_global(virq))
>> + return -EINVAL;
>> +
>> + if (global_virq_handlers[virq] == d)
>> + return 0;
>> +
>> + if (unlikely(!get_domain(d)))
>> + return -EINVAL;
>> +
>> + spin_lock(&global_virq_handlers_lock);
>> + old = global_virq_handlers[virq];
>> + global_virq_handlers[virq] = d;
>> + spin_unlock(&global_virq_handlers_lock);
>> +
>> + if (old != NULL)
>> + put_domain(old);
>> +
>> + return 0;
>> +}
>> +
>> +static void clear_global_virq_handlers(struct domain *d)
>> +{
>> + uint32_t virq;
>> + int put_count = 0;
>> +
>> + spin_lock(&global_virq_handlers_lock);
>> +
>> + for (virq = 0; virq < NR_VIRQS; virq++) {
>> + if (global_virq_handlers[virq] == d) {
>> + global_virq_handlers[virq] = NULL;
>> + put_count++;
>> + }
>> + }
>> +
>> + spin_unlock(&global_virq_handlers_lock);
>> +
>> + while (put_count) {
>> + put_domain(d);
>> + put_count--;
>> + }
>> +}
>
> Formatting in this entire hunk should be changed to match that of the
> rest of the file.
>
>> --- a/xen/include/xsm/xsm.h
>> +++ b/xen/include/xsm/xsm.h
>> @@ -64,6 +64,7 @@ struct xsm_operations {
>> int (*domain_settime) (struct domain *d);
>> int (*set_target) (struct domain *d, struct domain *e);
>> int (*domctl) (struct domain *d, int cmd);
>> + int (*set_virq_handler) (struct domain *d, int virq);
>
> Here and further down, the 'int' still survived.
>
> Jan
>

Much of the existing code handling virqs uses int; should I also change
these instances to uint32_t?

Re: [PATCH 02/18] xen: allow global VIRQ handlers to be delegated to other domains
>>> On 13.01.12 at 14:58, Daniel De Graaf <dgdegra@tycho.nsa.gov> wrote:
> On 01/13/2012 03:03 AM, Jan Beulich wrote:
>>>>> On 13.01.12 at 00:35, Daniel De Graaf <dgdegra@tycho.nsa.gov> wrote:
>>> +int set_global_virq_handler(struct domain *d, uint32_t virq)
>>> +{
>>> + struct domain *old;
>>> +
>>> + if (virq >= NR_VIRQS)
>>> + return -EINVAL;
>>> + if (!virq_is_global(virq))
>>> + return -EINVAL;
>>> +
>>> + if (global_virq_handlers[virq] == d)
>>> + return 0;
>>> +
>>> + if (unlikely(!get_domain(d)))
>>> + return -EINVAL;
>>> +
>>> + spin_lock(&global_virq_handlers_lock);
>>> + old = global_virq_handlers[virq];
>>> + global_virq_handlers[virq] = d;
>>> + spin_unlock(&global_virq_handlers_lock);
>>> +
>>> + if (old != NULL)
>>> + put_domain(old);
>>> +
>>> + return 0;
>>> +}
>>> +
>>> +static void clear_global_virq_handlers(struct domain *d)
>>> +{
>>> + uint32_t virq;
>>> + int put_count = 0;
>>> +
>>> + spin_lock(&global_virq_handlers_lock);
>>> +
>>> + for (virq = 0; virq < NR_VIRQS; virq++) {
>>> + if (global_virq_handlers[virq] == d) {
>>> + global_virq_handlers[virq] = NULL;
>>> + put_count++;
>>> + }
>>> + }
>>> +
>>> + spin_unlock(&global_virq_handlers_lock);
>>> +
>>> + while (put_count) {
>>> + put_domain(d);
>>> + put_count--;
>>> + }
>>> +}
>>
>> Formatting in this entire hunk should be changed to match that of the
>> rest of the file.
>>
>>> --- a/xen/include/xsm/xsm.h
>>> +++ b/xen/include/xsm/xsm.h
>>> @@ -64,6 +64,7 @@ struct xsm_operations {
>>> int (*domain_settime) (struct domain *d);
>>> int (*set_target) (struct domain *d, struct domain *e);
>>> int (*domctl) (struct domain *d, int cmd);
>>> + int (*set_virq_handler) (struct domain *d, int virq);
>>
>> Here and further down, the 'int' still survived.
>>
>> Jan
>>
>
> Much of the existing code handling virqs uses int; should I also change
> these instances to uint32_t?

That would be nice (if you do, making this a separate patch would be
desirable). Here I'm just asking to not repeat the mistake.

Jan


Re: [PATCH 02/18] xen: allow global VIRQ handlers to be delegated to other domains
On Thu, 2012-01-12 at 23:35 +0000, Daniel De Graaf wrote:
>
> +static void clear_global_virq_handlers(struct domain *d)
> +{
> + uint32_t virq;
> + int put_count = 0;
> +
> + spin_lock(&global_virq_handlers_lock);
> +
> + for (virq = 0; virq < NR_VIRQS; virq++) {
> + if (global_virq_handlers[virq] == d) {
> + global_virq_handlers[virq] = NULL;

I don't suppose we should rebind to dom0, should we?

Seems like we are pretty hosed if this ever happens in a non-controlled
manner anyway...

> + put_count++;
> + }
> + }
> +
> + spin_unlock(&global_virq_handlers_lock);
> +
> + while (put_count) {
> + put_domain(d);
> + put_count--;
> + }
> +}


Re: [PATCH 02/18] xen: allow global VIRQ handlers to be delegated to other domains
>>> On 18.01.12 at 11:39, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> On Thu, 2012-01-12 at 23:35 +0000, Daniel De Graaf wrote:
>>
>> +static void clear_global_virq_handlers(struct domain *d)
>> +{
>> + uint32_t virq;
>> + int put_count = 0;
>> +
>> + spin_lock(&global_virq_handlers_lock);
>> +
>> + for (virq = 0; virq < NR_VIRQS; virq++) {
>> + if (global_virq_handlers[virq] == d) {
>> + global_virq_handlers[virq] = NULL;
>
> I don't suppose we should rebind to dom0, should we?

Storing NULL here effectively means re-binding to Dom0.

Jan


Re: [PATCH 02/18] xen: allow global VIRQ handlers to be delegated to other domains
On Wed, 2012-01-18 at 11:28 +0000, Jan Beulich wrote:
> >>> On 18.01.12 at 11:39, Ian Campbell <Ian.Campbell@citrix.com> wrote:
> > On Thu, 2012-01-12 at 23:35 +0000, Daniel De Graaf wrote:
> >>
> >> +static void clear_global_virq_handlers(struct domain *d)
> >> +{
> >> + uint32_t virq;
> >> + int put_count = 0;
> >> +
> >> + spin_lock(&global_virq_handlers_lock);
> >> +
> >> + for (virq = 0; virq < NR_VIRQS; virq++) {
> >> + if (global_virq_handlers[virq] == d) {
> >> + global_virq_handlers[virq] = NULL;
> >
> > I don't suppose we should rebind to dom0, should we?
>
> Storing NULL here effectively means re-binding to Dom0.

Oh, good, thanks. In that case:
Acked-by: Ian Campbell <ian.campbell@citrix.com>

Ian.

>
> Jan
>


