Mailing List Archive

[linux-2.6.18-xen] linux/x86: use shared page indicating the need for an EOI notification
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1227879027 0
# Node ID d545a95fca739d0b1963b73a9eb64ea64a244e76
# Parent 2268be46c75ec6eddb7cd387af8a236a565f6140
linux/x86: use shared page indicating the need for an EOI notification

Signed-off-by: Jan Beulich <jbeulich@novell.com>
---
drivers/xen/core/evtchn.c | 64 ++++++++++++++++++++++++++++++++--------
include/xen/interface/physdev.h | 15 +++++++++
2 files changed, 67 insertions(+), 12 deletions(-)

diff -r 2268be46c75e -r d545a95fca73 drivers/xen/core/evtchn.c
--- a/drivers/xen/core/evtchn.c Fri Nov 28 13:07:36 2008 +0000
+++ b/drivers/xen/core/evtchn.c Fri Nov 28 13:30:27 2008 +0000
@@ -123,9 +123,6 @@ DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS])
/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

-/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
-static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
-
#ifdef CONFIG_SMP

static u8 cpu_evtchn[NR_EVENT_CHANNELS];
@@ -756,16 +753,48 @@ static struct hw_interrupt_type dynirq_t
.retrigger = resend_irq_on_evtchn,
};

-static inline void pirq_unmask_notify(int irq)
+/* PIRQ EOI state: whether EOI unmasks, and which PIRQs need an EOI notify. */
+static bool pirq_eoi_does_unmask;
+static DECLARE_BITMAP(pirq_needs_eoi, ALIGN(NR_PIRQS, PAGE_SIZE * 8))
+ __attribute__ ((__section__(".bss.page_aligned"), __aligned__(PAGE_SIZE)));
+
+static void pirq_unmask_and_notify(unsigned int evtchn, unsigned int irq)
{
struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
- if (unlikely(test_bit(irq - PIRQ_BASE, pirq_needs_eoi)))
- VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+
+ if (pirq_eoi_does_unmask) {
+ if (test_bit(eoi.irq, pirq_needs_eoi))
+ VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+ else
+ unmask_evtchn(evtchn);
+ } else if (test_bit(irq - PIRQ_BASE, pirq_needs_eoi)) {
+ if (smp_processor_id() != cpu_from_evtchn(evtchn)) {
+ struct evtchn_unmask unmask = { .port = evtchn };
+ struct multicall_entry mcl[2];
+
+ mcl[0].op = __HYPERVISOR_event_channel_op;
+ mcl[0].args[0] = EVTCHNOP_unmask;
+ mcl[0].args[1] = (unsigned long)&unmask;
+ mcl[1].op = __HYPERVISOR_physdev_op;
+ mcl[1].args[0] = PHYSDEVOP_eoi;
+ mcl[1].args[1] = (unsigned long)&eoi;
+
+ if (HYPERVISOR_multicall(mcl, 2))
+ BUG();
+ } else {
+ unmask_evtchn(evtchn);
+ VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+ }
+ } else
+ unmask_evtchn(evtchn);
}

static inline void pirq_query_unmask(int irq)
{
struct physdev_irq_status_query irq_status;
+
+ if (pirq_eoi_does_unmask)
+ return;
irq_status.irq = evtchn_get_xen_pirq(irq);
if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
irq_status.flags = 0;
@@ -806,8 +835,7 @@ static unsigned int startup_pirq(unsigne
irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);

out:
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq);
+ pirq_unmask_and_notify(evtchn, irq);

return 0;
}
@@ -859,10 +887,8 @@ static void end_pirq(unsigned int irq)
if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
(IRQ_DISABLED|IRQ_PENDING)) {
shutdown_pirq(irq);
- } else if (VALID_EVTCHN(evtchn)) {
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq);
- }
+ } else if (VALID_EVTCHN(evtchn))
+ pirq_unmask_and_notify(evtchn, irq);
}

static struct hw_interrupt_type pirq_type = {
@@ -1011,6 +1037,14 @@ void irq_resume(void)
unsigned int cpu, irq, evtchn;

init_evtchn_cpu_bindings();
+
+ if (pirq_eoi_does_unmask) {
+ struct physdev_pirq_eoi_mfn eoi_mfn;
+
+ eoi_mfn.mfn = virt_to_bus(pirq_needs_eoi) >> PAGE_SHIFT;
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_mfn, &eoi_mfn))
+ BUG();
+ }

/* New event-channel space is not 'live' yet. */
for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
@@ -1098,8 +1132,14 @@ void __init xen_init_IRQ(void)
void __init xen_init_IRQ(void)
{
unsigned int i;
+ struct physdev_pirq_eoi_mfn eoi_mfn;

init_evtchn_cpu_bindings();
+
+ BUG_ON(!bitmap_empty(pirq_needs_eoi, PAGE_SIZE * 8));
+ eoi_mfn.mfn = virt_to_bus(pirq_needs_eoi) >> PAGE_SHIFT;
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_mfn, &eoi_mfn) == 0)
+ pirq_eoi_does_unmask = true;

/* No event channels are 'live' right now. */
for (i = 0; i < NR_EVENT_CHANNELS; i++)
diff -r 2268be46c75e -r d545a95fca73 include/xen/interface/physdev.h
--- a/include/xen/interface/physdev.h Fri Nov 28 13:07:36 2008 +0000
+++ b/include/xen/interface/physdev.h Fri Nov 28 13:30:27 2008 +0000
@@ -41,6 +41,21 @@ DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);

/*
+ * Register a shared page for the hypervisor to indicate whether the guest
+ * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly
+ * once the guest has used this function, in that the associated event
+ * channel will automatically get unmasked. The page registered is used as
+ * a bit array indexed by Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_mfn 17
+struct physdev_pirq_eoi_mfn {
+ /* IN */
+ xen_pfn_t mfn;
+};
+typedef struct physdev_pirq_eoi_mfn physdev_pirq_eoi_mfn_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_mfn_t);
+
+/*
* Query the status of an IRQ line.
* @arg == pointer to physdev_irq_status_query structure.
*/

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog