Mailing List Archive

[PATCH 14/14] arm: implement basic interrupt handling mechanism.
arm: implement basic interrupt handling mechanism.

xen/arch/arm/tegra/tegra250.c | 9 +++-
xen/arch/arm/xen/cpu.c | 4 +-
xen/arch/arm/xen/irq.c | 201 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
3 files changed, 204 insertions(+), 10 deletions(-)

Signed-off-by: Jaemin Ryu <jm77.ryu@samsung.com>

diff -r 3f1e64a8f61a xen/arch/arm/tegra/tegra250.c
--- a/xen/arch/arm/tegra/tegra250.c Sun Feb 12 15:49:12 2012 +0900
+++ b/xen/arch/arm/tegra/tegra250.c Sun Feb 12 16:12:38 2012 +0900
@@ -245,13 +245,20 @@ asm(
" .long tegra250_core_map \n"
);

+/*
+ * Raise a software-generated interrupt (SGI) on the CPUs in @cpu_map.
+ * Writes the GIC distributor SGI register (_ICDSGIR): the target CPU
+ * list goes in bits [23:16] (hence the << 16) and @event is the SGI id.
+ */
+void machine_trigger_cpus(unsigned long cpu_map, unsigned int event)
+{
+ mmio_writel(cpu_map << 16 | event, tegra_gic_dist_base + _ICDSGIR);
+}
+
int wakeup_cpu(unsigned int cpu)
{
tegra250_core_map |= 1 << cpu;

cpu_flush_cache_all();

- return 0;
+ /* Kick the target core out of its holding pen with SGI 1. */
+ machine_trigger_cpus(tegra250_core_map, 1);
+
+ /* NOTE(review): return value changed from 0 to 1 — confirm callers
+ treat non-zero as success, not as an error code. */
+ return 1;
}

extern void tegra250_slave_cpu_start(void);
diff -r 3f1e64a8f61a xen/arch/arm/xen/cpu.c
--- a/xen/arch/arm/xen/cpu.c Sun Feb 12 15:49:12 2012 +0900
+++ b/xen/arch/arm/xen/cpu.c Sun Feb 12 16:12:38 2012 +0900
@@ -116,6 +116,8 @@ asmlinkage void start_xen_on_slave_cpu(v
v = idle_vcpu[cpu];
set_current(idle_vcpu[cpu]);

+ VCPU_REG(v, ttbr0) = get_ttbr();
+
set_cpu_sibling_map(cpu);

notify_cpu_starting(cpu);
@@ -139,7 +141,7 @@ void smp_send_event_check_mask(const cpu
map |= 1 << cpu;
}

- /* Trigger remote CPU */
+ machine_trigger_cpus(map, 1);
}

void smp_call_function(void (*f)(void *param), void *param, int wait)
diff -r 3f1e64a8f61a xen/arch/arm/xen/irq.c
--- a/xen/arch/arm/xen/irq.c Sun Feb 12 15:49:12 2012 +0900
+++ b/xen/arch/arm/xen/irq.c Sun Feb 12 16:12:38 2012 +0900
@@ -42,8 +42,6 @@ hw_irq_controller no_irq_type = {
.ack = irq_ack_none,
};

-//struct irq_desc *irq_desc;
-
irq_desc_t irq_desc[NR_IRQS] = {
[0 ... NR_IRQS - 1] = {
.status = IRQ_DISABLED,
@@ -60,6 +58,38 @@ struct irq_cfg irq_cfg[NR_IRQS] = {
};


+/* Mark @irq as pending an EOI from @d in its shared pirq_eoi_map
+ (no-op if the domain never registered an EOI map). */
+static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
+{
+ if (d->arch.pirq_eoi_map) {
+ set_bit(irq, d->arch.pirq_eoi_map);
+ }
+}
+
+/* Clear the pending-EOI bit for @irq in @d's pirq_eoi_map
+ (no-op if the domain never registered an EOI map). */
+static inline void clear_pirq_eoi(struct domain *d, unsigned int irq)
+{
+ if (d->arch.pirq_eoi_map) {
+ clear_bit(irq, d->arch.pirq_eoi_map);
+ }
+}
+
+/*
+ * Guest signalled end-of-interrupt for a physical IRQ: re-enable the
+ * line by invoking the controller's ->end hook under the desc lock.
+ * Silently ignores out-of-range pirq numbers.
+ */
+void pirq_guest_eoi(struct pirq *pirq)
+{
+ struct irq_desc *desc;
+ /* Fix: spin_lock_irqsave() saves the PSR into an unsigned long
+ (matches the flags type used in pirq_guest_bind). */
+ unsigned long flags;
+
+ if (pirq->pirq >= NR_IRQS) {
+ return;
+ }
+
+ desc = irq_to_desc(pirq->pirq);
+ spin_lock_irqsave(&desc->lock, flags);
+
+ desc->handler->end(desc);
+
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+
int pirq_guest_unmask(struct domain *d)
{
NOT_YET();
@@ -67,16 +97,105 @@ int pirq_guest_unmask(struct domain *d)
return 0;
}

+
int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share)
{
- NOT_YET();
+ int rc = 0;
+ struct irq_desc *desc;
+ irq_guest_action_t *action;
+ unsigned long flags;
+ unsigned int irq;

- return 0;
+ irq = pirq->pirq;
+
+ if (irq >= NR_IRQS) {
+ return -EINVAL;
+ }
+
+ desc = irq_to_desc(irq);
+
+ spin_lock_irqsave(&desc->lock, flags);
+
+ if (!(desc->status & IRQ_GUEST)) {
+ /* First guest binding: the line must not be claimed by Xen. */
+ if (desc->action != NULL) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ action = xmalloc(irq_guest_action_t);
+ if ((desc->action = (struct irqaction *)action) == NULL ) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ action->shareable = 1;
+ action->nr_guests = 0;
+ action->in_flight = 0;
+
+ desc->status |= IRQ_GUEST;
+
+ desc->handler->enable(desc);
+ } else if (!will_share) {
+ rc = -EBUSY;
+ goto out;
+ } else {
+ /* Fix: the IRQ is already guest-bound and shareable — pick up
+ the existing action. Previously 'action' was left
+ uninitialized on this path and dereferenced below. */
+ action = (irq_guest_action_t *)desc->action;
+ }
+
+ if ( action->nr_guests == IRQ_MAX_GUESTS ) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ action->guest[action->nr_guests++] = v->domain;
+
+out:
+ spin_unlock_irqrestore(&desc->lock, flags);
+
+ return rc;
}

void pirq_guest_unbind(struct domain *d, struct pirq *pirq)
{
- NOT_YET();
+ struct irq_desc *desc;
+ unsigned int flags, irq, i;
+ irq_guest_action_t *action;
+
+ irq = pirq->pirq;
+
+ if (irq >= NR_IRQS) {
+ return;
+ }
+
+ desc = irq_to_desc(irq);
+
+ spin_lock_irqsave(&desc->lock, flags);
+
+ action = (irq_guest_action_t *)desc->action;
+
+ if (unlikely(action == NULL)) {
+ while(1);
+ }
+
+ BUG_ON(!(desc->status & IRQ_GUEST));
+
+
+ if ( action->nr_guests == 1 ) {
+ desc->action = NULL;
+ xfree(action);
+
+ desc->status &= ~IRQ_GUEST;
+ } else {
+ i = 0;
+ while ( action->guest[i] && (action->guest[i] != d) )
+ i++;
+ memmove(&action->guest[i], &action->guest[i+1], (action->nr_guests - i - 1) * sizeof(action->guest[0]));
+ action->nr_guests--;
+ }
+
+ desc->status |= IRQ_DISABLED;
+
+ desc->handler->disable(desc);
+
+ spin_unlock_irqrestore(&desc->lock, flags);
}


@@ -85,12 +204,17 @@ void pirq_set_affinity(struct domain *d,
NOT_YET();
}

-
struct pirq *alloc_pirq_struct(struct domain *d)
{
- NOT_YET();
+ struct pirq *pirq;

- return NULL;
+ pirq = xmalloc(struct pirq);
+ if (pirq != NULL) {
+ /* xmalloc does not zero; clear the struct so callers do not
+ read stale heap contents from unset fields (matches the
+ zeroed allocation the common code expects — verify). */
+ memset(pirq, 0, sizeof(*pirq));
+ }
+
+ /* Fix: collapsed the redundant 'if (!pirq) return NULL;' followed
+ by 'return pirq;' into a single return. */
+ return pirq;
}

int setup_irq(unsigned int irq, struct irqaction *new)
@@ -119,6 +243,67 @@ int setup_irq(unsigned int irq, struct i
return 0;
}

+/*
+ * Deliver a guest-bound physical IRQ to every domain on its guest
+ * list. Called from do_irq with desc->lock held.
+ */
+void do_guest_irq(unsigned int irq)
+{
+ int i;
+ struct irq_desc *desc;
+ struct domain *d;
+ struct pirq *pirq;
+
+ irq_guest_action_t *action;
+
+ desc = irq_to_desc(irq);
+
+ action = (irq_guest_action_t *)desc->action;
+ for (i = 0; i < action->nr_guests; i++) {
+ d = action->guest[i];
+ pirq = pirq_info(d, irq);
+ /* Fix: pirq_info can return NULL (no mapping for this domain);
+ the old code passed NULL straight to send_guest_pirq. */
+ if (pirq == NULL) {
+ continue;
+ }
+ action->in_flight++;
+ send_guest_pirq(d, pirq);
+ }
+}
+
+/* IPI entry point from the exception vectors — dispatch is not yet
+ implemented (events triggered via machine_trigger_cpus are dropped). */
+asmlinkage void do_ipi(unsigned int ipi, struct cpu_user_regs *regs)
+{
+ /* TODO: decode and handle the inter-processor interrupt. */
+}
+
+/*
+ * Main hardware-interrupt entry point. Acks the line, then either
+ * forwards it to bound guests (IRQ_GUEST) or runs Xen's own handler.
+ */
+asmlinkage void do_irq(unsigned int irq, struct cpu_user_regs *regs)
+{
+ struct irq_desc *desc;
+ struct irqaction *action;
+
+ if (irq >= NR_IRQS) {
+ /* Fix: must bail out here — the old code fell through and
+ indexed irq_desc[] out of bounds. Also %u for unsigned. */
+ printk("Bad IRQ = %u\n", irq);
+ return;
+ }
+
+ desc = irq_to_desc(irq);
+
+ spin_lock(&desc->lock);
+
+ desc->handler->ack(desc);
+
+ if (likely(desc->status & IRQ_GUEST)) {
+ /* Guest EOI will ->end the line later (pirq_guest_eoi). */
+ do_guest_irq(irq);
+ spin_unlock(&desc->lock);
+
+ return;
+ }
+
+ action = desc->action;
+
+ BUG_ON(!action);
+
+ /* Drop the lock while running the handler; it may take a while. */
+ spin_unlock(&desc->lock);
+
+ action->handler(irq, action->dev_id, regs);
+
+ desc->handler->end(desc);
+}
+
+
int arch_init_one_irq_desc(struct irq_desc *desc)
{
NOT_YET();