Mailing List Archive

[PATCH] arm: support fewer LR registers than virtual irqs
If the vgic needs to inject a virtual irq into the guest, but no free
LR registers are available, add the irq to a list and return.
Whenever an LR register becomes available we add the queued irq to it
and remove it from the list.
We use the gic lock to protect the list and the bitmask.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
xen/arch/arm/gic.c | 46 +++++++++++++++++++++++++++++++++++++----
xen/include/asm-arm/domain.h | 1 +
2 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index adc10bb..97c223c 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -25,6 +25,7 @@
#include <xen/sched.h>
#include <xen/errno.h>
#include <xen/softirq.h>
+#include <xen/list.h>
#include <asm/p2m.h>
#include <asm/domain.h>

@@ -45,6 +46,8 @@ static struct {
unsigned int lines;
unsigned int cpus;
spinlock_t lock;
+ uint64_t lr_mask;
+ struct list_head lr_pending;
} gic;

irq_desc_t irq_desc[NR_IRQS];
@@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)

GICH[GICH_HCR] = GICH_HCR_EN;
GICH[GICH_MISR] = GICH_MISR_EOI;
+ gic.lr_mask = 0ULL;
+ INIT_LIST_HEAD(&gic.lr_pending);
}

/* Set up the GIC */
@@ -345,16 +350,35 @@ int __init setup_irq(unsigned int irq, struct irqaction *new)
return rc;
}

-void gic_set_guest_irq(unsigned int virtual_irq,
+static inline void gic_set_lr(int lr, unsigned int virtual_irq,
unsigned int state, unsigned int priority)
{
- BUG_ON(virtual_irq > nr_lrs);
- GICH[GICH_LR + virtual_irq] = state |
+ BUG_ON(lr > nr_lrs);
+ GICH[GICH_LR + lr] = state |
GICH_LR_MAINTENANCE_IRQ |
((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
}

+void gic_set_guest_irq(unsigned int virtual_irq,
+ unsigned int state, unsigned int priority)
+{
+ int i;
+
+ spin_lock(&gic.lock);
+ for (i = 0; i < nr_lrs; i++) {
+ if (!test_and_set_bit(i, &gic.lr_mask))
+ {
+ gic_set_lr(i, virtual_irq, state, priority);
+ spin_unlock(&gic.lock);
+ return;
+ }
+ }
+ list_add_tail(&irq_to_pending(current, virtual_irq)->lr_link, &gic.lr_pending);
+ spin_unlock(&gic.lock);
+ return;
+}
+
void gic_inject_irq_start(void)
{
uint32_t hcr;
@@ -435,13 +459,26 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
uint32_t lr;
uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);

- for ( i = 0; i < 64; i++ ) {
+ for ( i = 0; i < nr_lrs; i++ ) {
if ( eisr & ((uint64_t)1 << i) ) {
struct pending_irq *p;

+ spin_lock(&gic.lock);
lr = GICH[GICH_LR + i];
virq = lr & GICH_LR_VIRTUAL_MASK;
GICH[GICH_LR + i] = 0;
+ clear_bit(i, &gic.lr_mask);
+
+ if ( !list_empty(gic.lr_pending.next) ) {
+ p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
+ gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
+ list_del(&p->lr_link);
+ INIT_LIST_HEAD(&p->lr_link);
+ set_bit(i, &gic.lr_mask);
+ } else {
+ gic_inject_irq_stop();
+ }
+ spin_unlock(&gic.lock);

spin_lock(&current->arch.vgic.lock);
p = irq_to_pending(current, virq);
@@ -449,7 +486,6 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
p->desc->status &= ~IRQ_INPROGRESS;
GICC[GICC_DIR] = virq;
}
- gic_inject_irq_stop();
list_del(&p->link);
INIT_LIST_HEAD(&p->link);
cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 3372d14..75095ff 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -21,6 +21,7 @@ struct pending_irq
struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
uint8_t priority;
struct list_head link;
+ struct list_head lr_link;
};

struct arch_domain
--
1.7.8.3


_______________________________________________
Xen-arm mailing list
Xen-arm@lists.xensource.com
http://lists.xensource.com/mailman/listinfo/xen-arm
Re: [PATCH] arm: support fewer LR registers than virtual irqs
On Tue, 2012-02-14 at 13:07 +0000, Stefano Stabellini wrote:
> If the vgic needs to inject a virtual irq into the guest, but no free
> LR registers are available, add the irq to a list and return.
> Whenever an LR register becomes available we add the queued irq to it
> and remove it from the list.
> We use the gic lock to protect the list and the bitmask.

There's no need to order the IRQs by priority and ensure that the
highest priorities are in the LRs?

Ian.




Re: [PATCH] arm: support fewer LR registers than virtual irqs
On Tue, 14 Feb 2012, Ian Campbell wrote:
> On Tue, 2012-02-14 at 13:07 +0000, Stefano Stabellini wrote:
> > If the vgic needs to inject a virtual irq into the guest, but no free
> > LR registers are available, add the irq to a list and return.
> > Whenever an LR register becomes available we add the queued irq to it
> > and remove it from the list.
> > We use the gic lock to protect the list and the bitmask.
>
> There's no need to order the IRQs by priority and ensure that the
> highest priorities are in the LRs?

You are right, they need to be ordered by priority.

-->8--

From 027ddc0a08c5608797b03e66b87178cd2522ad07 Mon Sep 17 00:00:00 2001
From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Date: Tue, 14 Feb 2012 13:23:56 +0000
Subject: [PATCH] arm: support fewer LR registers than virtual irqs

If the vgic needs to inject a virtual irq into the guest, but no free
LR registers are available, add the irq to a list and return.
Whenever an LR register becomes available we add the queued irq to it
and remove it from the list.
We use the gic lock to protect the list and the bitmask.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
xen/arch/arm/gic.c | 58 ++++++++++++++++++++++++++++++++++++++---
xen/include/asm-arm/domain.h | 1 +
2 files changed, 54 insertions(+), 5 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index adc10bb..129b7ff 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -25,6 +25,7 @@
#include <xen/sched.h>
#include <xen/errno.h>
#include <xen/softirq.h>
+#include <xen/list.h>
#include <asm/p2m.h>
#include <asm/domain.h>

@@ -45,6 +46,8 @@ static struct {
unsigned int lines;
unsigned int cpus;
spinlock_t lock;
+ uint64_t lr_mask;
+ struct list_head lr_pending;
} gic;

irq_desc_t irq_desc[NR_IRQS];
@@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)

GICH[GICH_HCR] = GICH_HCR_EN;
GICH[GICH_MISR] = GICH_MISR_EOI;
+ gic.lr_mask = 0ULL;
+ INIT_LIST_HEAD(&gic.lr_pending);
}

/* Set up the GIC */
@@ -345,16 +350,47 @@ int __init setup_irq(unsigned int irq, struct irqaction *new)
return rc;
}

-void gic_set_guest_irq(unsigned int virtual_irq,
+static inline void gic_set_lr(int lr, unsigned int virtual_irq,
unsigned int state, unsigned int priority)
{
- BUG_ON(virtual_irq > nr_lrs);
- GICH[GICH_LR + virtual_irq] = state |
+ BUG_ON(lr > nr_lrs);
+ GICH[GICH_LR + lr] = state |
GICH_LR_MAINTENANCE_IRQ |
((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
}

+void gic_set_guest_irq(unsigned int virtual_irq,
+ unsigned int state, unsigned int priority)
+{
+ int i;
+ struct pending_irq *iter, *n;
+
+ spin_lock(&gic.lock);
+ for (i = 0; i < nr_lrs; i++) {
+ if (!test_and_set_bit(i, &gic.lr_mask))
+ {
+ gic_set_lr(i, virtual_irq, state, priority);
+ spin_unlock(&gic.lock);
+ return;
+ }
+ }
+
+ n = irq_to_pending(current, virtual_irq);
+ list_for_each_entry ( iter, &gic.lr_pending, lr_link )
+ {
+ if ( iter->priority < priority )
+ {
+ list_add_tail(&n->lr_link, &iter->lr_link);
+ spin_unlock(&gic.lock);
+ return;
+ }
+ }
+ list_add(&n->lr_link, &gic.lr_pending);
+ spin_unlock(&gic.lock);
+ return;
+}
+
void gic_inject_irq_start(void)
{
uint32_t hcr;
@@ -435,13 +471,26 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
uint32_t lr;
uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);

- for ( i = 0; i < 64; i++ ) {
+ for ( i = 0; i < nr_lrs; i++ ) {
if ( eisr & ((uint64_t)1 << i) ) {
struct pending_irq *p;

+ spin_lock(&gic.lock);
lr = GICH[GICH_LR + i];
virq = lr & GICH_LR_VIRTUAL_MASK;
GICH[GICH_LR + i] = 0;
+ clear_bit(i, &gic.lr_mask);
+
+ if ( !list_empty(gic.lr_pending.next) ) {
+ p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
+ gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
+ list_del(&p->lr_link);
+ INIT_LIST_HEAD(&p->lr_link);
+ set_bit(i, &gic.lr_mask);
+ } else {
+ gic_inject_irq_stop();
+ }
+ spin_unlock(&gic.lock);

spin_lock(&current->arch.vgic.lock);
p = irq_to_pending(current, virq);
@@ -449,7 +498,6 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
p->desc->status &= ~IRQ_INPROGRESS;
GICC[GICC_DIR] = virq;
}
- gic_inject_irq_stop();
list_del(&p->link);
INIT_LIST_HEAD(&p->link);
cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 3372d14..75095ff 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -21,6 +21,7 @@ struct pending_irq
struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
uint8_t priority;
struct list_head link;
+ struct list_head lr_link;
};

struct arch_domain
--
1.7.8.3


Re: [PATCH] arm: support fewer LR registers than virtual irqs
On Tue, Feb 14, 2012 at 1:07 PM, Stefano Stabellini
<stefano.stabellini@eu.citrix.com> wrote:
> If the vgic needs to inject a virtual irq into the guest, but no free
> LR registers are available, add the irq to a list and return.
> Whenever an LR register becomes available we add the queued irq to it
> and remove it from the list.
> We use the gic lock to protect the list and the bitmask.

The patch fixes the BUG() call from gic_set_guest_irq() but now I'm
getting another guest data abort error.
I used the arm-tools-2 branch -> the GIC code looks slightly
different? Which branch should I be using?

Below my boot log with the error.

__ __ _ _ ____ _ _ _
\ \/ /___ _ __ | || | |___ \ _ _ _ __ ___| |_ __ _| |__ | | ___
\ // _ \ '_ \ | || |_ __) |__| | | | '_ \/ __| __/ _` | '_ \| |/ _ \
/ \ __/ | | | |__ _| / __/|__| |_| | | | \__ \ || (_| | |_) | | __/
/_/\_\___|_| |_| |_|(_)_____| \__,_|_| |_|___/\__\__,_|_.__/|_|\___|


(XEN) Latest ChangeSet: unavailable
(XEN) Using generic timer at 100000000 Hz
(XEN) Domain heap initialised
(XEN) Set hyp vector base to 237ec0 (expected 00237ec0)
(XEN) GIC: 64 lines, 1 cpu, secure (IID 0000043b).
(XEN) GICH: 4 list registers available
(XEN) Using scheduler: SMP Credit Scheduler (credit)
(XEN) Allocated console ring of 16 KiB.
(XEN) Brought up 1 CPUs
(XEN) *** LOADING DOMAIN 0 ***
(XEN) Copying 0x8 bytes from flash 00000000001c8550 to 0029fc48
Populate P2M 0x80000000->0x88000000
(XEN) Map CS2 MMIO regions 1:1 in the P2M 0x18000000->0x1bffffff
(XEN) Map CS3 MMIO regions 1:1 in the P2M 0x1c000000->0x1fffffff
(XEN) Map VGIC MMIO regions 1:1 in the P2M 0x2c008000->0x2dffffff
(XEN) mapping GICC at 0x2c002000 to 0x2c006000
(XEN) Routing peripheral interrupts to guest
(XEN) VTTBR dom0 = 10080bdffe000
(XEN) Loading 00000000001c9d5c byte zImage from flash 0000000000000000
to 0000000080008000-00000000801d1d5c: [..]
(XEN) Std. Loglevel: All
(XEN) Guest Loglevel: All
(XEN) *** Serial input -> DOM0 (type 'CTRL-a' three times to switch
input to Xen)
(XEN) Freed 48kB init memory.
(XEN) context switch 32767:0 (idle) -> 0:0
(XEN) VTTBR dom0 = 10080bdffe000
Uncompressing Linux... done, booting the kernel.
(XEN) Guest data abort: Translation fault at level 2
(XEN) gva=c880cfe0 gpa=000000002b0a0fe0
(XEN) size=2 sign=0 write=0 reg=3
(XEN) eat=0 cm=0 s1ptw=0 dfsc=6
(XEN) ----[ Xen-4.2-unstable x86_64 debug=y Not tainted ]----
(XEN) CPU: 0
(XEN) PC: c015c8a8
(XEN) CPSR: 60000013 MODE:SVC
(XEN) R0: c880cfe0 R1: 00000fe0 R2: 00000000 R3: 00001000
(XEN) R4: c783cc00 R5: 00001000 R6: c783ccc8 R7: 00000000
(XEN) R8: c880c000 R9: 00000000 R10:00000000 R11:c0290f7c R12:c0010c80
(XEN) USR: SP: 00000000 LR: 00000000 CPSR:60000013
(XEN) SVC: SP: c781bf18 LR: c015c64c SPSR:00000093
(XEN) ABT: SP: c035cecc LR: c035cecc SPSR:00000000
(XEN) UND: SP: c035ced8 LR: c035ced8 SPSR:00000000
(XEN) IRQ: SP: c035cec0 LR: c0008b20 SPSR:200001d3
(XEN) FIQ: SP: 00000000 LR: 00000000 SPSR:00000000
(XEN) FIQ: R8: 00000000 R9: 00000000 R10:00000000 R11:00000000 R12:00000000
(XEN)
(XEN) TTBR0 80004059 TTBR1 80004059 TTBCR 00000000
(XEN) SCTLR 10c53c7d
(XEN) VTTBR 10080bdffe000
(XEN)
(XEN) HTTBR 80ffe91000
(XEN) HDFAR c880cfe0
(XEN) HIFAR 0
(XEN) HPFAR 2b0a00
(XEN) HCR 00000031
(XEN) HSR 93830006
(XEN)
(XEN) DFSR 0 DFAR 0
(XEN) IFSR 0 IFAR 0
(XEN)
(XEN) GUEST STACK GOES HERE
(XEN)
(XEN) ****************************************
(XEN) Panic on CPU 0:
(XEN) Unhandled guest data abort
(XEN) ****************************************

Re: [PATCH] arm: support fewer LR registers than virtual irqs
On Tue, 14 Feb 2012, Jenny Smith wrote:
>
> The patch fixes the BUG() call from gic_set_guest_irq() but now I'm
> getting another guest data abort error.
> I used the arm-tools-2 branch -> the GIC code looks slightly
> different? Which branch should I be using?

I would ignore the tools branch for now; it is only useful for
upstreaming tool patches at the moment.

Just switch to xen-unstable, which has all the patches you need to
boot Xen and Linux on the Cortex-A15 emulator.
Re: [PATCH] arm: support fewer LR registers than virtual irqs
On 14/02/12 13:35, Jenny Smith wrote:
>
> Uncompressing Linux... done, booting the kernel.
> (XEN) Guest data abort: Translation fault at level 2
> (XEN) gva=c880cfe0 gpa=000000002b0a0fe0

This guest physical address (gpa) is (according to the A15 device tree)
the pl341 memory controller peripheral @ 0x2b0a0000. This peripheral
isn't implemented on the model and isn't passed through to the guest.

The AEM device tree does not list this peripheral so it looks like
you're not using the correct device tree.

David

Re: [PATCH] arm: support fewer LR registers than virtual irqs
On Tue, Feb 14, 2012 at 3:05 PM, David Vrabel <david.vrabel@citrix.com> wrote:
> On 14/02/12 13:35, Jenny Smith wrote:
>>
>> Uncompressing Linux... done, booting the kernel.
>> (XEN) Guest data abort: Translation fault at level 2
>> (XEN)     gva=c880cfe0 gpa=000000002b0a0fe0
>
> This guest physical address (gpa) is (according to the A15 device tree)
> the pl341 memory controller peripheral @ 0x2b0a0000.  This peripheral
> isn't implemented on the model and isn't passed through to the guest.
>
> The AEM device tree does not list this peripheral so it looks like
> you're not using the correct device tree.

Yep, that fixed it. Booting up now.

Cheers,
Jenny

Re: [Xen-devel] [PATCH] arm: support fewer LR registers than virtual irqs
On 14/02/12 13:34, Stefano Stabellini wrote:
> On Tue, 14 Feb 2012, Ian Campbell wrote:
>> On Tue, 2012-02-14 at 13:07 +0000, Stefano Stabellini wrote:
>>> If the vgic needs to inject a virtual irq into the guest, but no free
>>> LR registers are available, add the irq to a list and return.
>>> Whenever an LR register becomes available we add the queued irq to it
>>> and remove it from the list.
>>> We use the gic lock to protect the list and the bitmask.
>>
>> There's no need to order the IRQs by priority and ensure that the
>> highest priorities are in the LRs?
>
> You are right, they need to be ordered by priority.
>
> -->8--
>
> From 027ddc0a08c5608797b03e66b87178cd2522ad07 Mon Sep 17 00:00:00 2001
> From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> Date: Tue, 14 Feb 2012 13:23:56 +0000
> Subject: [PATCH] arm: support fewer LR registers than virtual irqs
>
> If the vgic needs to inject a virtual irq into the guest, but no free
> LR registers are available, add the irq to a list and return.
> Whenever an LR register becomes available we add the queued irq to it
> and remove it from the list.
> We use the gic lock to protect the list and the bitmask.
>
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> ---
> xen/arch/arm/gic.c | 58 ++++++++++++++++++++++++++++++++++++++---
> xen/include/asm-arm/domain.h | 1 +
> 2 files changed, 54 insertions(+), 5 deletions(-)
>
> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> index adc10bb..129b7ff 100644
> --- a/xen/arch/arm/gic.c
> +++ b/xen/arch/arm/gic.c
> @@ -25,6 +25,7 @@
> #include <xen/sched.h>
> #include <xen/errno.h>
> #include <xen/softirq.h>
> +#include <xen/list.h>
> #include <asm/p2m.h>
> #include <asm/domain.h>
>
> @@ -45,6 +46,8 @@ static struct {
> unsigned int lines;
> unsigned int cpus;
> spinlock_t lock;
> + uint64_t lr_mask;
> + struct list_head lr_pending;
> } gic;
>
> irq_desc_t irq_desc[NR_IRQS];
> @@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)
>
> GICH[GICH_HCR] = GICH_HCR_EN;
> GICH[GICH_MISR] = GICH_MISR_EOI;
> + gic.lr_mask = 0ULL;
> + INIT_LIST_HEAD(&gic.lr_pending);
> }
>
> /* Set up the GIC */
> @@ -345,16 +350,47 @@ int __init setup_irq(unsigned int irq, struct irqaction *new)
> return rc;
> }
>
> -void gic_set_guest_irq(unsigned int virtual_irq,
> +static inline void gic_set_lr(int lr, unsigned int virtual_irq,
> unsigned int state, unsigned int priority)
> {
> - BUG_ON(virtual_irq > nr_lrs);
> - GICH[GICH_LR + virtual_irq] = state |
> + BUG_ON(lr > nr_lrs);
> + GICH[GICH_LR + lr] = state |
> GICH_LR_MAINTENANCE_IRQ |
> ((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
> ((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
> }
>
> +void gic_set_guest_irq(unsigned int virtual_irq,
> + unsigned int state, unsigned int priority)
> +{
> + int i;
> + struct pending_irq *iter, *n;
> +
> + spin_lock(&gic.lock);
> + for (i = 0; i < nr_lrs; i++) {
> + if (!test_and_set_bit(i, &gic.lr_mask))
> + {
> + gic_set_lr(i, virtual_irq, state, priority);
> + spin_unlock(&gic.lock);
> + return;
> + }
> + }

You can skip this loop if gic.lr_pending is non-empty as there won't be
any spare bits in gic.lr_mask.
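
For illustration, a rough sketch of that early exit (untested; it assumes
the gic.lr_mask / gic.lr_pending fields and the locking introduced by this
patch):

    /* If anything is already queued, every LR must be busy, so the
     * scan over lr_mask can be skipped entirely. */
    if ( list_empty(&gic.lr_pending) )
    {
        for ( i = 0; i < nr_lrs; i++ )
        {
            if ( !test_and_set_bit(i, &gic.lr_mask) )
            {
                gic_set_lr(i, virtual_irq, state, priority);
                spin_unlock(&gic.lock);
                return;
            }
        }
    }
    /* otherwise fall through and queue on gic.lr_pending */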

> + n = irq_to_pending(current, virtual_irq);
> + list_for_each_entry ( iter, &gic.lr_pending, lr_link )
> + {
> + if ( iter->priority < priority )
> + {
> + list_add_tail(&n->lr_link, &iter->lr_link);
> + spin_unlock(&gic.lock);
> + return;
> + }
> + }

How many pending irqs are expected? If it's lots then looping through a
simple list like this might be slow. Something to keep in mind -- I
wouldn't try and fix it now.

> + list_add(&n->lr_link, &gic.lr_pending);
> + spin_unlock(&gic.lock);
> + return;
> +}
> +
> void gic_inject_irq_start(void)
> {
> uint32_t hcr;
> @@ -435,13 +471,26 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
> uint32_t lr;
> uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
>
> - for ( i = 0; i < 64; i++ ) {
> + for ( i = 0; i < nr_lrs; i++ ) {
> if ( eisr & ((uint64_t)1 << i) ) {
> struct pending_irq *p;
>
> + spin_lock(&gic.lock);
> lr = GICH[GICH_LR + i];
> virq = lr & GICH_LR_VIRTUAL_MASK;
> GICH[GICH_LR + i] = 0;
> + clear_bit(i, &gic.lr_mask);
> +
> + if ( !list_empty(gic.lr_pending.next) ) {
> + p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
> + gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
> + list_del(&p->lr_link);
> + INIT_LIST_HEAD(&p->lr_link);

I don't think you need the INIT_LIST_HEAD() here (and even if you did
you should use list_del_init()). You only need to init nodes if you
need to test if they are in a list or not.
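
For illustration only (assuming the standard Linux-style list helpers used
in Xen), the equivalence being pointed out is:

    /* current patch: remove and re-initialise in two steps */
    list_del(&p->lr_link);
    INIT_LIST_HEAD(&p->lr_link);

    /* same effect in one call, if re-initialisation is really wanted */
    list_del_init(&p->lr_link);

    /* plain list_del() is enough when the node is never tested with
     * list_empty() afterwards */
    list_del(&p->lr_link);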

> + set_bit(i, &gic.lr_mask);
> + } else {
> + gic_inject_irq_stop();
> + }
> + spin_unlock(&gic.lock);
>
> spin_lock(&current->arch.vgic.lock);
> p = irq_to_pending(current, virq);
> @@ -449,7 +498,6 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
> p->desc->status &= ~IRQ_INPROGRESS;
> GICC[GICC_DIR] = virq;
> }
> - gic_inject_irq_stop();
> list_del(&p->link);
> INIT_LIST_HEAD(&p->link);

Similarly, here (but this should be fixed up in a separate patch).

> cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 3372d14..75095ff 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -21,6 +21,7 @@ struct pending_irq
> struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
> uint8_t priority;
> struct list_head link;
> + struct list_head lr_link;
> };
>
> struct arch_domain


Re: [Xen-devel] [PATCH] arm: support fewer LR registers than virtual irqs
On Tue, 14 Feb 2012, David Vrabel wrote:
> > +void gic_set_guest_irq(unsigned int virtual_irq,
> > + unsigned int state, unsigned int priority)
> > +{
> > + int i;
> > + struct pending_irq *iter, *n;
> > +
> > + spin_lock(&gic.lock);
> > + for (i = 0; i < nr_lrs; i++) {
> > + if (!test_and_set_bit(i, &gic.lr_mask))
> > + {
> > + gic_set_lr(i, virtual_irq, state, priority);
> > + spin_unlock(&gic.lock);
> > + return;
> > + }
> > + }
>
> You can skip this loop if gic.lr_pending is non-empty as there won't be
> any spare bits in gic.lr_mask.

Right


> > + n = irq_to_pending(current, virtual_irq);
> > + list_for_each_entry ( iter, &gic.lr_pending, lr_link )
> > + {
> > + if ( iter->priority < priority )
> > + {
> > + list_add_tail(&n->lr_link, &iter->lr_link);
> > + spin_unlock(&gic.lock);
> > + return;
> > + }
> > + }
>
> How many pending irqs are expected? If it's lots then looping through a
> simple list like this might be slow. Something to keep in mind -- I
> wouldn't try and fix it now.

How many interrupts are the guests going to receive while 4 are already
in service? I am not sure yet.


> > + list_add(&n->lr_link, &gic.lr_pending);
> > + spin_unlock(&gic.lock);
> > + return;
> > +}
> > +
> > void gic_inject_irq_start(void)
> > {
> > uint32_t hcr;
> > @@ -435,13 +471,26 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
> > uint32_t lr;
> > uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
> >
> > - for ( i = 0; i < 64; i++ ) {
> > + for ( i = 0; i < nr_lrs; i++ ) {
> > if ( eisr & ((uint64_t)1 << i) ) {
> > struct pending_irq *p;
> >
> > + spin_lock(&gic.lock);
> > lr = GICH[GICH_LR + i];
> > virq = lr & GICH_LR_VIRTUAL_MASK;
> > GICH[GICH_LR + i] = 0;
> > + clear_bit(i, &gic.lr_mask);
> > +
> > + if ( !list_empty(gic.lr_pending.next) ) {
> > + p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
> > + gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
> > + list_del(&p->lr_link);
> > + INIT_LIST_HEAD(&p->lr_link);
>
> I don't think you need the INIT_LIST_HEAD() here (and even if you did
> you should use list_del_init()). You only need to init nodes if you
> need to test if they are in a list or not.

OK


> > + set_bit(i, &gic.lr_mask);
> > + } else {
> > + gic_inject_irq_stop();
> > + }
> > + spin_unlock(&gic.lock);
> >
> > spin_lock(&current->arch.vgic.lock);
> > p = irq_to_pending(current, virq);
> > @@ -449,7 +498,6 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
> > p->desc->status &= ~IRQ_INPROGRESS;
> > GICC[GICC_DIR] = virq;
> > }
> > - gic_inject_irq_stop();
> > list_del(&p->link);
> > INIT_LIST_HEAD(&p->link);
>
> Similarly, here (but this should be fixed up in a separate patch).

OK
