Mailing List Archive

[PATCH v2 05/11] x86/vioapic: switch to use the EOI callback mechanism
Switch the emulated IO-APIC code to use the local APIC EOI callback
mechanism. This allows removing the last hardcoded callback from
vlapic_handle_EOI. Removing the hardcoded vIO-APIC callback also
allows getting rid of setting the EOI exit bitmap based on the
triggering mode, as now all users that require an EOI action use the
newly introduced callback mechanism.

Move and rename the vioapic_update_EOI now that it can be made static.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v1:
- Remove the triggering check in the update_eoi_exit_bitmap call.
- Register the vlapic callbacks when loading the vIO-APIC state.
- Reduce scope of ent.
---
xen/arch/x86/hvm/vioapic.c | 131 ++++++++++++++++++++++++-------------
xen/arch/x86/hvm/vlapic.c | 5 +-
2 files changed, 86 insertions(+), 50 deletions(-)

diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 752fc410db..dce98b1479 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -375,6 +375,50 @@ static const struct hvm_mmio_ops vioapic_mmio_ops = {
.write = vioapic_write
};

+static void eoi_callback(unsigned int vector, void *data)
+{
+ struct domain *d = current->domain;
+ struct hvm_irq *hvm_irq = hvm_domain_irq(d);
+ unsigned int i;
+
+ ASSERT(has_vioapic(d));
+
+ spin_lock(&d->arch.hvm.irq_lock);
+
+ for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
+ {
+ struct hvm_vioapic *vioapic = domain_vioapic(d, i);
+ unsigned int pin;
+
+ for ( pin = 0; pin < vioapic->nr_pins; pin++ )
+ {
+ union vioapic_redir_entry *ent = &vioapic->redirtbl[pin];
+
+ if ( ent->fields.vector != vector )
+ continue;
+
+ ent->fields.remote_irr = 0;
+
+ if ( is_iommu_enabled(d) )
+ {
+ spin_unlock(&d->arch.hvm.irq_lock);
+ hvm_dpci_eoi(vioapic->base_gsi + pin, ent);
+ spin_lock(&d->arch.hvm.irq_lock);
+ }
+
+ if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
+ !ent->fields.mask &&
+ hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
+ {
+ ent->fields.remote_irr = 1;
+ vioapic_deliver(vioapic, pin);
+ }
+ }
+ }
+
+ spin_unlock(&d->arch.hvm.irq_lock);
+}
+
static void ioapic_inj_irq(
struct hvm_vioapic *vioapic,
struct vlapic *target,
@@ -388,7 +432,8 @@ static void ioapic_inj_irq(
ASSERT((delivery_mode == dest_Fixed) ||
(delivery_mode == dest_LowestPrio));

- vlapic_set_irq(target, vector, trig_mode);
+ vlapic_set_irq_callback(target, vector, trig_mode,
+ trig_mode ? eoi_callback : NULL, NULL);
}

static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
@@ -495,50 +540,6 @@ void vioapic_irq_positive_edge(struct domain *d, unsigned int irq)
}
}

-void vioapic_update_EOI(unsigned int vector)
-{
- struct domain *d = current->domain;
- struct hvm_irq *hvm_irq = hvm_domain_irq(d);
- union vioapic_redir_entry *ent;
- unsigned int i;
-
- ASSERT(has_vioapic(d));
-
- spin_lock(&d->arch.hvm.irq_lock);
-
- for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
- {
- struct hvm_vioapic *vioapic = domain_vioapic(d, i);
- unsigned int pin;
-
- for ( pin = 0; pin < vioapic->nr_pins; pin++ )
- {
- ent = &vioapic->redirtbl[pin];
- if ( ent->fields.vector != vector )
- continue;
-
- ent->fields.remote_irr = 0;
-
- if ( is_iommu_enabled(d) )
- {
- spin_unlock(&d->arch.hvm.irq_lock);
- hvm_dpci_eoi(vioapic->base_gsi + pin, ent);
- spin_lock(&d->arch.hvm.irq_lock);
- }
-
- if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
- !ent->fields.mask &&
- hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
- {
- ent->fields.remote_irr = 1;
- vioapic_deliver(vioapic, pin);
- }
- }
- }
-
- spin_unlock(&d->arch.hvm.irq_lock);
-}
-
int vioapic_get_mask(const struct domain *d, unsigned int gsi)
{
unsigned int pin = 0; /* See gsi_vioapic */
@@ -592,6 +593,8 @@ static int ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
{
struct hvm_vioapic *s;
+ unsigned int i;
+ int rc;

if ( !has_vioapic(d) )
return -ENODEV;
@@ -602,7 +605,43 @@ static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
d->arch.hvm.nr_vioapics != 1 )
return -EOPNOTSUPP;

- return hvm_load_entry(IOAPIC, h, &s->domU);
+ rc = hvm_load_entry(IOAPIC, h, &s->domU);
+ if ( rc )
+ return rc;
+
+ for ( i = 0; i < ARRAY_SIZE(s->domU.redirtbl); i++ )
+ {
+ const union vioapic_redir_entry *ent = &s->domU.redirtbl[i];
+ unsigned int vector = ent->fields.vector;
+ unsigned int delivery_mode = ent->fields.delivery_mode;
+ struct vcpu *v;
+
+ /*
+ * Add a callback for each possible vector injected by a redirection
+ * entry.
+ */
+ if ( vector < 16 || !ent->fields.remote_irr ||
+ (delivery_mode != dest_LowestPrio && delivery_mode != dest_Fixed) )
+ continue;
+
+ for_each_vcpu ( d, v )
+ {
+ struct vlapic *vlapic = vcpu_vlapic(v);
+
+ /*
+ * NB: if the vlapic registers were restored before the vio-apic
+ * ones we could test whether the vector is set in the vlapic IRR
+ * or ISR registers before unconditionally setting the callback.
+ * This is harmless as eoi_callback is capable of dealing with
+ * spurious callbacks.
+ */
+ if ( vlapic_match_dest(vlapic, NULL, 0, ent->fields.dest_id,
+ ent->fields.dest_mode) )
+ vlapic_set_callback(vlapic, vector, eoi_callback, NULL);
+ }
+ }
+
+ return 0;
}

HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 8a18b33428..35f12e0909 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -189,7 +189,7 @@ void vlapic_set_irq_callback(struct vlapic *vlapic, uint8_t vec, uint8_t trig,

if ( hvm_funcs.update_eoi_exit_bitmap )
alternative_vcall(hvm_funcs.update_eoi_exit_bitmap, target, vec,
- trig || callback);
+ callback);

if ( hvm_funcs.deliver_posted_intr )
alternative_vcall(hvm_funcs.deliver_posted_intr, target, vec);
@@ -493,9 +493,6 @@ void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector)
unsigned long flags;
unsigned int index = vector - 16;

- if ( vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]) )
- vioapic_update_EOI(vector);
-
spin_lock_irqsave(&vlapic->callback_lock, flags);
callback = vlapic->callbacks[index].callback;
vlapic->callbacks[index].callback = NULL;
--
2.28.0
RE: [PATCH v2 05/11] x86/vioapic: switch to use the EOI callback mechanism [ In reply to ]
> -----Original Message-----
> From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of Roger Pau Monne
> Sent: 30 September 2020 11:41
> To: xen-devel@lists.xenproject.org
> Cc: Roger Pau Monne <roger.pau@citrix.com>; Jan Beulich <jbeulich@suse.com>; Andrew Cooper
> <andrew.cooper3@citrix.com>; Wei Liu <wl@xen.org>
> Subject: [PATCH v2 05/11] x86/vioapic: switch to use the EOI callback mechanism
>
> Switch the emulated IO-APIC code to use the local APIC EOI callback
> mechanism. This allows to remove the last hardcoded callback from
> vlapic_handle_EOI. Removing the hardcoded vIO-APIC callback also
> allows to getting rid of setting the EOI exit bitmap based on the
> triggering mode, as now all users that require an EOI action use the
> newly introduced callback mechanism.
>
> Move and rename the vioapic_update_EOI now that it can be made static.
>
> Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
> ---
> Changes since v1:
> - Remove the triggering check in the update_eoi_exit_bitmap call.
> - Register the vlapic callbacks when loading the vIO-APIC state.
> - Reduce scope of ent.
> ---
> xen/arch/x86/hvm/vioapic.c | 131 ++++++++++++++++++++++++-------------
> xen/arch/x86/hvm/vlapic.c | 5 +-
> 2 files changed, 86 insertions(+), 50 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
> index 752fc410db..dce98b1479 100644
> --- a/xen/arch/x86/hvm/vioapic.c
> +++ b/xen/arch/x86/hvm/vioapic.c
> @@ -375,6 +375,50 @@ static const struct hvm_mmio_ops vioapic_mmio_ops = {
> .write = vioapic_write
> };
>
> +static void eoi_callback(unsigned int vector, void *data)
> +{
> + struct domain *d = current->domain;
> + struct hvm_irq *hvm_irq = hvm_domain_irq(d);
> + unsigned int i;
> +
> + ASSERT(has_vioapic(d));
> +
> + spin_lock(&d->arch.hvm.irq_lock);
> +
> + for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
> + {
> + struct hvm_vioapic *vioapic = domain_vioapic(d, i);
> + unsigned int pin;
> +
> + for ( pin = 0; pin < vioapic->nr_pins; pin++ )
> + {
> + union vioapic_redir_entry *ent = &vioapic->redirtbl[pin];
> +
> + if ( ent->fields.vector != vector )
> + continue;
> +
> + ent->fields.remote_irr = 0;
> +
> + if ( is_iommu_enabled(d) )
> + {
> + spin_unlock(&d->arch.hvm.irq_lock);
> + hvm_dpci_eoi(vioapic->base_gsi + pin, ent);
> + spin_lock(&d->arch.hvm.irq_lock);

Is this safe? If so, why lock around the whole loop in the first place?

Paul

> + }
> +
> + if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
> + !ent->fields.mask &&
> + hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
> + {
> + ent->fields.remote_irr = 1;
> + vioapic_deliver(vioapic, pin);
> + }
> + }
> + }
> +
> + spin_unlock(&d->arch.hvm.irq_lock);
> +}
> +
> static void ioapic_inj_irq(
> struct hvm_vioapic *vioapic,
> struct vlapic *target,
> @@ -388,7 +432,8 @@ static void ioapic_inj_irq(
> ASSERT((delivery_mode == dest_Fixed) ||
> (delivery_mode == dest_LowestPrio));
>
> - vlapic_set_irq(target, vector, trig_mode);
> + vlapic_set_irq_callback(target, vector, trig_mode,
> + trig_mode ? eoi_callback : NULL, NULL);
> }
>
> static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
> @@ -495,50 +540,6 @@ void vioapic_irq_positive_edge(struct domain *d, unsigned int irq)
> }
> }
>
> -void vioapic_update_EOI(unsigned int vector)
> -{
> - struct domain *d = current->domain;
> - struct hvm_irq *hvm_irq = hvm_domain_irq(d);
> - union vioapic_redir_entry *ent;
> - unsigned int i;
> -
> - ASSERT(has_vioapic(d));
> -
> - spin_lock(&d->arch.hvm.irq_lock);
> -
> - for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
> - {
> - struct hvm_vioapic *vioapic = domain_vioapic(d, i);
> - unsigned int pin;
> -
> - for ( pin = 0; pin < vioapic->nr_pins; pin++ )
> - {
> - ent = &vioapic->redirtbl[pin];
> - if ( ent->fields.vector != vector )
> - continue;
> -
> - ent->fields.remote_irr = 0;
> -
> - if ( is_iommu_enabled(d) )
> - {
> - spin_unlock(&d->arch.hvm.irq_lock);
> - hvm_dpci_eoi(vioapic->base_gsi + pin, ent);
> - spin_lock(&d->arch.hvm.irq_lock);
> - }
> -
> - if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
> - !ent->fields.mask &&
> - hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
> - {
> - ent->fields.remote_irr = 1;
> - vioapic_deliver(vioapic, pin);
> - }
> - }
> - }
> -
> - spin_unlock(&d->arch.hvm.irq_lock);
> -}
> -
> int vioapic_get_mask(const struct domain *d, unsigned int gsi)
> {
> unsigned int pin = 0; /* See gsi_vioapic */
> @@ -592,6 +593,8 @@ static int ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
> static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
> {
> struct hvm_vioapic *s;
> + unsigned int i;
> + int rc;
>
> if ( !has_vioapic(d) )
> return -ENODEV;
> @@ -602,7 +605,43 @@ static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
> d->arch.hvm.nr_vioapics != 1 )
> return -EOPNOTSUPP;
>
> - return hvm_load_entry(IOAPIC, h, &s->domU);
> + rc = hvm_load_entry(IOAPIC, h, &s->domU);
> + if ( rc )
> + return rc;
> +
> + for ( i = 0; i < ARRAY_SIZE(s->domU.redirtbl); i++ )
> + {
> + const union vioapic_redir_entry *ent = &s->domU.redirtbl[i];
> + unsigned int vector = ent->fields.vector;
> + unsigned int delivery_mode = ent->fields.delivery_mode;
> + struct vcpu *v;
> +
> + /*
> + * Add a callback for each possible vector injected by a redirection
> + * entry.
> + */
> + if ( vector < 16 || !ent->fields.remote_irr ||
> + (delivery_mode != dest_LowestPrio && delivery_mode != dest_Fixed) )
> + continue;
> +
> + for_each_vcpu ( d, v )
> + {
> + struct vlapic *vlapic = vcpu_vlapic(v);
> +
> + /*
> + * NB: if the vlapic registers were restored before the vio-apic
> + * ones we could test whether the vector is set in the vlapic IRR
> + * or ISR registers before unconditionally setting the callback.
> + * This is harmless as eoi_callback is capable of dealing with
> + * spurious callbacks.
> + */
> + if ( vlapic_match_dest(vlapic, NULL, 0, ent->fields.dest_id,
> + ent->fields.dest_mode) )
> + vlapic_set_callback(vlapic, vector, eoi_callback, NULL);
> + }
> + }
> +
> + return 0;
> }
>
> HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
> diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
> index 8a18b33428..35f12e0909 100644
> --- a/xen/arch/x86/hvm/vlapic.c
> +++ b/xen/arch/x86/hvm/vlapic.c
> @@ -189,7 +189,7 @@ void vlapic_set_irq_callback(struct vlapic *vlapic, uint8_t vec, uint8_t trig,
>
> if ( hvm_funcs.update_eoi_exit_bitmap )
> alternative_vcall(hvm_funcs.update_eoi_exit_bitmap, target, vec,
> - trig || callback);
> + callback);
>
> if ( hvm_funcs.deliver_posted_intr )
> alternative_vcall(hvm_funcs.deliver_posted_intr, target, vec);
> @@ -493,9 +493,6 @@ void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector)
> unsigned long flags;
> unsigned int index = vector - 16;
>
> - if ( vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]) )
> - vioapic_update_EOI(vector);
> -
> spin_lock_irqsave(&vlapic->callback_lock, flags);
> callback = vlapic->callbacks[index].callback;
> vlapic->callbacks[index].callback = NULL;
> --
> 2.28.0
>
Re: [PATCH v2 05/11] x86/vioapic: switch to use the EOI callback mechanism [ In reply to ]
On Wed, Sep 30, 2020 at 01:09:21PM +0100, Paul Durrant wrote:
> > -----Original Message-----
> > From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of Roger Pau Monne
> > Sent: 30 September 2020 11:41
> > To: xen-devel@lists.xenproject.org
> > Cc: Roger Pau Monne <roger.pau@citrix.com>; Jan Beulich <jbeulich@suse.com>; Andrew Cooper
> > <andrew.cooper3@citrix.com>; Wei Liu <wl@xen.org>
> > Subject: [PATCH v2 05/11] x86/vioapic: switch to use the EOI callback mechanism
> >
> > Switch the emulated IO-APIC code to use the local APIC EOI callback
> > mechanism. This allows to remove the last hardcoded callback from
> > vlapic_handle_EOI. Removing the hardcoded vIO-APIC callback also
> > allows to getting rid of setting the EOI exit bitmap based on the
> > triggering mode, as now all users that require an EOI action use the
> > newly introduced callback mechanism.
> >
> > Move and rename the vioapic_update_EOI now that it can be made static.
> >
> > Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
> > ---
> > Changes since v1:
> > - Remove the triggering check in the update_eoi_exit_bitmap call.
> > - Register the vlapic callbacks when loading the vIO-APIC state.
> > - Reduce scope of ent.
> > ---
> > xen/arch/x86/hvm/vioapic.c | 131 ++++++++++++++++++++++++-------------
> > xen/arch/x86/hvm/vlapic.c | 5 +-
> > 2 files changed, 86 insertions(+), 50 deletions(-)
> >
> > diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
> > index 752fc410db..dce98b1479 100644
> > --- a/xen/arch/x86/hvm/vioapic.c
> > +++ b/xen/arch/x86/hvm/vioapic.c
> > @@ -375,6 +375,50 @@ static const struct hvm_mmio_ops vioapic_mmio_ops = {
> > .write = vioapic_write
> > };
> >
> > +static void eoi_callback(unsigned int vector, void *data)
> > +{
> > + struct domain *d = current->domain;
> > + struct hvm_irq *hvm_irq = hvm_domain_irq(d);
> > + unsigned int i;
> > +
> > + ASSERT(has_vioapic(d));
> > +
> > + spin_lock(&d->arch.hvm.irq_lock);
> > +
> > + for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
> > + {
> > + struct hvm_vioapic *vioapic = domain_vioapic(d, i);
> > + unsigned int pin;
> > +
> > + for ( pin = 0; pin < vioapic->nr_pins; pin++ )
> > + {
> > + union vioapic_redir_entry *ent = &vioapic->redirtbl[pin];
> > +
> > + if ( ent->fields.vector != vector )
> > + continue;
> > +
> > + ent->fields.remote_irr = 0;
> > +
> > + if ( is_iommu_enabled(d) )
> > + {
> > + spin_unlock(&d->arch.hvm.irq_lock);
> > + hvm_dpci_eoi(vioapic->base_gsi + pin, ent);
> > + spin_lock(&d->arch.hvm.irq_lock);
>
> Is this safe? If so, why lock around the whole loop in the first place?

It's my understanding that locking around the whole loop is mostly
done for convenience reasons, as vioapic entries cannot go away after
initialization.

The lock here is taken to assure consistency of the contents of
vioapic_redir_entry entry, so that other concurrent read/writes don't
change the entry while being processed here - but the entry can never
disappear under our feet.

Jan expressed similar concerns on the previous version, but I'm afraid
I didn't look much at whether hvm_dpci_eoi could be called with the
lock taken, or whether we could move the call of the EOI hooks out of
the locked region. I was mostly leaving this part for later, since
the series is already fairly long and this didn't seem critical.

Thanks, Roger.
Re: [PATCH v2 05/11] x86/vioapic: switch to use the EOI callback mechanism [ In reply to ]
On 30.09.2020 12:41, Roger Pau Monne wrote:
> --- a/xen/arch/x86/hvm/vlapic.c
> +++ b/xen/arch/x86/hvm/vlapic.c
> @@ -189,7 +189,7 @@ void vlapic_set_irq_callback(struct vlapic *vlapic, uint8_t vec, uint8_t trig,
>
> if ( hvm_funcs.update_eoi_exit_bitmap )
> alternative_vcall(hvm_funcs.update_eoi_exit_bitmap, target, vec,
> - trig || callback);
> + callback);

There's a shortcoming in the alternative call framework which I
see no way to eliminate but which makes it necessary to use
!!callback here. Otherwise, if the callback happens to sit on a
256-byte boundary (low address byte zero), you'll pass false
when you mean true. (The original use, i.e. prior to patch 3,
of just "trig" was sufficiently okay, because the parameter
- despite being u8 - is effectively used as a boolean by the
callers iirc.)

Or perhaps the best thing is to require wrappers for all hooks
taking bool parameters, because then the necessary conversion
will be done when calling the wrapper.

Jan