
[RFC PATCH v1 2/2] xen/arm64: entry: Add missing code symbol annotations
From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>

Convert the remaining bare labels in the arm64 entry code to FUNC_LOCAL()/END() annotations, and add the missing END() markers to the routines already annotated with FUNC().

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
---
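Note for reviewers: the sketch below is my rough approximation of what a
FUNC_LOCAL()/END() pair boils down to; the real linkage.h macros may
additionally handle alignment and padding, and hyp_error is only used here as
an example name with a stand-in body. The intent is that every routine ends up
as an ELF function-type symbol with its size recorded, while FUNC() also makes
the symbol global:

        /* Roughly what FUNC_LOCAL(hyp_error) provides: */
        .type   hyp_error, %function     /* ELF function-type symbol; FUNC()   */
hyp_error:                               /* would additionally emit ".global"  */
        ret                              /* stand-in for the routine body      */
        /* Roughly what END(hyp_error) provides: */
        .size   hyp_error, . - hyp_error /* record the symbol's size           */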
xen/arch/arm/arm64/entry.S | 60 ++++++++++++++++++++++++++------------
1 file changed, 42 insertions(+), 18 deletions(-)

diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index 6188dd2416..af9a592cae 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -289,21 +289,25 @@
b do_bad_mode
.endm

-hyp_sync_invalid:
+FUNC_LOCAL(hyp_sync_invalid)
entry hyp=1
invalid BAD_SYNC
+END(hyp_sync_invalid)

-hyp_irq_invalid:
+FUNC_LOCAL(hyp_irq_invalid)
entry hyp=1
invalid BAD_IRQ
+END(hyp_irq_invalid)

-hyp_fiq_invalid:
+FUNC_LOCAL(hyp_fiq_invalid)
entry hyp=1
invalid BAD_FIQ
+END(hyp_fiq_invalid)

-hyp_error_invalid:
+FUNC_LOCAL(hyp_error_invalid)
entry hyp=1
invalid BAD_ERROR
+END(hyp_error_invalid)

/*
* SError received while running in the hypervisor mode.
@@ -313,11 +317,12 @@ hyp_error_invalid:
* simplicity, as SError should be rare and potentially fatal,
* all interrupts are kept masked.
*/
-hyp_error:
+FUNC_LOCAL(hyp_error)
entry hyp=1
mov x0, sp
bl do_trap_hyp_serror
exit hyp=1
+END(hyp_error)

/*
* Synchronous exception received while running in the hypervisor mode.
@@ -327,7 +332,7 @@ hyp_error:
* some of them. So we want to inherit the state from the interrupted
* context.
*/
-hyp_sync:
+FUNC_LOCAL(hyp_sync)
entry hyp=1

/* Inherit interrupts */
@@ -338,6 +343,7 @@ hyp_sync:
mov x0, sp
bl do_trap_hyp_sync
exit hyp=1
+END(hyp_sync)

/*
* IRQ received while running in the hypervisor mode.
@@ -352,7 +358,7 @@ hyp_sync:
* would require some rework in some paths (e.g. panic, livepatch) to
* ensure the ordering is enforced everywhere.
*/
-hyp_irq:
+FUNC_LOCAL(hyp_irq)
entry hyp=1

/* Inherit D, A, F interrupts and keep I masked */
@@ -365,8 +371,9 @@ hyp_irq:
mov x0, sp
bl do_trap_irq
exit hyp=1
+END(hyp_irq)

-guest_sync:
+FUNC_LOCAL(guest_sync)
/*
* Save x0, x1 in advance
*/
@@ -413,8 +420,9 @@ fastpath_out_workaround:
mov x1, xzr
eret
sb
+END(guest_sync)

-wa2_ssbd:
+FUNC_LOCAL(wa2_ssbd)
#ifdef CONFIG_ARM_SSBD
alternative_cb arm_enable_wa2_handling
b wa2_end
@@ -450,42 +458,55 @@ wa2_end:
mov x0, xzr
eret
sb
-guest_sync_slowpath:
+END(wa2_ssbd)
+
+FUNC_LOCAL(guest_sync_slowpath)
/*
* x0/x1 may have been scratch by the fast path above, so avoid
* to save them.
*/
guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_sync, save_x0_x1=0
+END(guest_sync_slowpath)

-guest_irq:
+FUNC_LOCAL(guest_irq)
guest_vector compat=0, iflags=IFLAGS__A__, trap=irq
+END(guest_irq)

-guest_fiq_invalid:
+FUNC_LOCAL(guest_fiq_invalid)
entry hyp=0, compat=0
invalid BAD_FIQ
+END(guest_fiq_invalid)

-guest_error:
+FUNC_LOCAL(guest_error)
guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_serror
+END(guest_error)

-guest_sync_compat:
+FUNC_LOCAL(guest_sync_compat)
guest_vector compat=1, iflags=IFLAGS__AI_, trap=guest_sync
+END(guest_sync_compat)

-guest_irq_compat:
+FUNC_LOCAL(guest_irq_compat)
guest_vector compat=1, iflags=IFLAGS__A__, trap=irq
+END(guest_irq_compat)

-guest_fiq_invalid_compat:
+FUNC_LOCAL(guest_fiq_invalid_compat)
entry hyp=0, compat=1
invalid BAD_FIQ
+END(guest_fiq_invalid_compat)

-guest_error_compat:
+FUNC_LOCAL(guest_error_compat)
guest_vector compat=1, iflags=IFLAGS__AI_, trap=guest_serror
+END(guest_error_compat)

FUNC(return_to_new_vcpu32)
exit hyp=0, compat=1
+END(return_to_new_vcpu32)
+
FUNC(return_to_new_vcpu64)
exit hyp=0, compat=0
+END(return_to_new_vcpu64)

-return_from_trap:
+FUNC_LOCAL(return_from_trap)
msr daifset, #IFLAGS___I_ /* Mask interrupts */

ldr x21, [sp, #UREGS_PC] /* load ELR */
@@ -524,6 +545,7 @@ return_from_trap:

eret
sb
+END(return_from_trap)

/*
* Consume pending SError generated by the guest if any.
@@ -617,6 +639,7 @@ FUNC(hyp_traps_vector)
ventry guest_irq_compat /* IRQ 32-bit EL0/EL1 */
ventry guest_fiq_invalid_compat /* FIQ 32-bit EL0/EL1 */
ventry guest_error_compat /* Error 32-bit EL0/EL1 */
+END(hyp_traps_vector)

/*
* struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
@@ -647,6 +670,7 @@ FUNC(__context_switch)
ldr lr, [x8]
mov sp, x9
ret
+END(__context_switch)

/*
* Local variables:
--
2.40.1