Mailing List Archive

[xen staging-4.18] x86/spec-ctrl: Rename spec_ctrl_flags to scf
commit 57e5cab3decda418f89028de9f5854abd83c3c06
Author: Andrew Cooper <andrew.cooper3@citrix.com>
AuthorDate: Thu Mar 28 11:57:25 2024 +0000
Commit: Andrew Cooper <andrew.cooper3@citrix.com>
CommitDate: Tue Apr 9 16:45:01 2024 +0100

x86/spec-ctrl: Rename spec_ctrl_flags to scf

XSA-455 was ultimately caused by having fields with too-similar names.

Both {xen,last}_spec_ctrl are fields containing an architectural MSR_SPEC_CTRL
value. The spec_ctrl_flags field contains Xen-internal flags.

To more-obviously distinguish the two, rename spec_ctrl_flags to scf, which is
also the prefix of the constants used by the fields.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
(cherry picked from commit c62673c4334b3372ebd4292a7ac8185357e7ea27)
---
xen/arch/x86/acpi/power.c | 4 ++--
xen/arch/x86/domain.c | 8 ++++----
xen/arch/x86/hvm/svm/entry.S | 2 +-
xen/arch/x86/hvm/vmx/entry.S | 2 +-
xen/arch/x86/hvm/vmx/vmcs.c | 2 +-
xen/arch/x86/include/asm/current.h | 2 +-
xen/arch/x86/include/asm/domain.h | 2 +-
xen/arch/x86/include/asm/spec_ctrl.h | 16 ++++++++--------
xen/arch/x86/include/asm/spec_ctrl_asm.h | 22 +++++++++++-----------
xen/arch/x86/setup.c | 2 +-
xen/arch/x86/spec_ctrl.c | 18 +++++++++---------
xen/arch/x86/x86_64/asm-offsets.c | 2 +-
xen/arch/x86/x86_64/compat/entry.S | 4 ++--
xen/arch/x86/x86_64/entry.S | 2 +-
14 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index 81233738b1..f6ab522690 100644
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -246,7 +246,7 @@ static int enter_state(u32 state)

ci = get_cpu_info();
/* Avoid NMI/#MC using unsafe MSRs until we've reloaded microcode. */
- ci->spec_ctrl_flags &= ~SCF_IST_MASK;
+ ci->scf &= ~SCF_IST_MASK;

ACPI_FLUSH_CPU_CACHE();

@@ -290,7 +290,7 @@ static int enter_state(u32 state)
panic("Missing previously available feature(s)\n");

/* Re-enabled default NMI/#MC use of MSRs now microcode is loaded. */
- ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_IST_MASK);
+ ci->scf |= (default_scf & SCF_IST_MASK);

if ( boot_cpu_has(X86_FEATURE_IBRSB) || boot_cpu_has(X86_FEATURE_IBRS) )
{
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 3712e36df9..13912f907b 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2109,10 +2109,10 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
}
}

- /* Update the top-of-stack block with the new spec_ctrl settings. */
- info->spec_ctrl_flags =
- (info->spec_ctrl_flags & ~SCF_DOM_MASK) |
- (nextd->arch.spec_ctrl_flags & SCF_DOM_MASK);
+ /* Update the top-of-stack block with the new speculation settings. */
+ info->scf =
+ (info->scf & ~SCF_DOM_MASK) |
+ (nextd->arch.scf & SCF_DOM_MASK);
}

sched_context_switched(prev, next);
diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index 5f31d71698..56a5865c29 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -101,7 +101,7 @@ __UNLIKELY_END(nsvm_hap)
/* SPEC_CTRL_ENTRY_FROM_SVM Req: %rsp=regs/cpuinfo, %rdx=0 Clob: acd */

.macro svm_vmexit_cond_ibpb
- testb $SCF_entry_ibpb, CPUINFO_spec_ctrl_flags(%rsp)
+ testb $SCF_entry_ibpb, CPUINFO_scf(%rsp)
jz .L_skip_ibpb

mov $MSR_PRED_CMD, %ecx
diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
index 1bead826ca..744cc5186a 100644
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -111,7 +111,7 @@ UNLIKELY_END(realmode)
BUILD_BUG_ON(SCF_verw & ~0xff)
movzbl VCPU_vmx_launched(%rbx), %ecx
shl $31, %ecx
- movzbl CPUINFO_spec_ctrl_flags(%rsp), %eax
+ movzbl CPUINFO_scf(%rsp), %eax
and $SCF_verw, %eax
or %eax, %ecx

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 04d32b18fd..4b6ca7c193 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1376,7 +1376,7 @@ static int construct_vmcs(struct vcpu *v)
rc = vmx_add_msr(v, MSR_FLUSH_CMD, FLUSH_CMD_L1D,
VMX_MSR_GUEST_LOADONLY);

- if ( !rc && (d->arch.spec_ctrl_flags & SCF_entry_ibpb) )
+ if ( !rc && (d->arch.scf & SCF_entry_ibpb) )
rc = vmx_add_msr(v, MSR_PRED_CMD, PRED_CMD_IBPB,
VMX_MSR_HOST);

diff --git a/xen/arch/x86/include/asm/current.h b/xen/arch/x86/include/asm/current.h
index da5e152a10..9cc8d8e3d4 100644
--- a/xen/arch/x86/include/asm/current.h
+++ b/xen/arch/x86/include/asm/current.h
@@ -57,7 +57,7 @@ struct cpu_info {
unsigned int shadow_spec_ctrl;
uint8_t xen_spec_ctrl;
uint8_t last_spec_ctrl;
- uint8_t spec_ctrl_flags;
+ uint8_t scf; /* SCF_* */

/*
* The following field controls copying of the L4 page table of 64-bit
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index 619e667938..237510fed3 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -324,7 +324,7 @@ struct arch_domain
uint32_t pci_cf8;
uint8_t cmos_idx;

- uint8_t spec_ctrl_flags; /* See SCF_DOM_MASK */
+ uint8_t scf; /* See SCF_DOM_MASK */

union {
struct pv_domain pv;
diff --git a/xen/arch/x86/include/asm/spec_ctrl.h b/xen/arch/x86/include/asm/spec_ctrl.h
index 011ab1db27..ee3119e128 100644
--- a/xen/arch/x86/include/asm/spec_ctrl.h
+++ b/xen/arch/x86/include/asm/spec_ctrl.h
@@ -9,10 +9,10 @@
#define __X86_SPEC_CTRL_H__

/*
- * Encoding of:
- * cpuinfo.spec_ctrl_flags
- * default_spec_ctrl_flags
- * domain.spec_ctrl_flags
+ * Encoding of Xen's speculation control flags in:
+ * cpuinfo.scf
+ * default_scf
+ * domain.scf
*
* Live settings are in the top-of-stack block, because they need to be
* accessable when XPTI is active. Some settings are fixed from boot, some
@@ -82,7 +82,7 @@ extern int8_t opt_l1d_flush;

extern bool bsp_delay_spec_ctrl;
extern uint8_t default_xen_spec_ctrl;
-extern uint8_t default_spec_ctrl_flags;
+extern uint8_t default_scf;

extern int8_t opt_xpti_hwdom, opt_xpti_domu;

@@ -102,7 +102,7 @@ static inline void init_shadow_spec_ctrl_state(void)

info->shadow_spec_ctrl = 0;
info->xen_spec_ctrl = default_xen_spec_ctrl;
- info->spec_ctrl_flags = default_spec_ctrl_flags;
+ info->scf = default_scf;

/*
* For least latency, the VERW selector should be a writeable data
@@ -126,7 +126,7 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
*/
info->shadow_spec_ctrl = val;
barrier();
- info->spec_ctrl_flags |= SCF_use_shadow;
+ info->scf |= SCF_use_shadow;
barrier();
alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
"a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
@@ -175,7 +175,7 @@ static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
* Disable shadowing before updating the MSR. There are no SMP issues
* here; only local processor ordering concerns.
*/
- info->spec_ctrl_flags &= ~SCF_use_shadow;
+ info->scf &= ~SCF_use_shadow;
barrier();
alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
"a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
diff --git a/xen/arch/x86/include/asm/spec_ctrl_asm.h b/xen/arch/x86/include/asm/spec_ctrl_asm.h
index 3c9aa56301..f593c5d621 100644
--- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
+++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
@@ -39,7 +39,7 @@
* shadowing logic.
*
* Factor 2 is harder. We maintain a shadow_spec_ctrl value, and a use_shadow
- * boolean in the per cpu spec_ctrl_flags. The synchronous use is:
+ * boolean in the per cpu scf. The synchronous use is:
*
* 1) Store guest value in shadow_spec_ctrl
* 2) Set the use_shadow boolean
@@ -86,11 +86,11 @@
* interrupting Xen.
*/
.if \maybexen
- testb $SCF_entry_ibpb, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ testb $SCF_entry_ibpb, STACK_CPUINFO_FIELD(scf)(%r14)
jz .L\@_skip
testb $3, UREGS_cs(%rsp)
.else
- testb $SCF_entry_ibpb, CPUINFO_spec_ctrl_flags(%rsp)
+ testb $SCF_entry_ibpb, CPUINFO_scf(%rsp)
.endif
jz .L\@_skip

@@ -160,8 +160,8 @@
#define STK_REL(field, top_of_stk) ((field) - (top_of_stk))

.macro SPEC_CTRL_COND_VERW \
- scf=STK_REL(CPUINFO_spec_ctrl_flags, CPUINFO_error_code), \
- sel=STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)
+ scf=STK_REL(CPUINFO_scf, CPUINFO_error_code), \
+ sel=STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)
/*
* Requires \scf and \sel as %rsp-relative expressions
* Clobbers eflags
@@ -216,10 +216,10 @@
testb $3, UREGS_cs(%rsp)
setnz %al
not %eax
- and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ and %al, STACK_CPUINFO_FIELD(scf)(%r14)
movzbl STACK_CPUINFO_FIELD(xen_spec_ctrl)(%r14), %eax
.else
- andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
+ andb $~SCF_use_shadow, CPUINFO_scf(%rsp)
movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
.endif

@@ -238,7 +238,7 @@
mov %eax, CPUINFO_shadow_spec_ctrl(%rsp)

/* Set SPEC_CTRL shadowing *before* loading the guest value. */
- orb $SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
+ orb $SCF_use_shadow, CPUINFO_scf(%rsp)

mov $MSR_SPEC_CTRL, %ecx
xor %edx, %edx
@@ -316,7 +316,7 @@
* DO_SPEC_CTRL_ENTRY maybexen=1
* but with conditionals rather than alternatives.
*/
- movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %ebx
+ movzbl STACK_CPUINFO_FIELD(scf)(%r14), %ebx

test $SCF_ist_ibpb, %bl
jz .L\@_skip_ibpb
@@ -341,7 +341,7 @@
testb $3, UREGS_cs(%rsp)
setnz %al
not %eax
- and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ and %al, STACK_CPUINFO_FIELD(scf)(%r14)

/* Load Xen's intended value. */
mov $MSR_SPEC_CTRL, %ecx
@@ -375,7 +375,7 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
* Requires %r12=ist_exit, %r14=stack_end, %rsp=regs
* Clobbers %rax, %rbx, %rcx, %rdx
*/
- movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %ebx
+ movzbl STACK_CPUINFO_FIELD(scf)(%r14), %ebx

testb $SCF_ist_sc_msr, %bl
jz .L\@_skip_sc_msr
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index a3d3f797bb..25017b5d96 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -2034,7 +2034,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)

if ( bsp_delay_spec_ctrl )
{
- info->spec_ctrl_flags &= ~SCF_use_shadow;
+ info->scf &= ~SCF_use_shadow;
barrier();
wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
info->last_spec_ctrl = default_xen_spec_ctrl;
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 41c8174507..af13b32672 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -57,7 +57,7 @@ static bool __initdata opt_lock_harden;

bool __initdata bsp_delay_spec_ctrl;
uint8_t __ro_after_init default_xen_spec_ctrl;
-uint8_t __ro_after_init default_spec_ctrl_flags;
+uint8_t __ro_after_init default_scf;

paddr_t __ro_after_init l1tf_addr_mask, __ro_after_init l1tf_safe_maddr;
bool __ro_after_init cpu_has_bug_l1tf;
@@ -1105,7 +1105,7 @@ static void __init ibpb_calculations(void)
* NMI/#MC, so can't interrupt Xen ahead of having already flushed the
* BTB.
*/
- default_spec_ctrl_flags |= SCF_ist_ibpb;
+ default_scf |= SCF_ist_ibpb;
}
if ( opt_ibpb_entry_hvm )
setup_force_cpu_cap(X86_FEATURE_IBPB_ENTRY_HVM);
@@ -1606,7 +1606,7 @@ void spec_ctrl_init_domain(struct domain *d)
bool ibpb = ((pv ? opt_ibpb_entry_pv : opt_ibpb_entry_hvm) &&
(d->domain_id != 0 || opt_ibpb_entry_dom0));

- d->arch.spec_ctrl_flags =
+ d->arch.scf =
(verw ? SCF_verw : 0) |
(ibpb ? SCF_entry_ibpb : 0) |
0;
@@ -1711,7 +1711,7 @@ void __init init_speculation_mitigations(void)
{
if ( opt_msr_sc_pv )
{
- default_spec_ctrl_flags |= SCF_ist_sc_msr;
+ default_scf |= SCF_ist_sc_msr;
setup_force_cpu_cap(X86_FEATURE_SC_MSR_PV);
}

@@ -1722,7 +1722,7 @@ void __init init_speculation_mitigations(void)
* Xen's value is not restored atomically. An early NMI hitting
* the VMExit path needs to restore Xen's value for safety.
*/
- default_spec_ctrl_flags |= SCF_ist_sc_msr;
+ default_scf |= SCF_ist_sc_msr;
setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM);
}
}
@@ -1857,7 +1857,7 @@ void __init init_speculation_mitigations(void)
if ( opt_rsb_pv )
{
setup_force_cpu_cap(X86_FEATURE_SC_RSB_PV);
- default_spec_ctrl_flags |= SCF_ist_rsb;
+ default_scf |= SCF_ist_rsb;
}

/*
@@ -1880,7 +1880,7 @@ void __init init_speculation_mitigations(void)
* possible rogue RSB speculation.
*/
if ( !cpu_has_svm )
- default_spec_ctrl_flags |= SCF_ist_rsb;
+ default_scf |= SCF_ist_rsb;
}

srso_calculations(hw_smt_enabled);
@@ -1893,7 +1893,7 @@ void __init init_speculation_mitigations(void)
if ( opt_eager_fpu == -1 )
opt_eager_fpu = should_use_eager_fpu();

- /* (Re)init BSP state now that default_spec_ctrl_flags has been calculated. */
+ /* (Re)init BSP state now that default_scf has been calculated. */
init_shadow_spec_ctrl_state();

/*
@@ -2166,7 +2166,7 @@ void __init init_speculation_mitigations(void)
{
info->shadow_spec_ctrl = 0;
barrier();
- info->spec_ctrl_flags |= SCF_use_shadow;
+ info->scf |= SCF_use_shadow;
barrier();
}

diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 85c7d0c989..9aa0970a18 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -154,7 +154,7 @@ void __dummy__(void)
OFFSET(CPUINFO_shadow_spec_ctrl, struct cpu_info, shadow_spec_ctrl);
OFFSET(CPUINFO_xen_spec_ctrl, struct cpu_info, xen_spec_ctrl);
OFFSET(CPUINFO_last_spec_ctrl, struct cpu_info, last_spec_ctrl);
- OFFSET(CPUINFO_spec_ctrl_flags, struct cpu_info, spec_ctrl_flags);
+ OFFSET(CPUINFO_scf, struct cpu_info, scf);
OFFSET(CPUINFO_root_pgt_changed, struct cpu_info, root_pgt_changed);
OFFSET(CPUINFO_use_pv_cr3, struct cpu_info, use_pv_cr3);
DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 3bbe3a79a5..375dba9ad3 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -164,8 +164,8 @@ ENTRY(compat_restore_all_guest)

/* Account for ev/ec having already been popped off the stack. */
SPEC_CTRL_COND_VERW \
- scf=STK_REL(CPUINFO_spec_ctrl_flags, CPUINFO_rip), \
- sel=STK_REL(CPUINFO_verw_sel, CPUINFO_rip)
+ scf=STK_REL(CPUINFO_scf, CPUINFO_rip), \
+ sel=STK_REL(CPUINFO_verw_sel, CPUINFO_rip)

.Lft0: iretq
_ASM_PRE_EXTABLE(.Lft0, handle_exception)
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 2c7512130f..4e49ef259a 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -692,7 +692,7 @@ UNLIKELY_END(exit_cr3)
/*
* When the CPU pushed this exception frame, it zero-extended eflags.
* For an IST exit, SPEC_CTRL_EXIT_TO_XEN stashed shadow copies of
- * spec_ctrl_flags and ver_sel above eflags, as we can't use any GPRs,
+ * scf and verw_sel above eflags, as we can't use any GPRs,
* and we're at a random place on the stack, not in a CPUINFO block.
*
* Account for ev/ec having already been popped off the stack.
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.18