[xen staging-4.15] x86/mm: add speculation barriers to open coded locks
commit 9c7cdd55dc05b9500792e3ea1ad4bf88470190ed
Author: Roger Pau Monné <roger.pau@citrix.com>
AuthorDate: Mon Mar 4 18:08:48 2024 +0100
Commit: Andrew Cooper <andrew.cooper3@citrix.com>
CommitDate: Tue Mar 12 16:37:45 2024 +0000

x86/mm: add speculation barriers to open coded locks

Add a speculation barrier to the clearly identified open-coded lock-taking
functions.

Note that the memory sharing page_lock() replacement (_page_lock()) is left
as-is, as the code is experimental and not security supported.

This is part of XSA-453 / CVE-2024-2193

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
(cherry picked from commit 42a572a38e22a97d86a4b648a22597628d5b42e4)
---
xen/arch/x86/mm.c | 6 ++++--
xen/include/asm-x86/mm.h | 4 +++-
2 files changed, 7 insertions(+), 3 deletions(-)
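
For readers following along, the l3t_lock() hunk below boils down to the
pattern sketched here. This is a minimal, self-contained illustration, not
Xen code: struct page, the PGT_locked bit position and the helper names are
stand-ins, cmpxchg() is replaced by C11 atomics, and block_lock_speculation()
is modeled as a bare LFENCE, whereas Xen's real helper is only active under
its speculative-hardening configuration. The switch to always_inline in the
patch ensures the barrier lands in the caller's context; were the lock left
out of line, the caller could still speculate past the call before the fence
executes.

#include <stdatomic.h>

/* Stand-in for Xen's block_lock_speculation(); modeled as an LFENCE. */
static inline void block_lock_speculation(void)
{
    asm volatile ( "lfence" ::: "memory" );  /* x86 only */
}

#define PGT_locked (1UL << 0)  /* illustrative bit position */

struct page { _Atomic unsigned long type_info; };

static void l3t_lock_sketch(struct page *pg)
{
    unsigned long x, nx;

    do {
        /* Spin until the lock bit is observed clear. */
        while ( (x = atomic_load(&pg->type_info)) & PGT_locked )
            ;
        nx = x | PGT_locked;
        /* Try to set the lock bit atomically; retry if we raced. */
    } while ( !atomic_compare_exchange_weak(&pg->type_info, &x, nx) );

    /*
     * The lock is now held architecturally, but a CPU could still
     * enter the critical section speculatively before the cmpxchg
     * outcome is known, re-creating the race the lock exists to
     * prevent (XSA-453).  The fence closes that window.
     */
    block_lock_speculation();
}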

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 44ac8cae76..ad22543d1b 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2004,7 +2004,7 @@ static inline bool current_locked_page_ne_check(struct page_info *page) {
#define current_locked_page_ne_check(x) true
#endif

-int page_lock(struct page_info *page)
+int page_lock_unsafe(struct page_info *page)
{
unsigned long x, nx;

@@ -2065,7 +2065,7 @@ void page_unlock(struct page_info *page)
* l3t_lock(), so to avoid deadlock we must avoid grabbing them in
* reverse order.
*/
-static void l3t_lock(struct page_info *page)
+static always_inline void l3t_lock(struct page_info *page)
{
unsigned long x, nx;

@@ -2074,6 +2074,8 @@ static void l3t_lock(struct page_info *page)
cpu_relax();
nx = x | PGT_locked;
} while ( cmpxchg(&page->u.inuse.type_info, x, nx) != x );
+
+ block_lock_speculation();
}

static void l3t_unlock(struct page_info *page)
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index cffd0d6425..917fbe29bb 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -393,7 +393,9 @@ const struct platform_bad_page *get_platform_badpages(unsigned int *array_size);
* The use of PGT_locked in mem_sharing does not collide, since mem_sharing is
* only supported for hvm guests, which do not have PV PTEs updated.
*/
-int page_lock(struct page_info *page);
+int page_lock_unsafe(struct page_info *page);
+#define page_lock(pg) lock_evaluate_nospec(page_lock_unsafe(pg))
+
void page_unlock(struct page_info *page);

void put_page_type(struct page_info *page);
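
A note on the asm-x86/mm.h hunk above: unlike l3t_lock(), page_lock() can
fail, and callers branch on its return value, so the barrier is applied at
the call sites by renaming the raw primitive and wrapping it in
lock_evaluate_nospec(). That helper's definition is not part of this patch;
a plausible stand-in, reusing block_lock_speculation() from the earlier
sketch, is:

static inline int lock_evaluate_nospec_sketch(int lock_taken)
{
    /*
     * Fence on the lock-taken path.  Even when the lock was not in
     * fact taken, a branch mispredicted as taken still executes this
     * fence speculatively, stalling the would-be critical section.
     */
    if ( lock_taken )
        block_lock_speculation();

    return lock_taken;
}

Callers then follow the usual pattern (page_lock_unsafe() and page_unlock()
as in the patch):

if ( lock_evaluate_nospec_sketch(page_lock_unsafe(pg)) )
{
    /* critical section */
    page_unlock(pg);
}

The rename to page_lock_unsafe() also means no caller can reach the
barrier-less primitive through the old name; every page_lock() invocation
now picks up the fence via the macro.
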
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.15