[xen staging] x86/ept: force WB cache attributes for grant and foreign maps
commit 8af4b47f061edf6450f2b0a0a8134fdf1c13b3e5
Author: Roger Pau Monné <roger.pau@citrix.com>
AuthorDate: Thu Jun 17 18:00:57 2021 +0200
Commit: Jan Beulich <jbeulich@suse.com>
CommitDate: Thu Jun 17 18:00:57 2021 +0200

x86/ept: force WB cache attributes for grant and foreign maps

Force the WB type for grants and foreign pages. Those are usually
mapped over unpopulated physical ranges in the p2m, and such ranges
would usually be UC in the MTRR state, which is unlikely to be the
correct cache attribute. It's also cumbersome (or even impossible)
for the guest to set the MTRR type for all those mappings to WB, as
MTRR ranges are finite.

Note that this is not an issue on AMD, because the WB cache attribute
is already set on grants and foreign mappings in the p2m and MTRR
types are ignored there. Also, on AMD Xen cannot force a cache
attribute because of the lack of an ignore-PAT equivalent, so the
behavior here slightly diverges between AMD and Intel (or EPT vs
NPT/shadow).

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
---
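
For readers less familiar with EPT memory typing: epte_get_entry_emt()'s
return value becomes the entry's EMT field and *ipat its "ignore PAT" bit,
and forcing WB works because a set IPAT bit makes the EMT field
authoritative regardless of the guest's PAT (and hence MTRR-derived)
choice. Below is a minimal, self-contained sketch of that combination
rule; the struct layout, names and the simplified IPAT-clear merge are
illustrative assumptions, not Xen or SDM definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standard x86 memory type encodings (MTRR/PAT/EPT share these values). */
enum { MT_UC = 0, MT_WB = 6 };

/* Illustrative stand-in for the memory-type fields of an EPT leaf entry. */
struct ept_leaf_mt {
    uint8_t emt;   /* EPT memory type (bits 5:3 of the entry) */
    bool    ipat;  /* "ignore PAT" (bit 6 of the entry) */
};

/*
 * Effective type the guest observes through this leaf.  With IPAT set the
 * EMT field wins outright, which is what lets Xen force WB for grant and
 * foreign mappings.  The IPAT-clear branch is a simplification of the
 * full EPT/PAT combination table: it only models "UC anywhere wins",
 * which is the case the commit message is concerned about.
 */
static int effective_type(struct ept_leaf_mt e, int guest_pat_type)
{
    if ( e.ipat )
        return e.emt;

    return (guest_pat_type == MT_UC || e.emt == MT_UC) ? MT_UC : e.emt;
}

int main(void)
{
    /* A grant mapping after this patch: EMT=WB, IPAT=1 -> always WB. */
    struct ept_leaf_mt grant = { .emt = MT_WB, .ipat = true };

    /*
     * Before the patch the same GFN fell through to the MTRR lookup and,
     * the range being unpopulated, typically ended up EMT=UC, IPAT=0, so
     * the guest could not get WB even with a WB PAT entry.
     */
    struct ept_leaf_mt old = { .emt = MT_UC, .ipat = false };

    printf("grant map, guest PAT UC: %d\n", effective_type(grant, MT_UC)); /* 6 (WB) */
    printf("old behaviour, guest PAT WB: %d\n", effective_type(old, MT_WB)); /* 0 (UC) */
    return 0;
}
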
xen/arch/x86/hvm/vmx/vmx.c | 2 +-
xen/arch/x86/mm/p2m-ept.c | 37 +++++++++++++++++++++++++++++++------
xen/include/asm-x86/hvm/vmx/vmx.h | 2 +-
3 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 0d4b47681b..e09b7e3af9 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -423,7 +423,7 @@ static void domain_creation_finished(struct domain *d)
return;

ASSERT(epte_get_entry_emt(d, gfn, apic_access_mfn, 0, &ipat,
- true) == MTRR_TYPE_WRBACK);
+ p2m_mmio_direct) == MTRR_TYPE_WRBACK);
ASSERT(ipat);

if ( set_mmio_p2m_entry(d, gfn, apic_access_mfn, PAGE_ORDER_4K) )
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index f1d1d07e92..289001b66b 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -487,7 +487,7 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
}

int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
- unsigned int order, bool *ipat, bool direct_mmio)
+ unsigned int order, bool *ipat, p2m_type_t type)
{
int gmtrr_mtype, hmtrr_mtype;
struct vcpu *v = current;
@@ -518,7 +518,8 @@ int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
return MTRR_TYPE_UNCACHABLE;
}

- if ( !direct_mmio && !is_iommu_enabled(d) && !cache_flush_permitted(d) )
+ if ( type != p2m_mmio_direct && !is_iommu_enabled(d) &&
+ !cache_flush_permitted(d) )
{
*ipat = true;
return MTRR_TYPE_WRBACK;
@@ -535,9 +536,33 @@ int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
}
}

- if ( direct_mmio )
+ switch ( type )
+ {
+ case p2m_mmio_direct:
return MTRR_TYPE_UNCACHABLE;

+ case p2m_grant_map_ro:
+ case p2m_grant_map_rw:
+ case p2m_map_foreign:
+ /*
+ * Force WB type for grants and foreign pages. Those are usually mapped
+ * over unpopulated physical ranges in the p2m, and those would usually
+ * be UC in the MTRR state, which is unlikely to be the correct cache
+ * attribute. It's also cumbersome (or even impossible) for the guest
+ * to be setting the MTRR type for all those mappings as WB, as MTRR
+ * ranges are finite.
+ *
+ * Note that on AMD we cannot force a cache attribute because of the
+ * lack of ignore PAT equivalent, so the behavior here slightly
+ * diverges. See p2m_type_to_flags for the AMD attributes.
+ */
+ *ipat = true;
+ return MTRR_TYPE_WRBACK;
+
+ default:
+ break;
+ }
+
gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, gfn, order);
if ( gmtrr_mtype >= 0 )
{
@@ -640,7 +665,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
continue;
e.emt = epte_get_entry_emt(p2m->domain, _gfn(gfn + i),
_mfn(e.mfn), 0, &ipat,
- e.sa_p2mt == p2m_mmio_direct);
+ e.sa_p2mt);
e.ipat = ipat;

nt = p2m_recalc_type(e.recalc, e.sa_p2mt, p2m, gfn + i);
@@ -659,7 +684,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
int emt = epte_get_entry_emt(p2m->domain, _gfn(gfn),
_mfn(e.mfn),
level * EPT_TABLE_ORDER, &ipat,
- e.sa_p2mt == p2m_mmio_direct);
+ e.sa_p2mt);
bool_t recalc = e.recalc;

if ( recalc && p2m_is_changeable(e.sa_p2mt) )
@@ -895,7 +920,7 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
bool ipat;
int emt = epte_get_entry_emt(p2m->domain, _gfn(gfn), mfn,
i * EPT_TABLE_ORDER, &ipat,
- p2mt == p2m_mmio_direct);
+ p2mt);

if ( emt >= 0 )
new_entry.emt = emt;
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index f668ee1f09..0deb507490 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -600,7 +600,7 @@ void ept_p2m_uninit(struct p2m_domain *p2m);
void ept_walk_table(struct domain *d, unsigned long gfn);
bool_t ept_handle_misconfig(uint64_t gpa);
int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
- unsigned int order, bool *ipat, bool direct_mmio);
+ unsigned int order, bool *ipat, p2m_type_t type);
void setup_ept_dump(void);
void p2m_init_altp2m_ept(struct domain *d, unsigned int i);
/* Locate an alternate p2m by its EPTP */
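
Taken together, the p2m-ept.c hunks above replace the old direct_mmio
boolean with the p2m type and slot the grant/foreign case in ahead of the
MTRR-based fallback. The following is a simplified, self-contained sketch
of the resulting decision order in epte_get_entry_emt(); the domain
fields and p2m_type_t values are stand-ins, and the earlier checks
(visible as context in the hunks) plus the pinned-attribute/MTRR merge
are elided, so this is not the real Xen code.

#include <stdbool.h>

typedef enum {
    p2m_ram_rw,
    p2m_mmio_direct,
    p2m_grant_map_rw,
    p2m_grant_map_ro,
    p2m_map_foreign,
} p2m_type_t;

enum { MTRR_TYPE_UNCACHABLE = 0, MTRR_TYPE_WRBACK = 6 };

struct domain_mt {               /* stand-in for struct domain */
    bool iommu_enabled;
    bool cache_flush_permitted;
};

int sketch_entry_emt(const struct domain_mt *d, p2m_type_t type, bool *ipat)
{
    *ipat = false;

    /* No passed-through devices: everything may be WB, guest PAT ignored. */
    if ( type != p2m_mmio_direct && !d->iommu_enabled &&
         !d->cache_flush_permitted )
    {
        *ipat = true;
        return MTRR_TYPE_WRBACK;
    }

    switch ( type )
    {
    case p2m_mmio_direct:
        return MTRR_TYPE_UNCACHABLE;

    case p2m_grant_map_ro:
    case p2m_grant_map_rw:
    case p2m_map_foreign:
        /* New with this patch: force WB and ignore the guest PAT. */
        *ipat = true;
        return MTRR_TYPE_WRBACK;

    default:
        break;
    }

    /*
     * All other types still derive their EMT from pinned cache attributes
     * and the guest/host MTRRs; that logic is unchanged and elided here.
     */
    return MTRR_TYPE_WRBACK;
}
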
--
generated by git-patchbot for /home/xen/git/xen.git#staging