Mailing List Archive

[PATCH 3/8] xsm: Revert "Fix xsm_mmu_* and xsm_update_va_mapping hooks"
This reverts 23220:56a3b9c7367f, which removed all validation of the
target pages in the mapping. The crash that change worked around was
properly fixed by marking pages without known SIDs in 22207:20f139010445.

Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
---
xen/arch/x86/mm.c | 30 +++++++++++-------------------
xen/include/xsm/xsm.h | 28 +++++++++++++---------------
xen/xsm/dummy.c | 11 +++++------
xen/xsm/flask/hooks.c | 41 +++++++++++++++++++++++++++++++++--------
4 files changed, 62 insertions(+), 48 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index b00c277..64af6ff 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3517,6 +3517,9 @@ int do_mmu_update(
{
p2m_type_t p2mt;

+ rc = xsm_mmu_normal_update(d, pg_owner, req.val);
+ if ( rc )
+ break;
rc = -EINVAL;

req.ptr -= cmd;
@@ -3545,14 +3548,6 @@ int do_mmu_update(
(unsigned long)(req.ptr & ~PAGE_MASK));
page = mfn_to_page(mfn);

- rc = xsm_mmu_normal_update(d, req.val, page);
- if ( rc ) {
- unmap_domain_page_with_cache(va, &mapcache);
- put_page(page);
- put_gfn(pt_owner, gmfn);
- break;
- }
-
if ( page_lock(page) )
{
switch ( page->u.inuse.type_info & PGT_type_mask )
@@ -3736,6 +3731,10 @@ int do_mmu_update(
mfn = req.ptr >> PAGE_SHIFT;
gpfn = req.val;

+ rc = xsm_mmu_machphys_update(d, mfn);
+ if ( rc )
+ break;
+
if ( unlikely(!get_page_from_pagenr(mfn, pg_owner)) )
{
MEM_LOG("Could not get page for mach->phys update");
@@ -3750,10 +3749,6 @@ int do_mmu_update(
break;
}

- rc = xsm_mmu_machphys_update(d, mfn_to_page(mfn));
- if ( rc )
- break;
-
set_gpfn_from_mfn(mfn, gpfn);

paging_mark_dirty(pg_owner, mfn);
@@ -4380,6 +4375,10 @@ static int __do_update_va_mapping(

perfc_incr(calls_to_update_va);

+ rc = xsm_update_va_mapping(d, pg_owner, val);
+ if ( rc )
+ return rc;
+
rc = -EINVAL;
pl1e = guest_map_l1e(v, va, &gl1mfn);
if ( unlikely(!pl1e || !get_page_from_pagenr(gl1mfn, d)) )
@@ -4399,13 +4398,6 @@ static int __do_update_va_mapping(
goto out;
}

- rc = xsm_update_va_mapping(d, val, gl1pg);
- if ( rc ) {
- page_unlock(gl1pg);
- put_page(gl1pg);
- goto out;
- }
-
rc = mod_l1_entry(pl1e, val, gl1mfn, 0, v, pg_owner);

page_unlock(gl1pg);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 82f510d..da1f5d0 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -141,12 +141,11 @@ struct xsm_operations {
int (*getidletime) (void);
int (*machine_memory_map) (void);
int (*domain_memory_map) (struct domain *d);
- int (*mmu_normal_update) (struct domain *d,
- intpte_t fpte, struct page_info *page);
- int (*mmu_machphys_update) (struct domain *d, struct page_info *page);
- int (*update_va_mapping) (struct domain *d,
- l1_pgentry_t pte,
- struct page_info *page);
+ int (*mmu_normal_update) (struct domain *d, struct domain *f,
+ intpte_t fpte);
+ int (*mmu_machphys_update) (struct domain *d, unsigned long mfn);
+ int (*update_va_mapping) (struct domain *d, struct domain *f,
+ l1_pgentry_t pte);
int (*add_to_physmap) (struct domain *d1, struct domain *d2);
int (*sendtrigger) (struct domain *d);
int (*bind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq *bind);
@@ -594,22 +593,21 @@ static inline int xsm_domain_memory_map(struct domain *d)
return xsm_call(domain_memory_map(d));
}

-static inline int xsm_mmu_normal_update (struct domain *d,
- intpte_t fpte, struct page_info *page)
+static inline int xsm_mmu_normal_update (struct domain *d, struct domain *f,
+ intpte_t fpte)
{
- return xsm_call(mmu_normal_update(d, fpte, page));
+ return xsm_call(mmu_normal_update(d, f, fpte));
}

-static inline int xsm_mmu_machphys_update (struct domain *d, struct page_info *page)
+static inline int xsm_mmu_machphys_update (struct domain *d, unsigned long mfn)
{
- return xsm_call(mmu_machphys_update(d, page));
+ return xsm_call(mmu_machphys_update(d, mfn));
}

-static inline int xsm_update_va_mapping(struct domain *d,
- l1_pgentry_t pte,
- struct page_info *page)
+static inline int xsm_update_va_mapping(struct domain *d, struct domain *f,
+ l1_pgentry_t pte)
{
- return xsm_call(update_va_mapping(d, pte, page));
+ return xsm_call(update_va_mapping(d, f, pte));
}

static inline int xsm_add_to_physmap(struct domain *d1, struct domain *d2)
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 1b50d0e..ef461e6 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -400,20 +400,19 @@ static int dummy_domain_memory_map (struct domain *d)
return 0;
}

-static int dummy_mmu_normal_update (struct domain *d,
- intpte_t fpte, struct page_info *page)
+static int dummy_mmu_normal_update (struct domain *d, struct domain *f,
+ intpte_t fpte)
{
return 0;
}

-static int dummy_mmu_machphys_update (struct domain *d, struct page_info *page)
+static int dummy_mmu_machphys_update (struct domain *d, unsigned long mfn)
{
return 0;
}

-static int dummy_update_va_mapping (struct domain *d,
- l1_pgentry_t pte,
- struct page_info *page)
+static int dummy_update_va_mapping (struct domain *d, struct domain *f,
+ l1_pgentry_t pte)
{
return 0;
}
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 97ae4d9..2cb3e16 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -348,6 +348,26 @@ static int get_page_sid(struct page_info *page, u32 *sid)
return rc;
}

+static int get_mfn_sid(unsigned long mfn, u32 *sid)
+{
+ int rc = 0;
+ struct page_info *page;
+
+ if ( mfn_valid(mfn) )
+ {
+ /*mfn is valid if this is a page that Xen is tracking!*/
+ page = mfn_to_page(mfn);
+ rc = get_page_sid(page, sid);
+ }
+ else
+ {
+ /*Possibly an untracked IO page?*/
+ rc = security_iomem_sid(mfn, sid);
+ }
+
+ return rc;
+}
+
static int flask_memory_adjust_reservation(struct domain *d1, struct domain *d2)
{
return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__ADJUST);
@@ -987,11 +1007,12 @@ static int flask_domain_memory_map(struct domain *d)
return domain_has_perm(current->domain, d, SECCLASS_MMU, MMU__MEMORYMAP);
}

-static int flask_mmu_normal_update(struct domain *d,
- intpte_t fpte, struct page_info *page)
+static int flask_mmu_normal_update(struct domain *d, struct domain *f,
+ intpte_t fpte)
{
int rc = 0;
u32 map_perms = MMU__MAP_READ;
+ unsigned long fmfn;
struct domain_security_struct *dsec;
u32 fsid;

@@ -1000,38 +1021,42 @@ static int flask_mmu_normal_update(struct domain *d,
if ( l1e_get_flags(l1e_from_intpte(fpte)) & _PAGE_RW )
map_perms |= MMU__MAP_WRITE;

- rc = get_page_sid(page, &fsid);
+ fmfn = get_gfn_untyped(f, l1e_get_pfn(l1e_from_intpte(fpte)));
+
+ rc = get_mfn_sid(fmfn, &fsid);
if ( rc )
return rc;

return avc_has_perm(dsec->sid, fsid, SECCLASS_MMU, map_perms, NULL);
}

-static int flask_mmu_machphys_update(struct domain *d, struct page_info *page)
+static int flask_mmu_machphys_update(struct domain *d, unsigned long mfn)
{
int rc = 0;
u32 psid;
struct domain_security_struct *dsec;
dsec = d->ssid;

- rc = get_page_sid(page, &psid);
+ rc = get_mfn_sid(mfn, &psid);
if ( rc )
return rc;

return avc_has_perm(dsec->sid, psid, SECCLASS_MMU, MMU__UPDATEMP, NULL);
}

-static int flask_update_va_mapping(struct domain *d,
- l1_pgentry_t pte, struct page_info *page)
+static int flask_update_va_mapping(struct domain *d, struct domain *f,
+ l1_pgentry_t pte)
{
int rc = 0;
u32 psid;
u32 map_perms = MMU__MAP_READ;
+ unsigned long mfn;
struct domain_security_struct *dsec;

dsec = d->ssid;

- rc = get_page_sid(page, &psid);
+ mfn = get_gfn_untyped(f, l1e_get_pfn(pte));
+ rc = get_mfn_sid(mfn, &psid);
if ( rc )
return rc;

--
1.7.7.3


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel