[PATCH 12/22] arch/x86: Add missing mem_sharing XSM hooks
This patch splits up the mem_sharing and mem_event XSM hooks to better
cover what the code is doing. It also converts the utility function
get_mem_event_op_target into rcu_lock_live_remote_domain_by_id and moves
it to common code, since it contains no mm-specific logic.

Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Jan Beulich <jbeulich@suse.com>
Cc: Keir Fraser <keir@xen.org>
---
xen/arch/x86/domctl.c | 8 +++-----
xen/arch/x86/mm/mem_event.c | 41 +++++++++++--------------------------
xen/arch/x86/mm/mem_sharing.c | 25 ++++++++++++++++++----
xen/common/domain.c | 15 ++++++++++++++
xen/include/asm-x86/mem_event.h | 1 -
xen/include/xen/sched.h | 6 ++++++
xen/include/xsm/dummy.h | 23 ++++++++++++++++++++-
xen/include/xsm/xsm.h | 24 +++++++++++++++++++---
xen/xsm/dummy.c | 5 ++++-
xen/xsm/flask/hooks.c | 25 ++++++++++++++++++++--
xen/xsm/flask/policy/access_vectors | 1 +
11 files changed, 128 insertions(+), 46 deletions(-)
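
For orientation before the diff: a minimal sketch of the call-site pattern
the patch introduces, combining the new common helper
rcu_lock_live_remote_domain_by_id() with a per-operation XSM hook. This is
only an illustrative summary of the changes to do_mem_event_op() and
mem_sharing_memop() below, not additional code in the patch; the wrapper
name example_mem_op() is hypothetical.

    /* Hypothetical caller showing the lookup + XSM-check ordering. */
    int example_mem_op(domid_t domid, int op)
    {
        struct domain *d;
        int rc;

        /* Lock the remote domain; fails with -EINVAL if it is dying. */
        rc = rcu_lock_live_remote_domain_by_id(domid, &d);
        if ( rc )
            return rc;

        /* Per-operation XSM check now happens after the lookup. */
        rc = xsm_mem_event_op(d, op);
        if ( rc )
            goto out;

        /* ... dispatch on op against d ... */

     out:
        rcu_unlock_domain(d);
        return rc;
    }

Doing the XSM check after the lookup lets the hook see the target domain
(needed for FLASK to compute the domain pair's permissions), at the cost of
taking the RCU lock before a possible -EPERM.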

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 0e81010..10558a0 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1223,10 +1223,8 @@ long arch_do_domctl(

case XEN_DOMCTL_mem_event_op:
{
- ret = xsm_mem_event(d);
- if ( !ret )
- ret = mem_event_domctl(d, &domctl->u.mem_event_op,
- guest_handle_cast(u_domctl, void));
+ ret = mem_event_domctl(d, &domctl->u.mem_event_op,
+ guest_handle_cast(u_domctl, void));
copyback = 1;
}
break;
@@ -1265,7 +1263,7 @@ long arch_do_domctl(
if ( current->domain == d )
break;

- ret = xsm_mem_event(d);
+ ret = xsm_mem_event_setup(d);
if ( !ret ) {
p2m = p2m_get_hostp2m(d);
p2m->access_required = domctl->u.access_required.access_required;
diff --git a/xen/arch/x86/mm/mem_event.c b/xen/arch/x86/mm/mem_event.c
index 27d1cf4..c2b3670 100644
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -29,6 +29,7 @@
#include <asm/mem_paging.h>
#include <asm/mem_access.h>
#include <asm/mem_sharing.h>
+#include <xsm/xsm.h>

/* for public/io/ring.h macros */
#define xen_mb() mb()
@@ -439,35 +440,19 @@ static void mem_sharing_notification(struct vcpu *v, unsigned int port)
mem_sharing_sharing_resume(v->domain);
}

-struct domain *get_mem_event_op_target(uint32_t domain, int *rc)
-{
- struct domain *d;
-
- /* Get the target domain */
- *rc = rcu_lock_remote_target_domain_by_id(domain, &d);
- if ( *rc != 0 )
- return NULL;
-
- /* Not dying? */
- if ( d->is_dying )
- {
- rcu_unlock_domain(d);
- *rc = -EINVAL;
- return NULL;
- }
-
- return d;
-}
-
int do_mem_event_op(int op, uint32_t domain, void *arg)
{
int ret;
struct domain *d;

- d = get_mem_event_op_target(domain, &ret);
- if ( !d )
+ ret = rcu_lock_live_remote_domain_by_id(domain, &d);
+ if ( ret )
return ret;

+ ret = xsm_mem_event_op(d, op);
+ if ( ret )
+ goto out;
+
switch (op)
{
case XENMEM_paging_op:
@@ -483,6 +468,7 @@ int do_mem_event_op(int op, uint32_t domain, void *arg)
ret = -ENOSYS;
}

+ out:
rcu_unlock_domain(d);
return ret;
}
@@ -516,6 +502,10 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
{
int rc;

+ rc = xsm_mem_event_control(d, mec->mode, mec->op);
+ if ( rc )
+ return rc;
+
if ( unlikely(d == current->domain) )
{
gdprintk(XENLOG_INFO, "Tried to do a memory event op on itself.\n");
@@ -537,13 +527,6 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
return -EINVAL;
}

- /* TODO: XSM hook */
-#if 0
- rc = xsm_mem_event_control(d, mec->op);
- if ( rc )
- return rc;
-#endif
-
rc = -ENOSYS;

switch ( mec->mode )
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index e91aac5..4624314 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -34,6 +34,7 @@
#include <asm/atomic.h>
#include <xen/rcupdate.h>
#include <asm/event.h>
+#include <xsm/xsm.h>

#include "mm-locks.h"

@@ -1345,10 +1346,18 @@ int mem_sharing_memop(struct domain *d, xen_mem_sharing_op_t *mec)
if ( !mem_sharing_enabled(d) )
return -EINVAL;

- cd = get_mem_event_op_target(mec->u.share.client_domain, &rc);
- if ( !cd )
+ rc = rcu_lock_live_remote_domain_by_id(mec->u.share.client_domain,
+ &cd);
+ if ( rc )
return rc;

+ rc = xsm_mem_sharing_op(d, cd, mec->op);
+ if ( rc )
+ {
+ rcu_unlock_domain(cd);
+ return rc;
+ }
+
if ( !mem_sharing_enabled(cd) )
{
rcu_unlock_domain(cd);
@@ -1401,10 +1410,18 @@ int mem_sharing_memop(struct domain *d, xen_mem_sharing_op_t *mec)
if ( !mem_sharing_enabled(d) )
return -EINVAL;

- cd = get_mem_event_op_target(mec->u.share.client_domain, &rc);
- if ( !cd )
+ rc = rcu_lock_live_remote_domain_by_id(mec->u.share.client_domain,
+ &cd);
+ if ( rc )
return rc;

+ rc = xsm_mem_sharing_op(d, cd, mec->op);
+ if ( rc )
+ {
+ rcu_unlock_domain(cd);
+ return rc;
+ }
+
if ( !mem_sharing_enabled(cd) )
{
rcu_unlock_domain(cd);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 2f8ef00..ec8efe8 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -475,6 +475,21 @@ int rcu_lock_remote_domain_by_id(domid_t dom, struct domain **d)
return 0;
}

+int rcu_lock_live_remote_domain_by_id(domid_t dom, struct domain **d)
+{
+ int rv;
+ rv = rcu_lock_remote_domain_by_id(dom, d);
+ if ( rv )
+ return rv;
+ if ( (*d)->is_dying )
+ {
+ rcu_unlock_domain(*d);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int domain_kill(struct domain *d)
{
int rc = 0;
diff --git a/xen/include/asm-x86/mem_event.h b/xen/include/asm-x86/mem_event.h
index e17f36b..5959621 100644
--- a/xen/include/asm-x86/mem_event.h
+++ b/xen/include/asm-x86/mem_event.h
@@ -62,7 +62,6 @@ void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
mem_event_response_t *rsp);

-struct domain *get_mem_event_op_target(uint32_t domain, int *rc);
int do_mem_event_op(int op, uint32_t domain, void *arg);
int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
XEN_GUEST_HANDLE_PARAM(void) u_domctl);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 6c55039..90a6537 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -484,6 +484,12 @@ int rcu_lock_remote_target_domain_by_id(domid_t dom, struct domain **d);
*/
int rcu_lock_remote_domain_by_id(domid_t dom, struct domain **d);

+/*
+ * As rcu_lock_remote_domain_by_id() but will fail EINVAL if the domain is
+ * dying.
+ */
+int rcu_lock_live_remote_domain_by_id(domid_t dom, struct domain **d);
+
/* Finish a RCU critical region started by rcu_lock_domain_by_id(). */
static inline void rcu_unlock_domain(struct domain *d)
{
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index e42965c..42b2285 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -551,16 +551,37 @@ static XSM_INLINE int xsm_hvm_inject_msi(struct domain *d)
return 0;
}

-static XSM_INLINE int xsm_mem_event(struct domain *d)
+static XSM_INLINE int xsm_mem_event_setup(struct domain *d)
{
return 0;
}

+static XSM_INLINE int xsm_mem_event_control(struct domain *d, int mode, int op)
+{
+ if ( !IS_PRIV(current->domain) )
+ return -EPERM;
+ return 0;
+}
+
+static XSM_INLINE int xsm_mem_event_op(struct domain *d, int op)
+{
+ if ( !IS_PRIV_FOR(current->domain, d) )
+ return -EPERM;
+ return 0;
+}
+
static XSM_INLINE int xsm_mem_sharing(struct domain *d)
{
return 0;
}

+static XSM_INLINE int xsm_mem_sharing_op(struct domain *d, struct domain *cd, int op)
+{
+ if ( !IS_PRIV_FOR(current->domain, cd) )
+ return -EPERM;
+ return 0;
+}
+
static XSM_INLINE int xsm_apic(struct domain *d, int cmd)
{
if ( !IS_PRIV(d) )
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 470e3c0..88aa95a 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -151,8 +151,11 @@ struct xsm_operations {
int (*hvm_set_isa_irq_level) (struct domain *d);
int (*hvm_set_pci_link_route) (struct domain *d);
int (*hvm_inject_msi) (struct domain *d);
- int (*mem_event) (struct domain *d);
+ int (*mem_event_setup) (struct domain *d);
+ int (*mem_event_control) (struct domain *d, int mode, int op);
+ int (*mem_event_op) (struct domain *d, int op);
int (*mem_sharing) (struct domain *d);
+ int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
int (*apic) (struct domain *d, int cmd);
int (*xen_settime) (void);
int (*memtype) (uint32_t access);
@@ -665,9 +668,19 @@ static inline int xsm_hvm_inject_msi (struct domain *d)
return xsm_ops->hvm_inject_msi(d);
}

-static inline int xsm_mem_event (struct domain *d)
+static inline int xsm_mem_event_setup (struct domain *d)
{
- return xsm_ops->mem_event(d);
+ return xsm_ops->mem_event_setup(d);
+}
+
+static inline int xsm_mem_event_control (struct domain *d, int mode, int op)
+{
+ return xsm_ops->mem_event_control(d, mode, op);
+}
+
+static inline int xsm_mem_event_op (struct domain *d, int op)
+{
+ return xsm_ops->mem_event_op(d, op);
}

static inline int xsm_mem_sharing (struct domain *d)
@@ -675,6 +688,11 @@ static inline int xsm_mem_sharing (struct domain *d)
return xsm_ops->mem_sharing(d);
}

+static inline int xsm_mem_sharing_op (struct domain *d, struct domain *cd, int op)
+{
+ return xsm_ops->mem_sharing_op(d, cd, op);
+}
+
static inline int xsm_apic (struct domain *d, int cmd)
{
return xsm_ops->apic(d, cmd);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 1e7f42c..bc9d30f 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -136,8 +136,11 @@ void xsm_fixup_ops (struct xsm_operations *ops)
set_to_dummy_if_null(ops, hvm_set_isa_irq_level);
set_to_dummy_if_null(ops, hvm_set_pci_link_route);
set_to_dummy_if_null(ops, hvm_inject_msi);
- set_to_dummy_if_null(ops, mem_event);
+ set_to_dummy_if_null(ops, mem_event_setup);
+ set_to_dummy_if_null(ops, mem_event_control);
+ set_to_dummy_if_null(ops, mem_event_op);
set_to_dummy_if_null(ops, mem_sharing);
+ set_to_dummy_if_null(ops, mem_sharing_op);
set_to_dummy_if_null(ops, apic);
set_to_dummy_if_null(ops, xen_settime);
set_to_dummy_if_null(ops, memtype);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index eaf9650..f36fe2c 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1278,7 +1278,17 @@ static int flask_hvm_inject_msi(struct domain *d)
return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
}

-static int flask_mem_event(struct domain *d)
+static int flask_mem_event_setup(struct domain *d)
+{
+ return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
+}
+
+static int flask_mem_event_control(struct domain *d, int mode, int op)
+{
+ return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
+}
+
+static int flask_mem_event_op(struct domain *d, int op)
{
return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
}
@@ -1288,6 +1298,14 @@ static int flask_mem_sharing(struct domain *d)
return current_has_perm(d, SECCLASS_HVM, HVM__MEM_SHARING);
}

+static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
+{
+ int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
+ if ( rc )
+ return rc;
+ return domain_has_perm(d, cd, SECCLASS_HVM, HVM__SHARE_MEM);
+}
+
static int flask_apic(struct domain *d, int cmd)
{
u32 perm;
@@ -1737,8 +1755,11 @@ static struct xsm_operations flask_ops = {
.hvm_set_isa_irq_level = flask_hvm_set_isa_irq_level,
.hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
.hvm_inject_msi = flask_hvm_inject_msi,
- .mem_event = flask_mem_event,
+ .mem_event_setup = flask_mem_event_setup,
+ .mem_event_control = flask_mem_event_control,
+ .mem_event_op = flask_mem_event_op,
.mem_sharing = flask_mem_sharing,
+ .mem_sharing_op = flask_mem_sharing_op,
.apic = flask_apic,
.xen_settime = flask_xen_settime,
.memtype = flask_memtype,
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index ea65e45..45ac437 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -102,6 +102,7 @@ class hvm
mem_sharing
audit_p2m
send_irq
+ share_mem
}

class event
--
1.7.11.7

