Mailing List Archive

[xen-unstable] Merge
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xensource.com>
# Date 1184763381 -3600
# Node ID e1f74a5a09cbb52ed67bb8b089c4cd2d31cc4a73
# Parent ad87a491287455e302a914ed17a2a00902136ec6
# Parent ad1c6cf0baafe149c9fe03be3255b95ffef68a34
Merge
---
tools/python/xen/util/acmpolicy.py | 17
tools/python/xen/util/security.py | 7
tools/python/xen/xend/XendAPI.py | 14
tools/python/xen/xend/XendVDI.py | 1
tools/python/xen/xend/XendXSPolicyAdmin.py | 9
tools/python/xen/xm/cfgbootpolicy.py | 5
tools/xm-test/lib/XmTestLib/XenAPIDomain.py | 4
tools/xm-test/lib/XmTestLib/acm.py | 52 +
tools/xm-test/tests/security-acm/01_security-acm_basic.py | 24
tools/xm-test/tests/security-acm/07_security-acm_pol_update.py | 303 +++++++
tools/xm-test/tests/security-acm/08_security-acm_xapi.py | 354 ++++++++
tools/xm-test/tests/security-acm/09_security-acm_pol_update.py | 427 ++++++++++
tools/xm-test/tests/security-acm/Makefile.am | 5
tools/xm-test/tests/security-acm/xm-test-new-security_policy.xml | 97 ++
xen/arch/x86/hvm/vpt.c | 8
15 files changed, 1286 insertions(+), 41 deletions(-)

diff -r ad87a4912874 -r e1f74a5a09cb tools/python/xen/util/acmpolicy.py
--- a/tools/python/xen/util/acmpolicy.py Wed Jul 18 13:56:00 2007 +0100
+++ b/tools/python/xen/util/acmpolicy.py Wed Jul 18 13:56:21 2007 +0100
@@ -122,7 +122,8 @@ class ACMPolicy(XSPolicy):
rc = -xsconstants.XSERR_GENERAL_FAILURE
if rc != xsconstants.XSERR_SUCCESS:
log.warn("XML did not validate against schema")
- rc = self.__validate_name_and_labels()
+ if rc == xsconstants.XSERR_SUCCESS:
+ rc = self.__validate_name_and_labels()
return rc

def __validate_name_and_labels(self):
@@ -626,14 +627,15 @@ class ACMPolicy(XSPolicy):
def policy_get_stes_of_vmlabel(self, vmlabel):
""" Get a list of all STEs of a given VMlabel """
return self.__policy_get_stes_of_labeltype(vmlabel,
- "VirtualMachineLabel")
+ "/SubjectLabels", "VirtualMachineLabel")

def policy_get_stes_of_resource(self, reslabel):
""" Get a list of all resources of a given VMlabel """
- return self.__policy_get_stes_of_labeltype(reslabel, "ResourceLabel")
-
- def __policy_get_stes_of_labeltype(self, label, labeltype):
- node = self.dom_get_node("SecurityLabelTemplate/SubjectLabels")
+ return self.__policy_get_stes_of_labeltype(reslabel,
+ "/ObjectLabels", "ResourceLabel")
+
+ def __policy_get_stes_of_labeltype(self, label, path, labeltype):
+ node = self.dom_get_node("SecurityLabelTemplate" + path)
if node:
i = 0
while i < len(node.childNodes):
@@ -661,7 +663,8 @@ class ACMPolicy(XSPolicy):
return False
for res in resources:
res_stes = self.policy_get_stes_of_resource(res)
- if len( set(res_stes).union( set(vm_stes) ) ) == 0:
+ if len(res_stes) == 0 or \
+ len( set(res_stes).intersection( set(vm_stes) ) ) == 0:
return False
return True

diff -r ad87a4912874 -r e1f74a5a09cb tools/python/xen/util/security.py
--- a/tools/python/xen/util/security.py Wed Jul 18 13:56:00 2007 +0100
+++ b/tools/python/xen/util/security.py Wed Jul 18 13:56:21 2007 +0100
@@ -799,9 +799,10 @@ def is_resource_in_use(resource):
lst.append(dominfo)
return lst

-def devices_equal(res1, res2):
+def devices_equal(res1, res2, mustexist=True):
""" Determine whether two devices are equal """
- return (unify_resname(res1) == unify_resname(res2))
+ return (unify_resname(res1, mustexist) ==
+ unify_resname(res2, mustexist))

def is_resource_in_use_by_dom(dominfo, resource):
""" Determine whether a resources is in use by a given domain
@@ -817,7 +818,7 @@ def is_resource_in_use_by_dom(dominfo, r
dev = devs[uuid]
if len(dev) >= 2 and dev[1].has_key('uname'):
# dev[0] is type, i.e. 'vbd'
- if devices_equal(dev[1]['uname'], resource):
+ if devices_equal(dev[1]['uname'], resource, mustexist=False):
log.info("RESOURCE IN USE: Domain %d uses %s." %
(dominfo.domid, resource))
return True
diff -r ad87a4912874 -r e1f74a5a09cb tools/python/xen/xend/XendAPI.py
--- a/tools/python/xen/xend/XendAPI.py Wed Jul 18 13:56:00 2007 +0100
+++ b/tools/python/xen/xend/XendAPI.py Wed Jul 18 13:56:21 2007 +0100
@@ -1410,22 +1410,22 @@ class XendAPI(object):
def VM_set_memory_dynamic_max(self, session, vm_ref, mem):
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_dynamic_max(int(mem))
- return xen_api_success_void()
+ return self._VM_save(dom)

def VM_set_memory_dynamic_min(self, session, vm_ref, mem):
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_dynamic_min(int(mem))
- return xen_api_success_void()
+ return self._VM_save(dom)

def VM_set_memory_static_max(self, session, vm_ref, mem):
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_static_max(int(mem))
- return xen_api_success_void()
+ return self._VM_save(dom)

def VM_set_memory_static_min(self, session, vm_ref, mem):
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
dom.set_memory_static_min(int(mem))
- return xen_api_success_void()
+ return self._VM_save(dom)

def VM_set_memory_dynamic_max_live(self, session, vm_ref, mem):
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
@@ -1620,7 +1620,8 @@ class XendAPI(object):
(rc, errors, oldlabel, new_ssidref) = \
dom.set_security_label(sec_label, old_label)
if rc != xsconstants.XSERR_SUCCESS:
- return xen_api_error(['SECURITY_ERROR', rc])
+ return xen_api_error(['SECURITY_ERROR', rc,
+ xsconstants.xserr2string(-rc)])
if rc == 0:
rc = new_ssidref
return xen_api_success(rc)
@@ -2239,7 +2240,8 @@ class XendAPI(object):
vdi = XendNode.instance().get_vdi_by_uuid(vdi_ref)
rc = vdi.set_security_label(sec_lab, old_lab)
if rc < 0:
- return xen_api_error(['SECURITY_ERROR', rc])
+ return xen_api_error(['SECURITY_ERROR', rc,
+ xsconstants.xserr2string(-rc)])
return xen_api_success(rc)

def VDI_get_security_label(self, session, vdi_ref):
diff -r ad87a4912874 -r e1f74a5a09cb tools/python/xen/xend/XendVDI.py
--- a/tools/python/xen/xend/XendVDI.py Wed Jul 18 13:56:00 2007 +0100
+++ b/tools/python/xen/xend/XendVDI.py Wed Jul 18 13:56:21 2007 +0100
@@ -24,6 +24,7 @@ from xen.util.xmlrpclib2 import stringif
from xen.util.xmlrpclib2 import stringify
from xmlrpclib import dumps, loads
from xen.util import security, xsconstants
+from xen.xend.XendError import SecurityError

KB = 1024
MB = 1024 * 1024
diff -r ad87a4912874 -r e1f74a5a09cb tools/python/xen/xend/XendXSPolicyAdmin.py
--- a/tools/python/xen/xend/XendXSPolicyAdmin.py Wed Jul 18 13:56:00 2007 +0100
+++ b/tools/python/xen/xend/XendXSPolicyAdmin.py Wed Jul 18 13:56:21 2007 +0100
@@ -56,7 +56,10 @@ class XSPolicyAdmin:
typ = data[1]
try:
if typ == xsconstants.ACM_POLICY_ID:
- self.xsobjs[ref] = ACMPolicy(name=name, ref=ref)
+ try:
+ self.xsobjs[ref] = ACMPolicy(name=name, ref=ref)
+ except Exception, e:
+ del self.policies[ref]
else:
del self.policies[ref]
except Exception, e:
@@ -271,6 +274,10 @@ class XSPolicyAdmin:
return pol
return None

+ def get_hv_loaded_policy_name(self):
+ security.refresh_security_policy()
+ return security.active_policy
+
def get_policy_by_name(self, name):
for pol in self.xsobjs.values():
if pol.get_name() == name:
diff -r ad87a4912874 -r e1f74a5a09cb tools/python/xen/xm/cfgbootpolicy.py
--- a/tools/python/xen/xm/cfgbootpolicy.py Wed Jul 18 13:56:00 2007 +0100
+++ b/tools/python/xen/xm/cfgbootpolicy.py Wed Jul 18 13:56:21 2007 +0100
@@ -170,8 +170,9 @@ def cfgbootpolicy_xapi(policy, user_titl
OptionError("No policy installed on system?")
acmpol = ACMPolicy(xml=xml)
if acmpol.get_name() != policy:
- OptionError("Policy installed on system '%s' does not match the "
- "request policy '%s'" % (acmpol.get_name(), policy))
+ raise OptionError("Policy installed on system '%s' does not "
+ "match the requested policy '%s'" %
+ (acmpol.get_name(), policy))
flags = int(policystate['flags']) | xsconstants.XS_INST_BOOT
rc = int(server.xenapi.XSPolicy.activate_xspolicy(xs_ref, flags))
if rc == flags:
diff -r ad87a4912874 -r e1f74a5a09cb tools/xm-test/lib/XmTestLib/XenAPIDomain.py
--- a/tools/xm-test/lib/XmTestLib/XenAPIDomain.py Wed Jul 18 13:56:00 2007 +0100
+++ b/tools/xm-test/lib/XmTestLib/XenAPIDomain.py Wed Jul 18 13:56:21 2007 +0100
@@ -23,6 +23,7 @@ import sys
import sys
from XmTestLib import *
from types import DictType
+from acm import *


class XenAPIConfig:
@@ -38,6 +39,9 @@ class XenAPIConfig:
'kernel' : 'PV_kernel',
'ramdisk': 'PV_ramdisk',
'root' : 'PV_args'}
+ if isACMEnabled():
+ #A default so every VM can start with ACM enabled
+ self.opts["security_label"] = "ACM:xm-test:red"

def setOpt(self, name, value):
"""Set an option in the config"""
diff -r ad87a4912874 -r e1f74a5a09cb tools/xm-test/lib/XmTestLib/acm.py
--- a/tools/xm-test/lib/XmTestLib/acm.py Wed Jul 18 13:56:00 2007 +0100
+++ b/tools/xm-test/lib/XmTestLib/acm.py Wed Jul 18 13:56:21 2007 +0100
@@ -19,6 +19,9 @@
"""
from Test import *
from xen.util import security
+from xen.xm.main import server
+from xen.util import xsconstants
+import re

try:
from acm_config import *
@@ -32,16 +35,47 @@ def isACMEnabled():
return security.on()


+def getSystemPolicyName():
+ s,o = traceCommand("xm getpolicy")
+ m = re.compile("Policy name[\s]*: ([A-z\-]+)").search(o)
+ if m:
+ polname = m.group(1)
+ return polname
+ return ""
+
+
+def ACMLoadPolicy_XenAPI(policy='xm-test'):
+ polname = getSystemPolicyName()
+ if polname != policy:
+ # Try it, maybe it's not activated
+ traceCommand("xm setpolicy %s %s" %
+ (xsconstants.XS_POLICY_ACM, policy))
+ polname = getSystemPolicyName()
+ if polname != policy:
+ FAIL("Need to have a system with no or policy '%s' active, "
+ "not %s" % (policy,polname))
+ else:
+ s, o = traceCommand("xm activatepolicy --load")
+ else:
+ s, o = traceCommand("xm activatepolicy --load")
+ if not re.search("Successfully", o):
+ FAIL("Could not set the policy '%s'." % policy)
+
+
def ACMLoadPolicy(policy='xm-test'):
- s, o = traceCommand("xm makepolicy %s" % (policy))
- if s != 0:
- FAIL("Need to be able to do 'xm makepolicy %s' but could not" %
- (policy))
- s, o = traceCommand("xm loadpolicy %s" % (policy))
- if s != 0:
- FAIL("Could not load the required policy '%s'.\n"
- "Start the system without any policy.\n%s" %
- (policy, o))
+ from xen.xm import main
+ if main.serverType == main.SERVER_XEN_API:
+ ACMLoadPolicy_XenAPI()
+ else:
+ s, o = traceCommand("xm makepolicy %s" % (policy))
+ if s != 0:
+ FAIL("Need to be able to do 'xm makepolicy %s' but could not" %
+ (policy))
+ s, o = traceCommand("xm loadpolicy %s" % (policy))
+ if s != 0:
+ FAIL("Could not load the required policy '%s'.\n"
+ "Start the system without any policy.\n%s" %
+ (policy, o))

def ACMPrepareSystem(resources):
if isACMEnabled():
diff -r ad87a4912874 -r e1f74a5a09cb tools/xm-test/tests/security-acm/01_security-acm_basic.py
--- a/tools/xm-test/tests/security-acm/01_security-acm_basic.py Wed Jul 18 13:56:00 2007 +0100
+++ b/tools/xm-test/tests/security-acm/01_security-acm_basic.py Wed Jul 18 13:56:21 2007 +0100
@@ -15,6 +15,7 @@

from XmTestLib import *
from xen.util import security
+from xen.util import xsconstants
import commands
import os
import re
@@ -28,7 +29,7 @@ if not isACMEnabled():
SKIP("Not running this test since ACM not enabled.")

status, output = traceCommand("xm makepolicy %s" % (testpolicy))
-if status != 0 or output != "":
+if status != 0:
FAIL("'xm makepolicy' failed with status %d and output\n%s" %
(status,output));

@@ -47,7 +48,7 @@ status, output = traceCommand("xm addlab
status, output = traceCommand("xm addlabel %s dom %s %s" %
(testlabel, vmconfigfile, testpolicy))
if status != 0:
- FAIL("'xm addlabel' failed with status %d.\n" % status)
+ FAIL("(1) 'xm addlabel' failed with status %d.\n" % status)

status, output = traceCommand("xm getlabel dom %s" %
(vmconfigfile))
@@ -55,8 +56,9 @@ if status != 0:
if status != 0:
FAIL("'xm getlabel' failed with status %d, output:\n%s" %
(status, output))
-if output != "policy=%s,label=%s" % (testpolicy,testlabel):
- FAIL("Received unexpected output from 'xm getlabel': \n%s" %
+if output != "policytype=%s,policy=%s,label=%s" % \
+ (xsconstants.ACM_POLICY_ID, testpolicy, testlabel):
+ FAIL("(1) Received unexpected output from 'xm getlabel dom': \n%s" %
(output))


@@ -74,30 +76,34 @@ status, output = traceCommand("xm getlab
(vmconfigfile))

if output != "Error: 'Domain not labeled'":
- FAIL("Received unexpected output from 'xm getlabel': \n%s" %
+ FAIL("(2) Received unexpected output from 'xm getlabel dom': \n%s" %
(output))

#Whatever label the resource might have, remove it
status, output = traceCommand("xm rmlabel res %s" %
(testresource))
+if status != 0:
+ FAIL("'xm rmlabel' on resource failed with status %d.\n" % status)

status, output = traceCommand("xm addlabel %s res %s %s" %
(testlabel, testresource, testpolicy))
if status != 0:
- FAIL("'xm addlabel' on resource failed with status %d.\n" % status)
+ FAIL("(2) 'xm addlabel' on resource failed with status %d.\n" % status)

status, output = traceCommand("xm getlabel res %s" % (testresource))

if status != 0:
FAIL("'xm getlabel' on resource failed with status %d, output:\n%s" %
(status, output))
-if output != "policy=%s,label=%s" % (testpolicy,testlabel):
- FAIL("Received unexpected output from 'xm getlabel': \n%s" %
+if output != "%s:%s:%s" % (xsconstants.ACM_POLICY_ID,\
+ testpolicy,testlabel):
+ FAIL("Received unexpected output from 'xm getlabel res': \n%s" %
(output))

status, output = traceCommand("xm resources")

if status != 0:
+ print "status = %s" % str(status)
FAIL("'xm resources' did not run properly")
if not re.search(security.unify_resname(testresource), output):
FAIL("'xm resources' did not show the tested resource '%s'." %
@@ -117,5 +123,5 @@ status, output = traceCommand("xm getlab
(testresource))

if output != "Error: 'Resource not labeled'":
- FAIL("Received unexpected output from 'xm getlabel': \n%s" %
+ FAIL("Received unexpected output from 'xm getlabel res': \n%s" %
(output))
diff -r ad87a4912874 -r e1f74a5a09cb tools/xm-test/tests/security-acm/07_security-acm_pol_update.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/security-acm/07_security-acm_pol_update.py Wed Jul 18 13:56:21 2007 +0100
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+
+# Copyright (C) International Business Machines Corp., 2006
+# Author: Stefan Berger <stefanb@us.ibm.com>
+
+# Test to exercise the xspolicy class
+
+from XmTestLib import xapi
+from XmTestLib.XenAPIDomain import XmTestAPIDomain
+from XmTestLib import *
+from xen.xend import XendAPIConstants
+from xen.util import acmpolicy, security, xsconstants
+from xen.util.acmpolicy import ACMPolicy
+from xen.xend.XendDomain import DOM0_UUID
+
+import commands
+import os
+import base64
+
+xm_test = {}
+xm_test['policyname'] = "xm-test"
+xm_test['date'] = "Fri Sep 29 14:44:38 2006"
+xm_test['url'] = None
+
+vm_label_red = "%s:xm-test:red" % xsconstants.ACM_POLICY_ID
+vm_label_green = "%s:xm-test:green" % xsconstants.ACM_POLICY_ID
+vm_label_blue = "%s:xm-test:blue" % xsconstants.ACM_POLICY_ID
+vm_label_sys = "%s:xm-test:SystemManagement" % xsconstants.ACM_POLICY_ID
+
+vm_label_black = "%s:xm-test:black"
+
+session = xapi.connect()
+
+oldlabel = session.xenapi.VM.get_security_label(DOM0_UUID)
+
+ssidref = session.xenapi.VM.set_security_label(DOM0_UUID,
+ vm_label_sys,
+ oldlabel)
+if int(ssidref) <= 0 or int(ssidref) != 0x00010001:
+ FAIL("(0) Domain-0 label for '%s' has unexpected failure: %08x" %
+ (vm_label_sys, int(ssidref)))
+print "ssidref for '%s' is 0x%08x" % (vm_label_sys, int(ssidref))
+
+
+xstype = session.xenapi.XSPolicy.get_xstype()
+if int(xstype) & xsconstants.XS_POLICY_ACM == 0:
+ SKIP("ACM not enabled/compiled in Xen")
+
+policystate = session.xenapi.XSPolicy.get_xspolicy()
+if not policystate.has_key('xs_ref'):
+ FAIL("get_xspolicy must return member 'xs_ref'")
+
+xs_ref = policystate['xs_ref']
+if xs_ref != "":
+ origpolicyxml = session.xenapi.ACMPolicy.get_xml(xs_ref)
+else:
+ origpolicyxml = ""
+
+f = open("xm-test-security_policy.xml", 'r')
+if f:
+ newpolicyxml = f.read()
+ f.close()
+else:
+ FAIL("Could not read 'xm-test' policy")
+
+try:
+ os.unlink("/boot/xm-test.bin")
+except:
+ pass
+
+policystate = session.xenapi.XSPolicy.get_xspolicy()
+
+if int(policystate['type']) == 0:
+ policystate = session.xenapi.XSPolicy.set_xspolicy(
+ xsconstants.XS_POLICY_ACM,
+ newpolicyxml,
+ xsconstants.XS_INST_LOAD | xsconstants.XS_INST_BOOT,
+ 1)
+ if int(policystate['flags']) == -1:
+ FAIL("Could not set the new policy.")
+
+print "state of policy = %s " % policystate
+
+rc = session.xenapi.XSPolicy.activate_xspolicy(
+ policystate['xs_ref'],
+ xsconstants.XS_INST_LOAD | xsconstants.XS_INST_BOOT)
+if int(rc) != xsconstants.XS_INST_LOAD | xsconstants.XS_INST_BOOT:
+ FAIL("Could not activate the current policy: rc = %08x" % int(rc))
+
+if not os.path.exists("/boot/xm-test.bin"):
+ FAIL("Binary policy was not installed. Check grub config file.")
+
+policystate = session.xenapi.XSPolicy.get_xspolicy()
+
+if int(policystate['flags']) != xsconstants.XS_INST_BOOT | \
+ xsconstants.XS_INST_LOAD:
+ FAIL("Flags (%x) are not indicating the correct state of the policy.",
+ int(policystate['flags']))
+
+policystate = session.xenapi.XSPolicy.get_xspolicy()
+xs_ref = policystate['xs_ref']
+
+newpolicyxml = None
+f = open("xm-test-new-security_policy.xml", 'r')
+if f:
+ newpolicyxml = f.read()
+ f.close()
+else:
+ FAIL("Could not read 'xm-test-new' policy")
+
+cur_acmpol = ACMPolicy(xml = policystate['repr'])
+new_acmpol = ACMPolicy(xml = newpolicyxml)
+
+new_acmpol.update_frompolicy(cur_acmpol)
+
+policystate = session.xenapi.XSPolicy.set_xspolicy(xsconstants.XS_POLICY_ACM,
+ new_acmpol.toxml(),
+ xsconstants.XS_INST_LOAD | xsconstants.XS_INST_BOOT,
+ 1)
+
+f = open("xm-test-security_policy.xml", 'r')
+if f:
+ newpolicyxml = f.read()
+ f.close()
+else:
+ FAIL("Could not read 'xm-test-new' policy")
+
+cur_acmpol = new_acmpol
+new_acmpol = ACMPolicy(xml = newpolicyxml)
+
+new_acmpol.update_frompolicy(cur_acmpol)
+
+policystate = session.xenapi.XSPolicy.set_xspolicy(xsconstants.XS_POLICY_ACM,
+ new_acmpol.toxml(),
+ xsconstants.XS_INST_LOAD | xsconstants.XS_INST_BOOT,
+ 1)
+
+dom0_lab = session.xenapi.VM.get_security_label(DOM0_UUID)
+
+ssidref = session.xenapi.VM.set_security_label(DOM0_UUID,
+ vm_label_sys, dom0_lab)
+if int(ssidref) <= 0 or int(ssidref) != 0x00010001:
+ FAIL("(1) Domain-0 label for '%s' has unexpected failure: %08x" %
+ (vm_label_sys, int(ssidref)))
+print "ssidref for '%s' is 0x%08x" % (vm_label_sys, int(ssidref))
+
+try:
+ ssidref = session.xenapi.VM.set_security_label(DOM0_UUID,
+ vm_label_black,
+ vm_label_sys)
+ FAIL("Could set label '%s', although it's not in the policy. "
+ "ssidref=%s" % (vm_label_black, ssidref))
+except:
+ pass
+
+ssidref = session.xenapi.VM.set_security_label(DOM0_UUID,
+ vm_label_red,
+ vm_label_sys)
+if int(ssidref) <= 0:
+ FAIL("(2) Domain-0 label for '%s' has unexpected failure: %08x" %
+ (vm_label_red, int(ssidref)))
+print "ssidref for '%s' is 0x%08x" % (vm_label_red, int(ssidref))
+
+label = session.xenapi.VM.get_security_label(DOM0_UUID)
+
+if label != vm_label_red:
+ FAIL("Dom0 label '%s' not as expected '%s'" % (label, vm_label_red))
+
+
+ssidref = session.xenapi.VM.set_security_label(DOM0_UUID,
+ vm_label_sys,
+ vm_label_red)
+if int(ssidref) <= 0 or int(ssidref) != 0x00010001:
+ FAIL("(3) Domain-0 label for '%s' has unexpected failure: %08x" %
+ (vm_label_sys, int(ssidref)))
+
+label = session.xenapi.VM.get_security_label(DOM0_UUID)
+
+if label != vm_label_sys:
+ FAIL("Dom0 label '%s' not as expected '%s'" % label, dom0_label)
+
+header = session.xenapi.ACMPolicy.get_header(xs_ref)
+
+if header['policyname'] != xm_test['policyname']:
+ FAIL("Name in header is '%s', expected is '%s'." %
+ (header['policyname'],xm_test['policyname']))
+if header['date'] != xm_test['date']:
+ FAIL("Date in header is '%s', expected is '%s'." %
+ (header['date'],xm_test['date']))
+if header.has_key("url") and header['url' ] != xm_test['url' ]:
+ FAIL("URL in header is '%s', expected is '%s'." %
+ (header['url' ],xm_test['url' ]))
+
+# Create another domain
+try:
+ # XmTestAPIDomain tries to establish a connection to XenD
+ domain = XmTestAPIDomain(extraConfig={ 'security_label' : vm_label_blue })
+except Exception, e:
+ SKIP("Skipping test. Error: %s" % str(e))
+
+
+vm_uuid = domain.get_uuid()
+
+res = session.xenapi.VM.get_security_label(vm_uuid)
+if res != vm_label_blue:
+ FAIL("VM has security label '%s', expected is '%s'" %
+ (res, vm_label_blue))
+
+try:
+ domain.start(noConsole=True)
+except:
+ FAIL("Could not create domain")
+
+
+# Attempt to relabel the running domain
+ssidref = session.xenapi.VM.set_security_label(vm_uuid,
+ vm_label_red,
+ vm_label_blue)
+if int(ssidref) <= 0:
+ FAIL("Could not relabel running domain to '%s'." % vm_label_red)
+
+# user domain is 'red', dom0 is current 'SystemManagement'.
+# Try to move domain-0 to 'red' first, then to 'blue'.
+
+# Moving domain-0 to 'red' should work
+ssidref = session.xenapi.VM.set_security_label(DOM0_UUID,
+ vm_label_red,
+ vm_label_sys)
+if int(ssidref) <= 0:
+ FAIL("Could not label domain-0 '%s'" % vm_label_red)
+
+# Moving the guest domain to 'blue' should not work due to conflict set
+try:
+ ssidref = session.xenapi.VM.set_security_label(vm_uuid,
+ vm_label_blue,
+ vm_label_red)
+ FAIL("Could label guest domain with '%s', although this is in a conflict "
+ "set. ssidref=%x" % (vm_label_blue,int(ssidref)))
+except:
+ pass
+
+label = session.xenapi.VM.get_security_label(vm_uuid)
+if label != vm_label_red:
+ FAIL("User domain has wrong label '%s', expected '%s'." %
+ (label, vm_label_red))
+
+label = session.xenapi.VM.get_security_label(DOM0_UUID)
+if label != vm_label_red:
+ FAIL("Domain-0 has wrong label '%s'; expected '%s'." %
+ (label, vm_label_red))
+
+ssidref = session.xenapi.VM.set_security_label(DOM0_UUID,
+ vm_label_sys,
+ vm_label_red)
+if int(ssidref) < 0:
+ FAIL("Could not set the domain-0 security label to '%s'." %
+ (vm_label_sys))
+
+# pause the domain and relabel it...
+session.xenapi.VM.pause(vm_uuid)
+
+label = session.xenapi.VM.get_security_label(vm_uuid)
+if label != vm_label_red:
+ FAIL("User domain has wrong label '%s', expected '%s'." %
+ (label, vm_label_red))
+
+ssidref = session.xenapi.VM.set_security_label(vm_uuid,
+ vm_label_blue,
+ vm_label_red)
+print "guest domain new label '%s'; ssidref is 0x%08x" % \
+ (vm_label_blue, int(ssidref))
+if int(ssidref) <= 0:
+ FAIL("Could not label guest domain with '%s'" % (vm_label_blue))
+
+label = session.xenapi.VM.get_security_label(vm_uuid)
+if label != vm_label_blue:
+ FAIL("User domain has wrong label '%s', expected '%s'." %
+ (label, vm_label_blue))
+
+session.xenapi.VM.unpause(vm_uuid)
+
+rc = session.xenapi.VM.suspend(vm_uuid)
+
+ssidref = session.xenapi.VM.set_security_label(vm_uuid,
+ vm_label_green,
+ vm_label_blue)
+print "guest domain new label '%s'; ssidref is 0x%08x" % \
+ (vm_label_green, int(ssidref))
+if int(ssidref) < 0:
+ FAIL("Could not label suspended guest domain with '%s'" % (vm_label_blue))
+
+label = session.xenapi.VM.get_security_label(vm_uuid)
+if label != vm_label_green:
+ FAIL("User domain has wrong label '%s', expected '%s'." %
+ (label, vm_label_green))
+
+
+rc = session.xenapi.VM.resume(vm_uuid, False)
+
+label = session.xenapi.VM.get_security_label(vm_uuid)
+if label != vm_label_green:
+ FAIL("User domain has wrong label '%s', expected '%s'." %
+ (label, vm_label_green))
diff -r ad87a4912874 -r e1f74a5a09cb tools/xm-test/tests/security-acm/08_security-acm_xapi.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/security-acm/08_security-acm_xapi.py Wed Jul 18 13:56:21 2007 +0100
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+
+# Copyright (C) International Business Machines Corp., 2007
+# Author: Stefan Berger <stefanb@us.ibm.com>
+
+# VM creation test with labeled VM and labeled VDI
+
+from XmTestLib import xapi
+from XmTestLib.XenAPIDomain import XmTestAPIDomain
+from XmTestLib import *
+from xen.xend import XendAPIConstants
+from xen.util import acmpolicy, security, xsconstants
+import commands
+import os
+
+vm_label_red = xsconstants.ACM_POLICY_ID + ":xm-test:red"
+vm_label_green = xsconstants.ACM_POLICY_ID + ":xm-test:green"
+vdi_label_red = xsconstants.ACM_POLICY_ID + ":xm-test:red"
+vdi_label_green = xsconstants.ACM_POLICY_ID + ":xm-test:green"
+
+vdi_file = "/dev/ram0"
+vdi_path = "phy:" + vdi_file
+
+#Note:
+# If during the suspend/resume operations 'red' instead of 'green' is
+# used, the Chinese Wall policy goes into effect and disallows the
+# suspended VM from being resumed...
+
+try:
+ # XmTestAPIDomain tries to establish a connection to XenD
+ domain = XmTestAPIDomain(extraConfig={ 'security_label' : vm_label_red })
+except Exception, e:
+ SKIP("Skipping test. Error: %s" % str(e))
+
+vm_uuid = domain.get_uuid()
+
+session = xapi.connect()
+xstype = session.xenapi.XSPolicy.get_xstype()
+if int(xstype) & xsconstants.XS_POLICY_ACM == 0:
+ SKIP("ACM not enabled/compiled in Xen")
+
+f = open("xm-test-security_policy.xml", 'r')
+if f:
+ newpolicyxml = f.read()
+ f.close()
+else:
+ FAIL("Could not read 'xm-test' policy")
+
+policystate = session.xenapi.XSPolicy.get_xspolicy()
+if int(policystate['type']) == 0:
+ policystate = session.xenapi.XSPolicy.set_xspolicy(
+ xsconstants.XS_POLICY_ACM,
+ newpolicyxml,
+ xsconstants.XS_INST_BOOT | xsconstants.XS_INST_LOAD,
+ True)
+ if int(policystate['flags']) == -1:
+ FAIL("Could not set the new policy.")
+
+policystate = session.xenapi.XSPolicy.get_xspolicy()
+print "policystate = %s" % policystate
+acm_ref = policystate['xs_ref']
+
+
+#
+# Some tests with labeling of resources
+#
+labels = session.xenapi.XSPolicy.get_labeled_resources()
+print "labeled resources are:\n%s" % labels
+
+oldlabel = session.xenapi.XSPolicy.get_resource_label("phy:/dev/ram0")
+
+rc = session.xenapi.XSPolicy.set_resource_label("phy:/dev/ram0", "",
+ oldlabel)
+
+rc = session.xenapi.XSPolicy.set_resource_label("phy:/dev/ram0",
+ vdi_label_green,
+ "")
+
+res = session.xenapi.XSPolicy.get_resource_label("phy:/dev/ram0")
+if res != vdi_label_green:
+ FAIL("(1) get_resource_label returned unexpected result %s, wanted %s" %
+ (res, vdi_label_green))
+
+
+#
+# Some test with labeling of VMs
+#
+
+res = session.xenapi.VM.get_security_label(vm_uuid)
+
+if res != vm_label_red:
+ FAIL("VM.get_security_label returned wrong security label '%s'." % res)
+
+res = session.xenapi.VM.set_security_label(vm_uuid, vm_label_green,
+ vm_label_red)
+
+res = session.xenapi.VM.get_security_label(vm_uuid)
+if res != vm_label_green:
+ FAIL("VM does not show expected label '%s' but '%s'." %
+ (vm_label_green, res))
+
+res = session.xenapi.VM.set_security_label(vm_uuid, "", vm_label_green)
+if int(res) != 0:
+ FAIL("Should be able to unlabel the domain while it's halted.")
+
+res = session.xenapi.VM.get_security_label(vm_uuid)
+if res != "":
+ FAIL("Unexpected VM security label after removal: %s" % res)
+
+res = session.xenapi.VM.set_security_label(vm_uuid, vm_label_red, res)
+if int(res) != 0:
+ FAIL("Could not label the VM to '%s'" % vm_label_red)
+
+res = session.xenapi.VM.get_security_label(vm_uuid)
+if res != vm_label_red:
+ FAIL("VM has wrong label '%s', expected '%s'." % (res, vm_label_red))
+
+sr_uuid = session.xenapi.SR.get_by_name_label("Local")
+if len(sr_uuid) == 0:
+ FAIL("Could not get a handle on SR 'Local'")
+
+
+vdi_rec = { 'name_label' : "My disk",
+ 'SR' : sr_uuid[0],
+ 'virtual_size': 0,
+ 'sector_size' : 512,
+ 'parent' : '',
+ 'SR_name' : 'Local',
+ 'type' : 'system',
+ 'shareable' : False,
+ 'read-only' : False,
+ 'other_config': {'location': vdi_path}
+}
+
+vdi_ref = session.xenapi.VDI.create(vdi_rec)
+
+res = session.xenapi.VDI.get_name_label(vdi_ref)
+if res != vdi_rec['name_label']:
+ print "Destroying VDI now"
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("VDI_get_name_label return wrong information")
+
+res = session.xenapi.VDI.get_record(vdi_ref)
+print "vdi_record : %s" % res
+
+oldlabel = session.xenapi.XSPolicy.get_resource_label(vdi_path)
+
+#Remove label from VDI device
+rc = session.xenapi.XSPolicy.set_resource_label(vdi_path,
+ "",
+ oldlabel)
+
+
+# Attach a VBD to the VM
+
+vbd_rec = { 'VM' : vm_uuid,
+ 'VDI' : vdi_ref,
+ 'device' : "xvda1",
+ 'mode' : 1,
+ 'bootable': 0,
+}
+
+vbd_ref = session.xenapi.VBD.create(vbd_rec)
+
+res = session.xenapi.VBD.get_record(vbd_ref)
+
+try:
+ domain.start(noConsole=True)
+ # Should not get here.
+ print "Destroying VDI now"
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Could start VM with a VBD that it is not allowed to access.")
+except:
+ pass
+ print "Could not create domain -- that's good"
+
+
+#
+# Label the VDI now
+#
+
+rc = session.xenapi.VDI.set_security_label(vdi_ref, vdi_label_red, "")
+if int(rc) != 0:
+ FAIL("Could not set the VDI label to '%s'" % vdi_label_red)
+
+label = session.xenapi.VDI.get_security_label(vdi_ref)
+if label != vdi_label_red:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Unexpected label '%s' on VDI, wanted '%s'" %
+ (label, vdi_label_red))
+
+rc = session.xenapi.VDI.set_security_label(vdi_ref, "", label)
+if int(rc) != 0:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Should be able to unlabel VDI.")
+
+rc = session.xenapi.VDI.set_security_label(vdi_ref, vdi_label_red, "")
+if int(rc) != 0:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Should be able to label VDI with label '%s'" % vid_label_red)
+
+res = session.xenapi.XSPolicy.get_resource_label(vdi_path)
+if res != vdi_label_red:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("(2) get_resource_label on %s returned unexpected result %s, wanted '%s'" %
+ (vdi_path, res, vdi_label_red))
+
+res = session.xenapi.VDI.get_security_label(vdi_ref)
+if res != vdi_label_red:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("get_security_label returned unexpected result %s, wanted '%s'" %
+ (res, vdi_label_red))
+
+domain.start(noConsole=True)
+
+console = domain.getConsole()
+
+domName = domain.getName()
+
+try:
+ run = console.runCmd("cat /proc/interrupts")
+except ConsoleError, e:
+ saveLog(console.getHistory())
+ FAIL("Could not access proc-filesystem")
+
+# Try to relabel while VM is running
+try:
+ res = session.xenapi.VM.set_security_label(vm_uuid, vm_label_green,
+ vm_label_red)
+except:
+ pass
+
+lab = session.xenapi.VM.get_security_label(vm_uuid)
+if lab == vm_label_green:
+ FAIL("Should not be able to reset the security label while running."
+ "tried to set to %s, got %s, old: %s" %(vm_label_green, lab,
+ vm_label_red))
+
+
+#
+# Suspend the domain and relabel it
+#
+
+try:
+ status, output = traceCommand("xm suspend %s" % domName,
+ timeout=30)
+except TimeoutError, e:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Failure from suspending VM: %s." % str(e))
+
+# Try to relabel while VM is suspended -- this should work
+
+rc = session.xenapi.VM.set_security_label(vm_uuid, vm_label_green,
+ vm_label_red)
+if int(rc) != 0:
+ FAIL("VM security label could not be set to %s" % vm_label_green)
+
+res = session.xenapi.VM.get_security_label(vm_uuid)
+if res != vm_label_green:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("VM (suspended) has label '%s', expected '%s'." %
+ (res, vm_label_green))
+
+status, output = traceCommand("xm list")
+
+#Try to resume now -- should fail due to denied access to block device
+try:
+ status, output = traceCommand("xm resume %s" % domName,
+ timeout=30)
+ if status == 0:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Could resume re-labeled VM: %s" % output)
+except Exception, e:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("1. Error resuming the VM: %s." % str(e))
+
+# Relabel VM so it would resume
+res = session.xenapi.VM.set_security_label(vm_uuid, vm_label_red,
+ vm_label_green)
+if int(res) != 0:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Could not relabel VM to have it resume.")
+
+res = session.xenapi.VM.get_security_label(vm_uuid)
+if res != vm_label_red:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("VM (suspended) has label '%s', expected '%s'." %
+ (res, vm_label_red))
+
+
+# Relabel the resource so VM should not resume
+try:
+ session.xenapi.XSPolicy.set_resource_label(vdi_path,
+ vdi_label_green,
+ "")
+except Exception, e:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Could not label the VDI to '%s': %x" %
+ (vdi_label_green, int(rc)))
+
+#Try to resume now -- should fail due to denied access to block device
+try:
+ status, output = traceCommand("xm resume %s" % domName,
+ timeout=30)
+ if status == 0:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Could resume re-labeled VM: %s" % output)
+except Exception, e:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("2. Error resuming the VM: %s." % str(e))
+
+
+status, output = traceCommand("xm list")
+
+# Relabel the resource so VM can resume
+try:
+ session.xenapi.XSPolicy.set_resource_label(vdi_path,
+ vdi_label_red,
+ vdi_label_green)
+except Exception, e:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Could not label the resource to '%s'" % vid_label_red)
+
+res = session.xenapi.XSPolicy.get_resource_label(vdi_path)
+if res != vdi_label_red:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("'%s' has label '%s', expected '%s'." %
+ (vdi_path, res, vdi_label_red))
+
+#Try to resume now -- should work
+try:
+ status, output = traceCommand("xm resume %s" % domName,
+ timeout=30)
+ if status != 0:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Could not resume re-labeled VM: %s" % output)
+except Exception, e:
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("3. Error resuming the VM: %s." % str(e))
+
+
+status, output = traceCommand("xm list")
+
+console = domain.getConsole()
+
+try:
+ run = console.runCmd("cat /proc/interrupts")
+except ConsoleError, e:
+ saveLog(console.getHistory())
+ session.xenapi.VDI.destroy(vdi_ref)
+ FAIL("Could not access proc-filesystem")
+
+domain.stop()
+domain.destroy()
diff -r ad87a4912874 -r e1f74a5a09cb tools/xm-test/tests/security-acm/09_security-acm_pol_update.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/security-acm/09_security-acm_pol_update.py Wed Jul 18 13:56:21 2007 +0100
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+
+# Copyright (C) International Business Machines Corp., 2007
+# Author: Stefan Berger <stefanb@us.ibm.com>
+
+# Test to exercise the xspolicy and acmpolicy classes
+
+from XmTestLib import xapi
+from XmTestLib.XenAPIDomain import XmTestAPIDomain
+from XmTestLib import *
+from xen.xend import XendAPIConstants
+from xen.util import security, xsconstants
+from xen.util.acmpolicy import ACMPolicy
+from xen.xend.XendDomain import DOM0_UUID
+import base64
+import struct
+import time
+
+def typestoxml(types):
+ res = ""
+ for t in types:
+ res += "<Type>" + t + "</Type>\n"
+ return res
+
+def cfstoxml(cfss):
+ res = ""
+ for cfs in cfss:
+ res += "<Conflict name=\"" + cfs['name'] + "\">\n" + \
+ typestoxml(cfs['chws']) + \
+ "</Conflict>\n"
+ return res
+
+def vmlabelstoxml(vmlabels, vmfrommap):
+ res = ""
+ for vmlabel in vmlabels:
+ res += "<VirtualMachineLabel>\n"
+ if vmlabel['name'] in vmfrommap:
+ res += "<Name from=\""+ vmfrommap[vmlabel['name']] +"\">"
+ else:
+ res += "<Name>"
+ res += vmlabel['name'] + "</Name>\n"
+ res += "<SimpleTypeEnforcementTypes>\n" + \
+ typestoxml(vmlabel['stes']) + \
+ "</SimpleTypeEnforcementTypes>\n"
+ if vmlabel.has_key('chws'):
+ res += "<ChineseWallTypes>\n" + \
+ typestoxml(vmlabel['chws']) + \
+ "</ChineseWallTypes>\n"
+ res += "</VirtualMachineLabel>\n"
+ return res
+
+
+def reslabelstoxml(reslabels, resfrommap):
+ res = ""
+ for reslabel in reslabels:
+ res += "<ResourceLabel>\n"
+ if resfrommap.has_key(reslabel['name']):
+ res += "<Name from=\""+ resfrommap[reslabel['name']] +"\">"
+ else:
+ res += "<Name>"
+ res += reslabel['name'] + "</Name>\n"
+ res += "<SimpleTypeEnforcementTypes>\n" + \
+ typestoxml(reslabel['stes']) + \
+ "</SimpleTypeEnforcementTypes>\n"
+ res += "</ResourceLabel>\n"
+ return res
+
+def create_xml_policy(hdr, stes, chws,
+ vmlabels, vmfrommap, bootstrap,
+ reslabels, resfrommap,
+ cfss):
+ hdr_xml ="<PolicyHeader>\n" + \
+ " <PolicyName>" + hdr['name'] + "</PolicyName>\n" + \
+ " <Version>" + hdr['version'] + "</Version>\n" + \
+ " <FromPolicy>\n" + \
+ " <PolicyName>" + hdr['oldname'] + "</PolicyName>\n" + \
+ " <Version>" + hdr['oldversion'] + "</Version>\n" + \
+ " </FromPolicy>\n" + \
+ "</PolicyHeader>\n"
+
+ stes_xml = "<SimpleTypeEnforcement>\n" + \
+ " <SimpleTypeEnforcementTypes>\n" + \
+ typestoxml(stes) + \
+ " </SimpleTypeEnforcementTypes>\n" + \
+ "</SimpleTypeEnforcement>\n"
+
+ chws_xml = "<ChineseWall>\n" + \
+ " <ChineseWallTypes>\n" + \
+ typestoxml(chws) + \
+ " </ChineseWallTypes>\n" + \
+ " <ConflictSets>\n" + \
+ cfstoxml(cfss) + \
+ " </ConflictSets>\n" + \
+ "</ChineseWall>\n"
+
+ subjlabel_xml = "<SubjectLabels bootstrap=\""+ bootstrap +"\">\n" + \
+ vmlabelstoxml(vmlabels, vmfrommap) + \
+ "</SubjectLabels>\n"
+ objlabel_xml = "<ObjectLabels>\n" + \
+ reslabelstoxml(reslabels, resfrommap) + \
+ "</ObjectLabels>\n"
+
+ policyxml = "<?xml version=\"1.0\" ?>\n" + \
+ "<SecurityPolicyDefinition xmlns=\"http://www.ibm.com\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.ibm.com ../../security_policy.xsd \">\n" + \
+ hdr_xml + \
+ stes_xml + \
+ chws_xml + \
+ "<SecurityLabelTemplate>\n" + \
+ subjlabel_xml + \
+ objlabel_xml + \
+ "</SecurityLabelTemplate>\n" + \
+ "</SecurityPolicyDefinition>\n"
+ return policyxml
+
+
+def update_hdr(hdr):
+ """ Update the version information in the header """
+ hdr['oldversion'] = hdr['version']
+ hdr['oldname'] = hdr['name']
+ vers = hdr['version']
+ tmp = vers.split('.')
+ if len(tmp) == 1:
+ rev = 1
+ else:
+ rev = int(tmp[1]) + 1
+ hdr['version'] = "%s.%s" % (tmp[0],rev)
+ return hdr
+
+session = xapi.connect()
+
+policystate = session.xenapi.XSPolicy.get_xspolicy()
+
+if policystate['repr'] != "":
+ print "%s" % policystate['repr']
+ try:
+ acmpol = ACMPolicy(xml=policystate['repr'])
+ except Exception, e:
+ FAIL("Failure from creating ACMPolicy object: %s" % str(e))
+ oldname = acmpol.policy_dom_get_hdr_item("PolicyName")
+ oldvers = acmpol.policy_dom_get_hdr_item("Version")
+ tmp = oldvers.split(".")
+ if len(tmp) == 1:
+ rev = 1
+ else:
+ rev = int(tmp[1]) + 1
+ newvers = "%s.%s" % (tmp[0], str(rev))
+ print "old name/version = %s/%s" % (oldname, oldvers)
+else:
+ oldname = None
+ oldvers = None
+ newvers = "1.0"
+
+# Initialize the header of the policy
+hdr = {}
+hdr['name'] = "xm-test"
+hdr['version'] = newvers
+
+if oldname:
+ hdr['oldname'] = oldname
+ if oldvers and oldvers != "":
+ hdr['oldversion'] = oldvers
+
+stes = [ "SystemManagement", "red", "green", "blue" ]
+
+chws = [ "SystemManagement", "red", "green", "blue" ]
+
+bootstrap = "SystemManagement"
+
+vm_sysmgt = { 'name' : bootstrap,
+ 'stes' : stes,
+ 'chws' : [ "SystemManagement" ] }
+
+vm_red = { 'name' : "red" ,
+ 'stes' : ["red"] ,
+ 'chws' : ["red"] }
+
+vm_green = { 'name' : "green" ,
+ 'stes' : ["green"] ,
+ 'chws' : ["green"] }
+
+vm_blue = { 'name' : "blue" ,
+ 'stes' : ["blue"] ,
+ 'chws' : ["blue"] }
+
+res_red = { 'name' : "red" ,
+ 'stes' : ["red"] }
+
+res_green = { 'name' : "green" ,
+ 'stes' : ["green"] }
+
+res_blue = { 'name' : "blue" ,
+ 'stes' : ["blue"] }
+
+cfs_1 = { 'name' : "CFS1",
+ 'chws' : [ "red" , "blue" ] }
+
+vmlabels = [ vm_sysmgt, vm_red, vm_green, vm_blue ]
+vmfrommap = {}
+reslabels = [ res_red, res_green, res_blue ]
+resfrommap = {}
+cfss = [ cfs_1 ]
+
+vm_label_red = xsconstants.ACM_POLICY_ID + ":xm-test:red"
+vm_label_green = xsconstants.ACM_POLICY_ID + ":xm-test:green"
+vm_label_blue = xsconstants.ACM_POLICY_ID + ":xm-test:blue"
+
+xml = create_xml_policy(hdr, stes, chws,
+ vmlabels, vmfrommap, bootstrap,
+ reslabels, resfrommap,
+ cfss)
+
+xml_good = xml
+
+policystate = session.xenapi.XSPolicy.set_xspolicy(xsconstants.XS_POLICY_ACM,
+ xml,
+ xsconstants.XS_INST_LOAD,
+ True)
+
+print "\n\npolicystate = %s" % policystate
+
+policystate = session.xenapi.XSPolicy.get_xspolicy()
+
+#
+# Create two non-conflicting domains and start them
+#
+try:
+ # XmTestAPIDomain tries to establish a connection to XenD
+ domain1 = XmTestAPIDomain(extraConfig={ 'security_label' : vm_label_red })
+except Exception, e:
+ SKIP("Skipping test. Error: %s" % str(e))
+
+
+vm1_uuid = domain1.get_uuid()
+
+try:
+ domain1.start(noConsole=True)
+except:
+ FAIL("Could not start domain1")
+
+print "Domain 1 started"
+
+try:
+ # XmTestAPIDomain tries to establish a connection to XenD
+ domain2 = XmTestAPIDomain(extraConfig={'security_label': vm_label_green })
+except Exception, e:
+ SKIP("Skipping test. Error: %s" % str(e))
+
+vm2_uuid = domain2.get_uuid()
+
+try:
+ domain2.start(noConsole=True)
+except:
+    FAIL("Could not start domain2")
+
+
+print "Domain 2 started"
+
+# Try a policy that would put the two domains into conflict
+cfs_2 = { 'name' : "CFS1",
+ 'chws' : [ "red" , "green" ] }
+cfss = [ cfs_2 ]
+
+hdr = update_hdr(hdr)
+xml = create_xml_policy(hdr, stes, chws,
+ vmlabels, vmfrommap, bootstrap,
+ reslabels, resfrommap,
+ cfss)
+
+policystate = session.xenapi.XSPolicy.set_xspolicy(xsconstants.XS_POLICY_ACM,
+ xml,
+ xsconstants.XS_INST_LOAD,
+ True)
+
+print "policystate %s" % policystate
+
+if int(policystate['xserr']) == 0:
+ FAIL("(1) Should not have been able to set this policy.")
+
+if len(policystate['errors']) == 0:
+    FAIL("Hypervisor should have reported errors.")
+
+errors = base64.b64decode(policystate['errors'])
+
+print "Length of errors: %d" % len(errors)
+a,b = struct.unpack("!ii",errors)
+
+print "%08x , %08x" % (a,b)
+
+#
+# Create a faulty policy with 'red' STE missing
+#
+
+cfss = [ cfs_1 ]
+stes = [ "SystemManagement", "green", "blue" ]
+
+xml = create_xml_policy(hdr, stes, chws,
+ vmlabels, vmfrommap, bootstrap,
+ reslabels, resfrommap,
+ cfss)
+policystate = session.xenapi.XSPolicy.set_xspolicy(xsconstants.XS_POLICY_ACM,
+ xml,
+ xsconstants.XS_INST_LOAD,
+ True)
+
+print "Result from setting faulty(!) policy with STE 'red' missing:"
+print "policystate %s" % policystate
+
+if int(policystate['xserr']) == 0:
+ FAIL("(2) Should not have been able to set this policy.")
+
+#
+# Create a policy with 'red' VMLabel missing -- should not work since it is
+# in use.
+#
+stes = [ "SystemManagement", "red", "green", "blue" ]
+
+vmlabels = [ vm_sysmgt, vm_green, vm_blue ]
+
+xml = create_xml_policy(hdr, stes, chws,
+ vmlabels, vmfrommap, bootstrap,
+ reslabels, resfrommap,
+ cfss)
+policystate = session.xenapi.XSPolicy.set_xspolicy(xsconstants.XS_POLICY_ACM,
+ xml,
+ xsconstants.XS_INST_LOAD,
+ True)
+print "Result from setting faulty(!) policy with VMlabel 'red' missing:"
+print "policystate %s" % policystate
+
+if int(policystate['xserr']) == 0:
+ FAIL("(3) Should not have been able to set this policy.")
+
+#
+# Create a policy with 'blue' VMLabel missing -- should work since it is NOT
+# in use.
+#
+vmlabels = [ vm_sysmgt, vm_red, vm_green ]
+
+xml = create_xml_policy(hdr, stes, chws,
+ vmlabels, vmfrommap, bootstrap,
+ reslabels, resfrommap,
+ cfss)
+policystate = session.xenapi.XSPolicy.set_xspolicy(xsconstants.XS_POLICY_ACM,
+ xml,
+ xsconstants.XS_INST_LOAD,
+ True)
+
+print "Result from setting (good) policy with VMlabel 'blue' missing:"
+print "policystate %s" % policystate
+
+if int(policystate['xserr']) != 0:
+ FAIL("(4) Should have been able to set this policy: %s" % xml)
+
+#
+# Move the green VMLabel towards blue which should put the running
+# domain with label blue into a conflict set
+#
+vmlabels = [ vm_sysmgt, vm_red, vm_blue ]
+
+vmfrommap = { "blue" : "green" } # new : old
+
+hdr = update_hdr(hdr) #Needed, since last update was successful
+xml = create_xml_policy(hdr, stes, chws,
+ vmlabels, vmfrommap, bootstrap,
+ reslabels, resfrommap,
+ cfss)
+
+policystate = session.xenapi.XSPolicy.set_xspolicy(xsconstants.XS_POLICY_ACM,
+ xml,
+ xsconstants.XS_INST_LOAD,
+ True)
+
+print "policystate %s" % policystate
+
+if int(policystate['xserr']) == 0:
+ FAIL("(5) Should not have been able to set this policy.")
+
+#
+# Try to install a policy where a VM label has a faulty VM label name
+#
+vmfrommap = {}
+
+vm_blue_bad = { 'name' : "blue:x" , # ':' not allowed
+ 'stes' : ["blue"],
+ 'chws' : ["blue"] }
+
+vmlabels = [ vm_sysmgt, vm_red, vm_green, vm_blue_bad ]
+
+xml = create_xml_policy(hdr, stes, chws,
+ vmlabels, vmfrommap, bootstrap,
+ reslabels, resfrommap,
+ cfss)
+
+policystate = session.xenapi.XSPolicy.set_xspolicy(xsconstants.XS_POLICY_ACM,
+ xml,
+ xsconstants.XS_INST_LOAD,
+ True)
+
+print "policystate %s" % policystate
+
+if int(policystate['xserr']) == 0:
+ FAIL("(6) Should not have been able to set this policy.")
+
+#
+# End the test by installing the initial policy again
+#
+
+cur_version = hdr['version']
+(maj, min) = cur_version.split(".")
+cur_version = "%s.%s" % (maj, str(int(min)-1) )
+
+orig_acmpol = ACMPolicy(xml=xml_good)
+orig_acmpol.set_frompolicy_version(cur_version)
+orig_acmpol.set_policy_version(hdr['version'])
+
+policystate = session.xenapi.XSPolicy.set_xspolicy(xsconstants.XS_POLICY_ACM,
+ orig_acmpol.toxml(),
+ xsconstants.XS_INST_LOAD,
+ True)
+
+if int(policystate['xserr']) != 0:
+ FAIL("(END) Should have been able to set this policy.")
+
+domain1.stop()
+domain2.stop()
+domain1.destroy()
+domain2.destroy()
diff -r ad87a4912874 -r e1f74a5a09cb tools/xm-test/tests/security-acm/Makefile.am
--- a/tools/xm-test/tests/security-acm/Makefile.am Wed Jul 18 13:56:00 2007 +0100
+++ b/tools/xm-test/tests/security-acm/Makefile.am Wed Jul 18 13:56:21 2007 +0100
@@ -5,7 +5,10 @@ TESTS = 01_security-acm_basic.test \
03_security-acm_dom_conflict.test \
04_security-acm_dom_res.test \
05_security-acm_dom_res_conf.test \
- 06_security-acm_dom_block_attach.test
+ 06_security-acm_dom_block_attach.test \
+ 07_security-acm_pol_update.test \
+ 08_security-acm_xapi.test \
+ 09_security-acm_pol_update.test

XFAIL_TESTS =

diff -r ad87a4912874 -r e1f74a5a09cb tools/xm-test/tests/security-acm/xm-test-new-security_policy.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/xm-test/tests/security-acm/xm-test-new-security_policy.xml Wed Jul 18 13:56:21 2007 +0100
@@ -0,0 +1,97 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Auto-generated by ezPolicy -->
+<SecurityPolicyDefinition xmlns="http://www.ibm.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.ibm.com ../../security_policy.xsd ">
+ <PolicyHeader>
+ <PolicyName>xm-test</PolicyName>
+ <Date>Fri Sep 29 14:44:38 2006</Date>
+ <Version>1.1</Version>
+ <FromPolicy>
+ <PolicyName>xm-test</PolicyName>
+ <Version>1.0</Version>
+ </FromPolicy>
+ </PolicyHeader>
+
+ <SimpleTypeEnforcement>
+ <SimpleTypeEnforcementTypes>
+ <Type>SystemManagement</Type>
+ <Type>green</Type>
+ <Type>red</Type>
+ </SimpleTypeEnforcementTypes>
+ </SimpleTypeEnforcement>
+
+ <ChineseWall priority="PrimaryPolicyComponent">
+ <ChineseWallTypes>
+ <Type>SystemManagement</Type>
+ <Type>green</Type>
+ <Type>red</Type>
+ </ChineseWallTypes>
+
+ <ConflictSets>
+ <Conflict name="RER">
+ <Type>green</Type>
+ <Type>red</Type>
+ </Conflict>
+ </ConflictSets>
+ </ChineseWall>
+
+ <SecurityLabelTemplate>
+ <SubjectLabels bootstrap="SystemManagement">
+ <VirtualMachineLabel>
+ <Name>SystemManagement</Name>
+ <SimpleTypeEnforcementTypes>
+ <Type>SystemManagement</Type>
+ <Type>green</Type>
+ <Type>red</Type>
+ </SimpleTypeEnforcementTypes>
+ <ChineseWallTypes>
+ <Type>SystemManagement</Type>
+ </ChineseWallTypes>
+ </VirtualMachineLabel>
+
+ <VirtualMachineLabel>
+ <Name>green</Name>
+ <SimpleTypeEnforcementTypes>
+ <Type>green</Type>
+ </SimpleTypeEnforcementTypes>
+ <ChineseWallTypes>
+ <Type>green</Type>
+ </ChineseWallTypes>
+ </VirtualMachineLabel>
+
+ <VirtualMachineLabel>
+ <Name>red</Name>
+ <SimpleTypeEnforcementTypes>
+ <Type>red</Type>
+ </SimpleTypeEnforcementTypes>
+ <ChineseWallTypes>
+ <Type>red</Type>
+ </ChineseWallTypes>
+ </VirtualMachineLabel>
+
+ </SubjectLabels>
+
+ <ObjectLabels>
+ <ResourceLabel>
+ <Name>SystemManagement</Name>
+ <SimpleTypeEnforcementTypes>
+ <Type>SystemManagement</Type>
+ </SimpleTypeEnforcementTypes>
+ </ResourceLabel>
+
+ <ResourceLabel>
+ <Name>green</Name>
+ <SimpleTypeEnforcementTypes>
+ <Type>green</Type>
+ </SimpleTypeEnforcementTypes>
+ </ResourceLabel>
+
+ <ResourceLabel>
+ <Name>red</Name>
+ <SimpleTypeEnforcementTypes>
+ <Type>red</Type>
+ </SimpleTypeEnforcementTypes>
+ </ResourceLabel>
+
+ </ObjectLabels>
+ </SecurityLabelTemplate>
+</SecurityPolicyDefinition>
diff -r ad87a4912874 -r e1f74a5a09cb xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c Wed Jul 18 13:56:00 2007 +0100
+++ b/xen/arch/x86/hvm/vpt.c Wed Jul 18 13:56:21 2007 +0100
@@ -261,13 +261,15 @@ void create_periodic_time(
pt->enabled = 1;
pt->pending_intr_nr = 0;

- if ( period < 900000 ) /* < 0.9 ms */
+ /* Periodic timer must be at least 0.9ms. */
+ if ( (period < 900000) && !one_shot )
{
gdprintk(XENLOG_WARNING,
"HVM_PlatformTime: program too small period %"PRIu64"\n",
period);
- period = 900000; /* force to 0.9ms */
- }
+ period = 900000;
+ }
+
pt->period = period;
pt->vcpu = v;
pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog
[xen-unstable] Merge [ In reply to ]
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xensource.com>
# Date 1185285186 -3600
# Node ID cc48264ed64742a7c57704948ea04dcad438a015
# Parent c585f993385c9dd6f61e27a9c6622adc8a8e50b1
# Parent 2f22450e716d037aa654e0486f4fe7722ad6b2bb
Merge
---
tools/xenstore/talloc.c | 18 ++++++
tools/xenstore/xenstored_core.c | 98 +++++++++++++++++++-------------------
tools/xenstore/xenstored_domain.c | 9 +--
3 files changed, 70 insertions(+), 55 deletions(-)

diff -r c585f993385c -r cc48264ed647 tools/xenstore/talloc.c
--- a/tools/xenstore/talloc.c Tue Jul 24 14:52:16 2007 +0100
+++ b/tools/xenstore/talloc.c Tue Jul 24 14:53:06 2007 +0100
@@ -97,6 +97,7 @@ struct talloc_chunk {
struct talloc_chunk *next, *prev;
struct talloc_chunk *parent, *child;
struct talloc_reference_handle *refs;
+ unsigned int null_refs; /* references from null_context */
talloc_destructor_t destructor;
const char *name;
size_t size;
@@ -189,6 +190,7 @@ void *_talloc(const void *context, size_
tc->child = NULL;
tc->name = NULL;
tc->refs = NULL;
+ tc->null_refs = 0;

if (context) {
struct talloc_chunk *parent = talloc_chunk_from_ptr(context);
@@ -225,7 +227,11 @@ void talloc_set_destructor(const void *p
*/
void talloc_increase_ref_count(const void *ptr)
{
- talloc_reference(null_context, ptr);
+ struct talloc_chunk *tc;
+ if (ptr == NULL) return;
+
+ tc = talloc_chunk_from_ptr(ptr);
+ tc->null_refs++;
}

/*
@@ -285,6 +291,11 @@ static int talloc_unreference(const void

if (context == NULL) {
context = null_context;
+ }
+
+ if ((context == null_context) && tc->null_refs) {
+ tc->null_refs--;
+ return 0;
}

for (h=tc->refs;h;h=h->next) {
@@ -538,6 +549,11 @@ int talloc_free(void *ptr)
}

tc = talloc_chunk_from_ptr(ptr);
+
+ if (tc->null_refs) {
+ tc->null_refs--;
+ return -1;
+ }

if (tc->refs) {
talloc_reference_destructor(tc->refs);
diff -r c585f993385c -r cc48264ed647 tools/xenstore/xenstored_core.c
--- a/tools/xenstore/xenstored_core.c Tue Jul 24 14:52:16 2007 +0100
+++ b/tools/xenstore/xenstored_core.c Tue Jul 24 14:53:06 2007 +0100
@@ -299,10 +299,14 @@ static void set_fd(int fd, fd_set *set,
}


-static int initialize_set(fd_set *inset, fd_set *outset, int sock, int ro_sock)
-{
- struct connection *i;
+static int initialize_set(fd_set *inset, fd_set *outset, int sock, int ro_sock,
+ struct timeval **ptimeout)
+{
+ static struct timeval zero_timeout = { 0 };
+ struct connection *conn;
int max = -1;
+
+ *ptimeout = NULL;

FD_ZERO(inset);
FD_ZERO(outset);
@@ -314,13 +318,19 @@ static int initialize_set(fd_set *inset,
if (xce_handle != -1)
set_fd(xc_evtchn_fd(xce_handle), inset, &max);

- list_for_each_entry(i, &connections, list) {
- if (i->domain)
- continue;
- set_fd(i->fd, inset, &max);
- if (!list_empty(&i->out_list))
- FD_SET(i->fd, outset);
- }
+ list_for_each_entry(conn, &connections, list) {
+ if (conn->domain) {
+ if (domain_can_read(conn) ||
+ (domain_can_write(conn) &&
+ !list_empty(&conn->out_list)))
+ *ptimeout = &zero_timeout;
+ } else {
+ set_fd(conn->fd, inset, &max);
+ if (!list_empty(&conn->out_list))
+ FD_SET(conn->fd, outset);
+ }
+ }
+
return max;
}

@@ -1709,6 +1719,7 @@ int main(int argc, char *argv[])
bool no_domain_init = false;
const char *pidfile = NULL;
int evtchn_fd = -1;
+ struct timeval *timeout;

while ((opt = getopt_long(argc, argv, "DE:F:HNPS:t:T:RLVW:", options,
NULL)) != -1) {
@@ -1850,17 +1861,16 @@ int main(int argc, char *argv[])
evtchn_fd = xc_evtchn_fd(xce_handle);

/* Get ready to listen to the tools. */
- max = initialize_set(&inset, &outset, *sock, *ro_sock);
+ max = initialize_set(&inset, &outset, *sock, *ro_sock, &timeout);

/* Tell the kernel we're up and running. */
xenbus_notify_running();

/* Main loop. */
- /* FIXME: Rewrite so noone can starve. */
for (;;) {
- struct connection *i;
-
- if (select(max+1, &inset, &outset, NULL, NULL) < 0) {
+ struct connection *conn, *old_conn;
+
+ if (select(max+1, &inset, &outset, NULL, timeout) < 0) {
if (errno == EINTR)
continue;
barf_perror("Select failed");
@@ -1882,41 +1892,31 @@ int main(int argc, char *argv[])
if (evtchn_fd != -1 && FD_ISSET(evtchn_fd, &inset))
handle_event();

- list_for_each_entry(i, &connections, list) {
- if (i->domain)
- continue;
-
- /* Operations can delete themselves or others
- * (xs_release): list is not safe after input,
- * so break. */
- if (FD_ISSET(i->fd, &inset)) {
- handle_input(i);
- break;
+ conn = list_entry(connections.next, typeof(*conn), list);
+ while (&conn->list != &connections) {
+ talloc_increase_ref_count(conn);
+
+ if (conn->domain) {
+ if (domain_can_read(conn))
+ handle_input(conn);
+ if (domain_can_write(conn) &&
+ !list_empty(&conn->out_list))
+ handle_output(conn);
+ } else {
+ if (FD_ISSET(conn->fd, &inset))
+ handle_input(conn);
+ if (FD_ISSET(conn->fd, &outset))
+ handle_output(conn);
}
- if (FD_ISSET(i->fd, &outset)) {
- handle_output(i);
- break;
- }
- }
-
- /* Handle all possible I/O for domain connections. */
- more:
- list_for_each_entry(i, &connections, list) {
- if (!i->domain)
- continue;
-
- if (domain_can_read(i)) {
- handle_input(i);
- goto more;
- }
-
- if (domain_can_write(i) && !list_empty(&i->out_list)) {
- handle_output(i);
- goto more;
- }
- }
-
- max = initialize_set(&inset, &outset, *sock, *ro_sock);
+
+ old_conn = conn;
+ conn = list_entry(old_conn->list.next,
+ typeof(*conn), list);
+ talloc_free(old_conn);
+ }
+
+ max = initialize_set(&inset, &outset, *sock, *ro_sock,
+ &timeout);
}
}

diff -r c585f993385c -r cc48264ed647 tools/xenstore/xenstored_domain.c
--- a/tools/xenstore/xenstored_domain.c Tue Jul 24 14:52:16 2007 +0100
+++ b/tools/xenstore/xenstored_domain.c Tue Jul 24 14:53:06 2007 +0100
@@ -174,6 +174,8 @@ static int destroy_domain(void *_domain)

if (domain->interface)
munmap(domain->interface, getpagesize());
+
+ fire_watches(NULL, "@releaseDomain", false);

return 0;
}
@@ -197,7 +199,7 @@ static void domain_cleanup(void)
continue;
}
talloc_free(domain->conn);
- notify = 1;
+ notify = 0; /* destroy_domain() fires the watch */
}

if (notify)
@@ -246,7 +248,6 @@ static struct domain *new_domain(void *c
{
struct domain *domain;
int rc;
-

domain = talloc(context, struct domain);
domain->port = 0;
@@ -361,7 +362,7 @@ void do_introduce(struct connection *con
/* Now domain belongs to its connection. */
talloc_steal(domain->conn, domain);

- fire_watches(conn, "@introduceDomain", false);
+ fire_watches(NULL, "@introduceDomain", false);
} else if ((domain->mfn == mfn) && (domain->conn != conn)) {
/* Use XS_INTRODUCE for recreating the xenbus event-channel. */
if (domain->port)
@@ -413,8 +414,6 @@ void do_release(struct connection *conn,
}

talloc_free(domain->conn);
-
- fire_watches(conn, "@releaseDomain", false);

send_ack(conn, XS_RELEASE);
}

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog
[xen-unstable] Merge [ In reply to ]
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xensource.com>
# Date 1186041746 -3600
# Node ID e0b424bc95724904dbf984f8184e3647f54637dd
# Parent bf85b467ee8963c986997654452911bd82a27edb
# Parent 07364f8574b8fa9cb84d446c83fb13deee24fd81
Merge
---
tools/xenstore/utils.c | 7 +-
tools/xenstore/utils.h | 2
tools/xenstore/xenstored_core.c | 16 ++---
xen/arch/x86/domain_build.c | 7 +-
xen/common/libelf/libelf-dominfo.c | 101 ++++++++++++++++++++++++++++++++++++-
xen/common/libelf/libelf-loader.c | 44 +++++++++++++---
xen/common/libelf/libelf-tools.c | 30 ++++++++++
xen/include/public/libelf.h | 76 +++++++++++++++------------
8 files changed, 228 insertions(+), 55 deletions(-)

diff -r bf85b467ee89 -r e0b424bc9572 tools/xenstore/utils.c
--- a/tools/xenstore/utils.c Thu Aug 02 09:02:08 2007 +0100
+++ b/tools/xenstore/utils.c Thu Aug 02 09:02:26 2007 +0100
@@ -10,18 +10,17 @@
#include <signal.h>
#include "utils.h"

-void xprintf(const char *fmt, ...)
+static void default_xprintf(const char *fmt, ...)
{
va_list args;
-
- if (!stderr)
- return; /* could trace()? */

va_start(args, fmt);
vfprintf(stderr, fmt, args);
va_end(args);
fflush(stderr);
}
+
+void (*xprintf)(const char *fmt, ...) = default_xprintf;

void barf(const char *fmt, ...)
{
diff -r bf85b467ee89 -r e0b424bc9572 tools/xenstore/utils.h
--- a/tools/xenstore/utils.h Thu Aug 02 09:02:08 2007 +0100
+++ b/tools/xenstore/utils.h Thu Aug 02 09:02:26 2007 +0100
@@ -24,7 +24,7 @@ void barf(const char *fmt, ...) __attrib
void barf(const char *fmt, ...) __attribute__((noreturn));
void barf_perror(const char *fmt, ...) __attribute__((noreturn));

-void xprintf(const char *fmt, ...);
+void (*xprintf)(const char *fmt, ...);

#define eprintf(_fmt, _args...) xprintf("[ERR] %s" _fmt, __FUNCTION__, ##_args)

diff -r bf85b467ee89 -r e0b424bc9572 tools/xenstore/xenstored_core.c
--- a/tools/xenstore/xenstored_core.c Thu Aug 02 09:02:08 2007 +0100
+++ b/tools/xenstore/xenstored_core.c Thu Aug 02 09:02:26 2007 +0100
@@ -1880,14 +1880,14 @@ int main(int argc, char *argv[])

/* close stdin/stdout now we're ready to accept connections */
if (dofork) {
- close(STDIN_FILENO);
- close(STDOUT_FILENO);
- close(STDERR_FILENO);
-
- /* Get ourselves a nice xenstored crash if these are used. */
- stdin = NULL;
- stdout = NULL;
- stderr = NULL;
+ int devnull = open("/dev/null", O_RDWR);
+ if (devnull == -1)
+ barf_perror("Could not open /dev/null\n");
+ close(STDIN_FILENO); dup2(devnull, STDIN_FILENO);
+ close(STDOUT_FILENO); dup2(devnull, STDOUT_FILENO);
+ close(STDERR_FILENO); dup2(devnull, STDERR_FILENO);
+ close(devnull);
+ xprintf = trace;
}

signal(SIGHUP, trigger_reopen_log);
diff -r bf85b467ee89 -r e0b424bc9572 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c Thu Aug 02 09:02:08 2007 +0100
+++ b/xen/arch/x86/domain_build.c Thu Aug 02 09:02:26 2007 +0100
@@ -316,6 +316,9 @@ int __init construct_dom0(
parms.pae ? ", PAE" : "",
elf_msb(&elf) ? "msb" : "lsb",
elf.pstart, elf.pend);
+ if ( parms.bsd_symtab )
+ printk(" Dom0 symbol map 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
+ elf.sstart, elf.send);

if ( !compatible )
{
@@ -385,7 +388,7 @@ int __init construct_dom0(
v_start = parms.virt_base;
vkern_start = parms.virt_kstart;
vkern_end = parms.virt_kend;
- vinitrd_start = round_pgup(vkern_end);
+ vinitrd_start = round_pgup(parms.virt_end);
vinitrd_end = vinitrd_start + initrd_len;
vphysmap_start = round_pgup(vinitrd_end);
vphysmap_end = vphysmap_start + (nr_pages * (!is_pv_32on64_domain(d) ?
@@ -795,7 +798,7 @@ int __init construct_dom0(

/* Copy the OS image and free temporary buffer. */
elf.dest = (void*)vkern_start;
- elf_load_binary(&elf);
+ elf_xen_dom_load_binary(&elf, &parms);

if ( UNSET_ADDR != parms.virt_hypercall )
{
diff -r bf85b467ee89 -r e0b424bc9572 xen/common/libelf/libelf-dominfo.c
--- a/xen/common/libelf/libelf-dominfo.c Thu Aug 02 09:02:08 2007 +0100
+++ b/xen/common/libelf/libelf-dominfo.c Thu Aug 02 09:02:26 2007 +0100
@@ -333,6 +333,99 @@ static int elf_xen_note_check(struct elf
return 0;
}

+
+static void elf_xen_loadsymtab(struct elf_binary *elf,
+ struct elf_dom_parms *parms)
+{
+ unsigned long maxva, len;
+
+ if ( !parms->bsd_symtab )
+ return;
+
+ /* Calculate the required additional kernel space for the elf image */
+
+ /* The absolute base address of the elf image */
+ maxva = elf_round_up(elf, parms->virt_kend);
+ maxva += sizeof(long); /* Space to store the size of the elf image */
+ /* Space for the elf and elf section headers */
+ maxva += (elf_uval(elf, elf->ehdr, e_ehsize) +
+ elf_shdr_count(elf) * elf_uval(elf, elf->ehdr, e_shentsize));
+ maxva = elf_round_up(elf, maxva);
+
+ /* Space for the symbol and string tabs */
+ len = (unsigned long)elf->send - (unsigned long)elf->sstart;
+ maxva = elf_round_up(elf, maxva + len);
+
+ /* The address the kernel must expanded to */
+ parms->virt_end = maxva;
+}
+
+int elf_xen_dom_load_binary(struct elf_binary *elf,
+ struct elf_dom_parms *parms)
+{
+ elf_ehdr *sym_ehdr;
+ unsigned long shdr, symtab_addr;
+ unsigned long maxva, symbase;
+ uint8_t i;
+ char *p;
+
+ elf_load_binary(elf);
+
+ if ( !parms->bsd_symtab )
+ return 0;
+
+#define elf_hdr_elm(_elf, _hdr, _elm, _val) \
+do { \
+ if ( elf_64bit(_elf) ) \
+ (_hdr)->e64._elm = _val; \
+ else \
+ (_hdr)->e32._elm = _val; \
+} while ( 0 )
+
+ /* ehdr right after the kernel image (4 byte aligned) */
+ symbase = elf_round_up(elf, parms->virt_kend);
+ symtab_addr = maxva = symbase + sizeof(long);
+
+ /* Set up Elf header. */
+ sym_ehdr = (elf_ehdr *)symtab_addr;
+ maxva = elf_copy_ehdr(elf, sym_ehdr);
+
+ elf_hdr_elm(elf, sym_ehdr, e_phoff, 0);
+ elf_hdr_elm(elf, sym_ehdr, e_shoff, elf_uval(elf, elf->ehdr, e_ehsize));
+ elf_hdr_elm(elf, sym_ehdr, e_phentsize, 0);
+ elf_hdr_elm(elf, sym_ehdr, e_phnum, 0);
+
+ /* Copy Elf section headers. */
+ shdr = maxva;
+ maxva = elf_copy_shdr(elf, (elf_shdr *)shdr);
+
+ for ( i = 0; i < elf_shdr_count(elf); i++ )
+ {
+ uint8_t type;
+ unsigned long tmp;
+ type = elf_uval(elf, (elf_shdr *)shdr, sh_type);
+ if ( (type == SHT_STRTAB) || (type == SHT_SYMTAB) )
+ {
+ elf_msg(elf, "%s: shdr %i at 0x%p -> 0x%p\n", __func__, i,
+ elf_section_start(elf, (elf_shdr *)shdr), (void *)maxva);
+ tmp = elf_copy_section(elf, (elf_shdr *)shdr, (void *)maxva);
+ /* Mangled to be based on ELF header location. */
+ elf_hdr_elm(elf, (elf_shdr *)shdr, sh_offset,
+ maxva - symtab_addr);
+ maxva = tmp;
+ }
+ shdr += elf_uval(elf, elf->ehdr, e_shentsize);
+ }
+
+ /* Write down the actual sym size. */
+ p = (char *)symbase;
+ *(long *)p = maxva - symtab_addr; /* sym size */
+
+#undef elf_ehdr_elm
+
+ return 0;
+}
+
static int elf_xen_addr_calc_check(struct elf_binary *elf,
struct elf_dom_parms *parms)
{
@@ -374,9 +467,13 @@ static int elf_xen_addr_calc_check(struc
parms->virt_offset = parms->virt_base - parms->elf_paddr_offset;
parms->virt_kstart = elf->pstart + parms->virt_offset;
parms->virt_kend = elf->pend + parms->virt_offset;
+ parms->virt_end = parms->virt_kend;

if ( parms->virt_entry == UNSET_ADDR )
parms->virt_entry = elf_uval(elf, elf->ehdr, e_entry);
+
+ if ( parms->bsd_symtab )
+ elf_xen_loadsymtab(elf, parms);

elf_msg(elf, "%s: addresses:\n", __FUNCTION__);
elf_msg(elf, " virt_base = 0x%" PRIx64 "\n", parms->virt_base);
@@ -384,12 +481,14 @@ static int elf_xen_addr_calc_check(struc
elf_msg(elf, " virt_offset = 0x%" PRIx64 "\n", parms->virt_offset);
elf_msg(elf, " virt_kstart = 0x%" PRIx64 "\n", parms->virt_kstart);
elf_msg(elf, " virt_kend = 0x%" PRIx64 "\n", parms->virt_kend);
+ elf_msg(elf, " virt_end = 0x%" PRIx64 "\n", parms->virt_end);
elf_msg(elf, " virt_entry = 0x%" PRIx64 "\n", parms->virt_entry);

if ( (parms->virt_kstart > parms->virt_kend) ||
(parms->virt_entry < parms->virt_kstart) ||
(parms->virt_entry > parms->virt_kend) ||
- (parms->virt_base > parms->virt_kstart) )
+ (parms->virt_base > parms->virt_kstart) ||
+ (parms->virt_kend > parms->virt_end) )
{
elf_err(elf, "%s: ERROR: ELF start or entries are out of bounds.\n",
__FUNCTION__);
diff -r bf85b467ee89 -r e0b424bc9572 xen/common/libelf/libelf-loader.c
--- a/xen/common/libelf/libelf-loader.c Thu Aug 02 09:02:08 2007 +0100
+++ b/xen/common/libelf/libelf-loader.c Thu Aug 02 09:02:26 2007 +0100
@@ -10,6 +10,8 @@ int elf_init(struct elf_binary *elf, con
{
const elf_shdr *shdr;
uint64_t i, count, section, offset;
+ uint64_t low = -1;
+ uint64_t high = 0;

if ( !elf_is_elfbinary(image) )
{
@@ -24,7 +26,11 @@ int elf_init(struct elf_binary *elf, con
elf->class = elf->ehdr->e32.e_ident[EI_CLASS];
elf->data = elf->ehdr->e32.e_ident[EI_DATA];

- /* sanity check phdr */
+#ifdef VERBOSE
+ elf_set_verbose(elf);
+#endif
+
+ /* Sanity check phdr. */
offset = elf_uval(elf, elf->ehdr, e_phoff) +
elf_uval(elf, elf->ehdr, e_phentsize) * elf_phdr_count(elf);
if ( offset > elf->size )
@@ -34,7 +40,7 @@ int elf_init(struct elf_binary *elf, con
return -1;
}

- /* sanity check shdr */
+ /* Sanity check shdr. */
offset = elf_uval(elf, elf->ehdr, e_shoff) +
elf_uval(elf, elf->ehdr, e_shentsize) * elf_shdr_count(elf);
if ( offset > elf->size )
@@ -44,29 +50,55 @@ int elf_init(struct elf_binary *elf, con
return -1;
}

- /* find section string table */
+ /* Find section string table. */
section = elf_uval(elf, elf->ehdr, e_shstrndx);
shdr = elf_shdr_by_index(elf, section);
if ( shdr != NULL )
elf->sec_strtab = elf_section_start(elf, shdr);

- /* find symbol table, symbol string table */
+ /* Find symbol table and symbol string table. */
count = elf_shdr_count(elf);
for ( i = 0; i < count; i++ )
{
+ const char *sh_symend, *sh_strend;
+
shdr = elf_shdr_by_index(elf, i);
if ( elf_uval(elf, shdr, sh_type) != SHT_SYMTAB )
continue;
elf->sym_tab = shdr;
+ sh_symend = (const char *)elf_section_end(elf, shdr);
shdr = elf_shdr_by_index(elf, elf_uval(elf, shdr, sh_link));
if ( shdr == NULL )
{
elf->sym_tab = NULL;
+ sh_symend = 0;
continue;
}
elf->sym_strtab = elf_section_start(elf, shdr);
- break;
- }
+ sh_strend = (const char *)elf_section_end(elf, shdr);
+
+ if ( low > (unsigned long)elf->sym_tab )
+ low = (unsigned long)elf->sym_tab;
+ if ( low > (unsigned long)shdr )
+ low = (unsigned long)shdr;
+
+ if ( high < ((unsigned long)sh_symend) )
+ high = (unsigned long)sh_symend;
+ if ( high < ((unsigned long)sh_strend) )
+ high = (unsigned long)sh_strend;
+
+ elf_msg(elf, "%s: shdr: sym_tab=%p size=0x%" PRIx64 "\n",
+ __FUNCTION__, elf->sym_tab,
+ elf_uval(elf, elf->sym_tab, sh_size));
+ elf_msg(elf, "%s: shdr: str_tab=%p size=0x%" PRIx64 "\n",
+ __FUNCTION__, elf->sym_strtab, elf_uval(elf, shdr, sh_size));
+
+ elf->sstart = low;
+ elf->send = high;
+ elf_msg(elf, "%s: symbol map: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
+ __FUNCTION__, elf->sstart, elf->send);
+ }
+
return 0;
}

diff -r bf85b467ee89 -r e0b424bc9572 xen/common/libelf/libelf-tools.c
--- a/xen/common/libelf/libelf-tools.c Thu Aug 02 09:02:08 2007 +0100
+++ b/xen/common/libelf/libelf-tools.c Thu Aug 02 09:02:26 2007 +0100
@@ -236,6 +236,36 @@ int elf_phdr_is_loadable(struct elf_bina
uint64_t p_flags = elf_uval(elf, phdr, p_flags);

return ((p_type == PT_LOAD) && (p_flags & (PF_W | PF_X)) != 0);
+}
+
+unsigned long
+elf_copy_ehdr(struct elf_binary *elf, void *dest)
+{
+ uint64_t size;
+
+ size = elf_uval(elf, elf->ehdr, e_ehsize);
+ memcpy(dest, elf->ehdr, size);
+ return elf_round_up(elf, (unsigned long)(dest) + size);
+}
+
+unsigned long
+elf_copy_shdr(struct elf_binary *elf, void *dest)
+{
+ uint64_t size;
+
+ size = elf_shdr_count(elf) * elf_uval(elf, elf->ehdr, e_shentsize);
+ memcpy(dest, elf->image + elf_uval(elf, elf->ehdr, e_shoff), size);
+ return elf_round_up(elf, (unsigned long)(dest) + size);
+}
+
+unsigned long
+elf_copy_section(struct elf_binary *elf, const elf_shdr *shdr, void *dest)
+{
+ uint64_t size;
+
+ size = elf_uval(elf, shdr, sh_size);
+ memcpy(dest, elf_section_start(elf, shdr), size);
+ return elf_round_up(elf, (unsigned long)(dest) + size);
}

/*
diff -r bf85b467ee89 -r e0b424bc9572 xen/include/public/libelf.h
--- a/xen/include/public/libelf.h Thu Aug 02 09:02:08 2007 +0100
+++ b/xen/include/public/libelf.h Thu Aug 02 09:02:26 2007 +0100
@@ -65,6 +65,8 @@ struct elf_binary {

/* loaded to */
char *dest;
+ uint64_t sstart;
+ uint64_t send;
uint64_t pstart;
uint64_t pend;
uint64_t reloc_offset;
@@ -91,33 +93,32 @@ struct elf_binary {
#define elf_lsb(elf) (ELFDATA2LSB == (elf)->data)
#define elf_swap(elf) (NATIVE_ELFDATA != (elf)->data)

-#define elf_uval(elf, str, elem) \
- ((ELFCLASS64 == (elf)->class) \
- ? elf_access_unsigned((elf), (str), \
- offsetof(typeof(*(str)),e64.elem), \
- sizeof((str)->e64.elem)) \
- : elf_access_unsigned((elf), (str), \
- offsetof(typeof(*(str)),e32.elem), \
- sizeof((str)->e32.elem)))
-
-#define elf_sval(elf, str, elem) \
- ((ELFCLASS64 == (elf)->class) \
- ? elf_access_signed((elf), (str), \
- offsetof(typeof(*(str)),e64.elem), \
- sizeof((str)->e64.elem)) \
- : elf_access_signed((elf), (str), \
- offsetof(typeof(*(str)),e32.elem), \
- sizeof((str)->e32.elem)))
-
-#define elf_size(elf, str) \
- ((ELFCLASS64 == (elf)->class) \
- ? sizeof((str)->e64) \
- : sizeof((str)->e32))
+#define elf_uval(elf, str, elem) \
+ ((ELFCLASS64 == (elf)->class) \
+ ? elf_access_unsigned((elf), (str), \
+ offsetof(typeof(*(str)),e64.elem), \
+ sizeof((str)->e64.elem)) \
+ : elf_access_unsigned((elf), (str), \
+ offsetof(typeof(*(str)),e32.elem), \
+ sizeof((str)->e32.elem)))
+
+#define elf_sval(elf, str, elem) \
+ ((ELFCLASS64 == (elf)->class) \
+ ? elf_access_signed((elf), (str), \
+ offsetof(typeof(*(str)),e64.elem), \
+ sizeof((str)->e64.elem)) \
+ : elf_access_signed((elf), (str), \
+ offsetof(typeof(*(str)),e32.elem), \
+ sizeof((str)->e32.elem)))
+
+#define elf_size(elf, str) \
+ ((ELFCLASS64 == (elf)->class) \
+ ? sizeof((str)->e64) : sizeof((str)->e32))

uint64_t elf_access_unsigned(struct elf_binary *elf, const void *ptr,
- uint64_t offset, size_t size);
+ uint64_t offset, size_t size);
int64_t elf_access_signed(struct elf_binary *elf, const void *ptr,
- uint64_t offset, size_t size);
+ uint64_t offset, size_t size);

uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr);

@@ -149,6 +150,11 @@ int elf_is_elfbinary(const void *image);
int elf_is_elfbinary(const void *image);
int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr);

+unsigned long elf_copy_ehdr(struct elf_binary *elf, void *dest);
+unsigned long elf_copy_shdr(struct elf_binary *elf, void *dest);
+unsigned long elf_copy_section(struct elf_binary *elf,
+ const elf_shdr *shdr, void *dest);
+
/* ------------------------------------------------------------------------ */
/* xc_libelf_loader.c */

@@ -185,8 +191,8 @@ struct xen_elfnote {
enum xen_elfnote_type type;
const char *name;
union {
- const char *str;
- uint64_t num;
+ const char *str;
+ uint64_t num;
} data;
};

@@ -215,7 +221,8 @@ struct elf_dom_parms {
/* calculated */
uint64_t virt_offset;
uint64_t virt_kstart;
- uint64_t virt_kend;
+ uint64_t virt_kend; /* end of kernel image */
+ uint64_t virt_end; /* end of kernel symtab (== virt_kend if none) */
};

static inline void elf_xen_feature_set(int nr, uint32_t * addr)
@@ -228,14 +235,17 @@ static inline int elf_xen_feature_get(in
}

int elf_xen_parse_features(const char *features,
- uint32_t *supported,
- uint32_t *required);
+ uint32_t *supported,
+ uint32_t *required);
int elf_xen_parse_note(struct elf_binary *elf,
- struct elf_dom_parms *parms,
- const elf_note *note);
+ struct elf_dom_parms *parms,
+ const elf_note *note);
int elf_xen_parse_guest_info(struct elf_binary *elf,
- struct elf_dom_parms *parms);
+ struct elf_dom_parms *parms);
int elf_xen_parse(struct elf_binary *elf,
- struct elf_dom_parms *parms);
+ struct elf_dom_parms *parms);
+
+int elf_xen_dom_load_binary(struct elf_binary *elf,
+ struct elf_dom_parms *parms);

#endif /* __XC_LIBELF__ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog
[xen-unstable] Merge [ In reply to ]
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xensource.com>
# Date 1186572443 -3600
# Node ID 35337d5c83f99daaf3c23c96c980bdc3b1243f8d
# Parent 123ad31e9c3bb98685fea54a2e4d9f4cf57ac44f
# Parent da2c7dab1a3ad37a9e28d1e5c090affc58bebc5d
Merge
---
xen/arch/x86/hvm/hvm.c | 88 +++++++-
xen/arch/x86/hvm/svm/svm.c | 392 +++++++++---------------------------
xen/arch/x86/hvm/svm/vmcb.c | 17 -
xen/arch/x86/hvm/vioapic.c | 4
xen/arch/x86/hvm/vmx/vmcs.c | 12 -
xen/arch/x86/hvm/vmx/vmx.c | 386 +++++++++++------------------------
xen/arch/x86/hvm/vmx/x86_32/exits.S | 2
xen/arch/x86/hvm/vmx/x86_64/exits.S | 2
xen/arch/x86/mm.c | 4
xen/arch/x86/mm/hap/guest_walk.c | 2
xen/arch/x86/mm/hap/hap.c | 43 +--
xen/arch/x86/mm/shadow/common.c | 4
xen/arch/x86/mm/shadow/multi.c | 27 +-
xen/arch/x86/x86_32/asm-offsets.c | 2
xen/arch/x86/x86_64/asm-offsets.c | 2
xen/include/asm-x86/hvm/hvm.h | 69 +-----
xen/include/asm-x86/hvm/support.h | 3
xen/include/asm-x86/hvm/svm/asid.h | 14 -
xen/include/asm-x86/hvm/svm/vmcb.h | 5
xen/include/asm-x86/hvm/vcpu.h | 12 +
xen/include/asm-x86/hvm/vmx/vmcs.h | 6
xen/include/asm-x86/hvm/vmx/vmx.h | 4
22 files changed, 393 insertions(+), 707 deletions(-)

diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c Wed Aug 08 12:27:23 2007 +0100
@@ -520,6 +520,87 @@ void hvm_triple_fault(void)
domain_shutdown(v->domain, SHUTDOWN_reboot);
}

+int hvm_set_cr3(unsigned long value)
+{
+ unsigned long old_base_mfn, mfn;
+ struct vcpu *v = current;
+
+ if ( paging_mode_hap(v->domain) || !hvm_paging_enabled(v) )
+ {
+ /* Nothing to do. */
+ }
+ else if ( value == v->arch.hvm_vcpu.guest_cr[3] )
+ {
+ /* Shadow-mode TLB flush. Invalidate the shadow. */
+ mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
+ if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
+ goto bad_cr3;
+ }
+ else
+ {
+ /* Shadow-mode CR3 change. Check PDBR and then make a new shadow. */
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
+ mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
+ if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
+ goto bad_cr3;
+
+ old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
+ v->arch.guest_table = pagetable_from_pfn(mfn);
+
+ if ( old_base_mfn )
+ put_page(mfn_to_page(old_base_mfn));
+
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
+ }
+
+ v->arch.hvm_vcpu.guest_cr[3] = value;
+ paging_update_cr3(v);
+ return 1;
+
+ bad_cr3:
+ gdprintk(XENLOG_ERR, "Invalid CR3\n");
+ domain_crash(v->domain);
+ return 0;
+}
+
+int hvm_set_cr4(unsigned long value)
+{
+ struct vcpu *v = current;
+ unsigned long old_cr;
+
+ if ( value & HVM_CR4_GUEST_RESERVED_BITS )
+ {
+ HVM_DBG_LOG(DBG_LEVEL_1,
+ "Guest attempts to set reserved bit in CR4: %lx",
+ value);
+ goto gpf;
+ }
+
+ if ( !(value & X86_CR4_PAE) && hvm_long_mode_enabled(v) )
+ {
+ HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
+ "EFER.LMA is set");
+ goto gpf;
+ }
+
+ old_cr = v->arch.hvm_vcpu.guest_cr[4];
+ v->arch.hvm_vcpu.guest_cr[4] = value;
+ v->arch.hvm_vcpu.hw_cr[4] = value | HVM_CR4_HOST_MASK;
+ if ( paging_mode_hap(v->domain) )
+ v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
+ hvm_update_guest_cr(v, 4);
+
+ /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
+ if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
+ paging_update_paging_modes(v);
+
+ return 1;
+
+ gpf:
+ hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ return 0;
+}
+
/*
* __hvm_copy():
* @buf = hypervisor buffer
@@ -668,7 +749,6 @@ static hvm_hypercall_t *hvm_hypercall32_
static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
HYPERCALL(memory_op),
[ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
- HYPERCALL(multicall),
HYPERCALL(xen_version),
HYPERCALL(grant_table_op),
HYPERCALL(event_channel_op),
@@ -811,12 +891,6 @@ int hvm_do_hypercall(struct cpu_user_reg

return (this_cpu(hc_preempted) ? HVM_HCALL_preempted :
flush ? HVM_HCALL_invalidate : HVM_HCALL_completed);
-}
-
-void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
-{
- v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
- hvm_funcs.update_guest_cr3(v);
}

static void hvm_latch_shinfo_size(struct domain *d)
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Wed Aug 08 12:27:23 2007 +0100
@@ -78,7 +78,7 @@ static void svm_inject_exception(
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

if ( trap == TRAP_page_fault )
- HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_svm.cpu_cr2, error_code);
+ HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
else
HVMTRACE_2D(INJ_EXC, v, trap, error_code);

@@ -97,55 +97,14 @@ static void svm_cpu_down(void)
write_efer(read_efer() & ~EFER_SVME);
}

+static int svm_lme_is_set(struct vcpu *v)
+{
#ifdef __x86_64__
-
-static int svm_lme_is_set(struct vcpu *v)
-{
- u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
+ u64 guest_efer = v->arch.hvm_vcpu.guest_efer;
return guest_efer & EFER_LME;
-}
-
-static int svm_long_mode_enabled(struct vcpu *v)
-{
- u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
- return guest_efer & EFER_LMA;
-}
-
-#else /* __i386__ */
-
-static int svm_lme_is_set(struct vcpu *v)
-{ return 0; }
-static int svm_long_mode_enabled(struct vcpu *v)
-{ return 0; }
-
+#else
+ return 0;
#endif
-
-static int svm_cr4_pae_is_set(struct vcpu *v)
-{
- unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
- return guest_cr4 & X86_CR4_PAE;
-}
-
-static int svm_paging_enabled(struct vcpu *v)
-{
- unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
- return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
-}
-
-static int svm_pae_enabled(struct vcpu *v)
-{
- unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
- return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
-}
-
-static int svm_nx_enabled(struct vcpu *v)
-{
- return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
-}
-
-static int svm_pgbit_test(struct vcpu *v)
-{
- return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
}

static void svm_store_cpu_guest_regs(
@@ -165,10 +124,10 @@ static void svm_store_cpu_guest_regs(
if ( crs != NULL )
{
/* Returning the guest's regs */
- crs[0] = v->arch.hvm_svm.cpu_shadow_cr0;
- crs[2] = v->arch.hvm_svm.cpu_cr2;
- crs[3] = v->arch.hvm_svm.cpu_cr3;
- crs[4] = v->arch.hvm_svm.cpu_shadow_cr4;
+ crs[0] = v->arch.hvm_vcpu.guest_cr[0];
+ crs[2] = v->arch.hvm_vcpu.guest_cr[2];
+ crs[3] = v->arch.hvm_vcpu.guest_cr[3];
+ crs[4] = v->arch.hvm_vcpu.guest_cr[4];
}
}

@@ -202,7 +161,8 @@ static enum handler_return long_mode_do_
if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
{
/* EFER.LME transition from 0 to 1. */
- if ( svm_paging_enabled(v) || !svm_cr4_pae_is_set(v) )
+ if ( hvm_paging_enabled(v) ||
+ !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
{
gdprintk(XENLOG_WARNING, "Trying to set LME bit when "
"in paging mode or PAE bit is not set\n");
@@ -212,7 +172,7 @@ static enum handler_return long_mode_do_
else if ( !(msr_content & EFER_LME) && svm_lme_is_set(v) )
{
/* EFER.LME transistion from 1 to 0. */
- if ( svm_paging_enabled(v) )
+ if ( hvm_paging_enabled(v) )
{
gdprintk(XENLOG_WARNING,
"Trying to clear EFER.LME while paging enabled\n");
@@ -220,9 +180,9 @@ static enum handler_return long_mode_do_
}
}

- v->arch.hvm_svm.cpu_shadow_efer = msr_content;
+ v->arch.hvm_vcpu.guest_efer = msr_content;
vmcb->efer = msr_content | EFER_SVME;
- if ( !svm_paging_enabled(v) )
+ if ( !hvm_paging_enabled(v) )
vmcb->efer &= ~(EFER_LME | EFER_LMA);

break;
@@ -297,10 +257,10 @@ int svm_vmcb_save(struct vcpu *v, struct
c->rsp = vmcb->rsp;
c->rflags = vmcb->rflags;

- c->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
- c->cr2 = v->arch.hvm_svm.cpu_cr2;
- c->cr3 = v->arch.hvm_svm.cpu_cr3;
- c->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+ c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+ c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+ c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
+ c->cr4 = v->arch.hvm_vcpu.guest_cr[4];

#ifdef HVM_DEBUG_SUSPEND
printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
@@ -383,10 +343,10 @@ int svm_vmcb_restore(struct vcpu *v, str
vmcb->rsp = c->rsp;
vmcb->rflags = c->rflags;

- v->arch.hvm_svm.cpu_shadow_cr0 = c->cr0;
+ v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
vmcb->cr0 = c->cr0 | X86_CR0_WP | X86_CR0_ET | X86_CR0_PG;

- v->arch.hvm_svm.cpu_cr2 = c->cr2;
+ v->arch.hvm_vcpu.guest_cr[2] = c->cr2;

#ifdef HVM_DEBUG_SUSPEND
printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
@@ -396,13 +356,13 @@ int svm_vmcb_restore(struct vcpu *v, str
c->cr4);
#endif

- if ( !svm_paging_enabled(v) )
+ if ( !hvm_paging_enabled(v) )
{
printk("%s: paging not enabled.\n", __func__);
goto skip_cr3;
}

- if ( c->cr3 == v->arch.hvm_svm.cpu_cr3 )
+ if ( c->cr3 == v->arch.hvm_vcpu.guest_cr[3] )
{
/*
* This is simple TLB flush, implying the guest has
@@ -428,12 +388,12 @@ int svm_vmcb_restore(struct vcpu *v, str
v->arch.guest_table = pagetable_from_pfn(mfn);
if (old_base_mfn)
put_page(mfn_to_page(old_base_mfn));
- v->arch.hvm_svm.cpu_cr3 = c->cr3;
+ v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
}

skip_cr3:
vmcb->cr4 = c->cr4 | HVM_CR4_HOST_MASK;
- v->arch.hvm_svm.cpu_shadow_cr4 = c->cr4;
+ v->arch.hvm_vcpu.guest_cr[4] = c->cr4;

vmcb->idtr.limit = c->idtr_limit;
vmcb->idtr.base = c->idtr_base;
@@ -488,8 +448,8 @@ int svm_vmcb_restore(struct vcpu *v, str

if ( paging_mode_hap(v->domain) )
{
- vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
- vmcb->cr4 = (v->arch.hvm_svm.cpu_shadow_cr4 |
+ vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+ vmcb->cr4 = (v->arch.hvm_vcpu.guest_cr[4] |
(HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
vmcb->cr3 = c->cr3;
vmcb->np_enable = 1;
@@ -521,7 +481,6 @@ int svm_vmcb_restore(struct vcpu *v, str
}

paging_update_paging_modes(v);
- svm_asid_g_update_paging(v);

return 0;

@@ -540,7 +499,7 @@ static void svm_save_cpu_state(struct vc
data->msr_star = vmcb->star;
data->msr_cstar = vmcb->cstar;
data->msr_syscall_mask = vmcb->sfmask;
- data->msr_efer = v->arch.hvm_svm.cpu_shadow_efer;
+ data->msr_efer = v->arch.hvm_vcpu.guest_efer;
data->msr_flags = -1ULL;

data->tsc = hvm_get_guest_time(v);
@@ -556,7 +515,7 @@ static void svm_load_cpu_state(struct vc
vmcb->star = data->msr_star;
vmcb->cstar = data->msr_cstar;
vmcb->sfmask = data->msr_syscall_mask;
- v->arch.hvm_svm.cpu_shadow_efer = data->msr_efer;
+ v->arch.hvm_vcpu.guest_efer = data->msr_efer;
vmcb->efer = data->msr_efer | EFER_SVME;
/* VMCB's EFER.LME isn't set unless we're actually in long mode
* (see long_mode_do_msr_write()) */
@@ -605,11 +564,11 @@ static int svm_guest_x86_mode(struct vcp
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

- if ( unlikely(!(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PE)) )
+ if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
return 0;
if ( unlikely(vmcb->rflags & X86_EFLAGS_VM) )
return 1;
- if ( svm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
+ if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
return 8;
return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
}
@@ -619,9 +578,20 @@ static void svm_update_host_cr3(struct v
/* SVM doesn't have a HOST_CR3 equivalent to update. */
}

-static void svm_update_guest_cr3(struct vcpu *v)
-{
- v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
+static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
+{
+ switch ( cr )
+ {
+ case 3:
+ v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
+ svm_asid_inv_asid(v);
+ break;
+ case 4:
+ v->arch.hvm_svm.vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4];
+ break;
+ default:
+ BUG();
+ }
}

static void svm_flush_guest_tlbs(void)
@@ -639,24 +609,6 @@ static void svm_update_vtpr(struct vcpu
vmcb->vintr.fields.tpr = value & 0x0f;
}

-static unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
-{
- switch ( num )
- {
- case 0:
- return v->arch.hvm_svm.cpu_shadow_cr0;
- case 2:
- return v->arch.hvm_svm.cpu_cr2;
- case 3:
- return v->arch.hvm_svm.cpu_cr3;
- case 4:
- return v->arch.hvm_svm.cpu_shadow_cr4;
- default:
- BUG();
- }
- return 0; /* dummy */
-}
-
static void svm_sync_vmcb(struct vcpu *v)
{
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
@@ -674,7 +626,7 @@ static unsigned long svm_get_segment_bas
static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- int long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
+ int long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);

switch ( seg )
{
@@ -748,7 +700,7 @@ static void svm_stts(struct vcpu *v)
* then this is not necessary: no FPU activity can occur until the guest
* clears CR0.TS, and we will initialise the FPU when that happens.
*/
- if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
+ if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
{
v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
vmcb->cr0 |= X86_CR0_TS;
@@ -949,7 +901,7 @@ static void svm_hvm_inject_exception(
{
struct vcpu *v = current;
if ( trapnr == TRAP_page_fault )
- v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_svm.cpu_cr2 = cr2;
+ v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2] = cr2;
svm_inject_exception(v, trapnr, (errcode != -1), errcode);
}

@@ -970,17 +922,12 @@ static struct hvm_function_table svm_fun
.load_cpu_guest_regs = svm_load_cpu_guest_regs,
.save_cpu_ctxt = svm_save_vmcb_ctxt,
.load_cpu_ctxt = svm_load_vmcb_ctxt,
- .paging_enabled = svm_paging_enabled,
- .long_mode_enabled = svm_long_mode_enabled,
- .pae_enabled = svm_pae_enabled,
- .nx_enabled = svm_nx_enabled,
.interrupts_enabled = svm_interrupts_enabled,
.guest_x86_mode = svm_guest_x86_mode,
- .get_guest_ctrl_reg = svm_get_ctrl_reg,
.get_segment_base = svm_get_segment_base,
.get_segment_register = svm_get_segment_register,
.update_host_cr3 = svm_update_host_cr3,
- .update_guest_cr3 = svm_update_guest_cr3,
+ .update_guest_cr = svm_update_guest_cr,
.flush_guest_tlbs = svm_flush_guest_tlbs,
.update_vtpr = svm_update_vtpr,
.stts = svm_stts,
@@ -1075,7 +1022,7 @@ static void svm_do_no_device_fault(struc
setup_fpu(v);
vmcb->exception_intercepts &= ~(1U << TRAP_no_device);

- if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
+ if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
vmcb->cr0 &= ~X86_CR0_TS;
}

@@ -1347,7 +1294,7 @@ static int svm_get_io_address(
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

/* If we're in long mode, don't check the segment presence & limit */
- long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
+ long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);

/* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit.
* l field combined with EFER_LMA says whether it's 16 or 64 bit.
@@ -1650,7 +1597,7 @@ static int svm_set_cr0(unsigned long val
static int svm_set_cr0(unsigned long value)
{
struct vcpu *v = current;
- unsigned long mfn, old_value = v->arch.hvm_svm.cpu_shadow_cr0;
+ unsigned long mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
unsigned long old_base_mfn;

@@ -1687,25 +1634,25 @@ static int svm_set_cr0(unsigned long val
{
if ( svm_lme_is_set(v) )
{
- if ( !svm_cr4_pae_is_set(v) )
+ if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
svm_inject_exception(v, TRAP_gp_fault, 1, 0);
return 0;
}
HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode");
- v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
+ v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
vmcb->efer |= EFER_LMA | EFER_LME;
}

if ( !paging_mode_hap(v->domain) )
{
/* The guest CR3 must be pointing to the guest physical. */
- mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
+ mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain))
{
gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
- v->arch.hvm_svm.cpu_cr3, mfn);
+ v->arch.hvm_vcpu.guest_cr[3], mfn);
domain_crash(v->domain);
return 0;
}
@@ -1717,42 +1664,36 @@ static int svm_set_cr0(unsigned long val
put_page(mfn_to_page(old_base_mfn));

HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
- v->arch.hvm_vmx.cpu_cr3, mfn);
+ v->arch.hvm_vcpu.guest_cr[3], mfn);
}
}
else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
{
/* When CR0.PG is cleared, LMA is cleared immediately. */
- if ( svm_long_mode_enabled(v) )
+ if ( hvm_long_mode_enabled(v) )
{
vmcb->efer &= ~(EFER_LME | EFER_LMA);
- v->arch.hvm_svm.cpu_shadow_efer &= ~EFER_LMA;
- }
-
- if ( !paging_mode_hap(v->domain) && v->arch.hvm_svm.cpu_cr3 )
+ v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
+ }
+
+ if ( !paging_mode_hap(v->domain) && v->arch.hvm_vcpu.guest_cr[3] )
{
put_page(mfn_to_page(get_mfn_from_gpfn(
- v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
+ v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
v->arch.guest_table = pagetable_null();
}
}

- vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0 = value;
+ vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0] = value;
if ( !paging_mode_hap(v->domain) )
vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;

if ( (value ^ old_value) & X86_CR0_PG )
- {
paging_update_paging_modes(v);
- svm_asid_g_update_paging(v);
- }

return 1;
}

-/*
- * Read from control registers. CR0 and CR4 are read from the shadow.
- */
static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
{
unsigned long value = 0;
@@ -1763,16 +1704,16 @@ static void mov_from_cr(int cr, int gp,
switch ( cr )
{
case 0:
- value = v->arch.hvm_svm.cpu_shadow_cr0;
+ value = v->arch.hvm_vcpu.guest_cr[0];
break;
case 2:
value = vmcb->cr2;
break;
case 3:
- value = (unsigned long)v->arch.hvm_svm.cpu_cr3;
+ value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
break;
case 4:
- value = (unsigned long)v->arch.hvm_svm.cpu_shadow_cr4;
+ value = (unsigned long)v->arch.hvm_vcpu.guest_cr[4];
break;
case 8:
value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
@@ -1791,13 +1732,9 @@ static void mov_from_cr(int cr, int gp,
HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx", cr, value);
}

-
-/*
- * Write to control registers
- */
static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
{
- unsigned long value, old_cr, old_base_mfn, mfn;
+ unsigned long value;
struct vcpu *v = current;
struct vlapic *vlapic = vcpu_vlapic(v);
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -1815,131 +1752,10 @@ static int mov_to_cr(int gpreg, int cr,
return svm_set_cr0(value);

case 3:
- if ( paging_mode_hap(v->domain) )
- {
- vmcb->cr3 = v->arch.hvm_svm.cpu_cr3 = value;
- break;
- }
-
- /* If paging is not enabled yet, simply copy the value to CR3. */
- if ( !svm_paging_enabled(v) )
- {
- v->arch.hvm_svm.cpu_cr3 = value;
- break;
- }
-
- /* We make a new one if the shadow does not exist. */
- if ( value == v->arch.hvm_svm.cpu_cr3 )
- {
- /*
- * This is simple TLB flush, implying the guest has
- * removed some translation or changed page attributes.
- * We simply invalidate the shadow.
- */
- mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
- if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
- goto bad_cr3;
- paging_update_cr3(v);
- /* signal paging update to ASID handler */
- svm_asid_g_mov_to_cr3 (v);
- }
- else
- {
- /*
- * If different, make a shadow. Check if the PDBR is valid
- * first.
- */
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
- mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
- if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
- goto bad_cr3;
-
- old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
- v->arch.guest_table = pagetable_from_pfn(mfn);
-
- if ( old_base_mfn )
- put_page(mfn_to_page(old_base_mfn));
-
- v->arch.hvm_svm.cpu_cr3 = value;
- update_cr3(v);
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
- /* signal paging update to ASID handler */
- svm_asid_g_mov_to_cr3 (v);
- }
- break;
-
- case 4: /* CR4 */
- if ( value & HVM_CR4_GUEST_RESERVED_BITS )
- {
- HVM_DBG_LOG(DBG_LEVEL_1,
- "Guest attempts to set reserved bit in CR4: %lx",
- value);
- svm_inject_exception(v, TRAP_gp_fault, 1, 0);
- break;
- }
-
- if ( paging_mode_hap(v->domain) )
- {
- v->arch.hvm_svm.cpu_shadow_cr4 = value;
- vmcb->cr4 = value | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
- paging_update_paging_modes(v);
- /* signal paging update to ASID handler */
- svm_asid_g_update_paging (v);
- break;
- }
-
- old_cr = v->arch.hvm_svm.cpu_shadow_cr4;
- if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
- {
- if ( svm_pgbit_test(v) )
- {
-#if CONFIG_PAGING_LEVELS >= 3
- /* The guest is a 32-bit PAE guest. */
- unsigned long mfn, old_base_mfn;
- mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
- if ( !mfn_valid(mfn) ||
- !get_page(mfn_to_page(mfn), v->domain) )
- goto bad_cr3;
-
- /*
- * Now arch.guest_table points to machine physical.
- */
- old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
- v->arch.guest_table = pagetable_from_pfn(mfn);
- if ( old_base_mfn )
- put_page(mfn_to_page(old_base_mfn));
- paging_update_paging_modes(v);
- /* signal paging update to ASID handler */
- svm_asid_g_update_paging (v);
-
- HVM_DBG_LOG(DBG_LEVEL_VMMU,
- "Update CR3 value = %lx, mfn = %lx",
- v->arch.hvm_svm.cpu_cr3, mfn);
-#endif
- }
- }
- else if ( !(value & X86_CR4_PAE) )
- {
- if ( svm_long_mode_enabled(v) )
- {
- svm_inject_exception(v, TRAP_gp_fault, 1, 0);
- }
- }
-
- v->arch.hvm_svm.cpu_shadow_cr4 = value;
- vmcb->cr4 = value | HVM_CR4_HOST_MASK;
-
- /*
- * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
- * all TLB entries except global entries.
- */
- if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
- {
- paging_update_paging_modes(v);
- /* signal paging update to ASID handler */
- svm_asid_g_update_paging (v);
- }
- break;
+ return hvm_set_cr3(value);
+
+ case 4:
+ return hvm_set_cr4(value);

case 8:
vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
@@ -1953,19 +1769,11 @@ static int mov_to_cr(int gpreg, int cr,
}

return 1;
-
- bad_cr3:
- gdprintk(XENLOG_ERR, "Invalid CR3\n");
- domain_crash(v->domain);
- return 0;
-}
-
-
-#define ARR_SIZE(x) (sizeof(x) / sizeof(x[0]))
-
-
-static int svm_cr_access(struct vcpu *v, unsigned int cr, unsigned int type,
- struct cpu_user_regs *regs)
+}
+
+static void svm_cr_access(
+ struct vcpu *v, unsigned int cr, unsigned int type,
+ struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
int inst_len = 0;
@@ -1990,12 +1798,12 @@ static int svm_cr_access(struct vcpu *v,
if ( type == TYPE_MOV_TO_CR )
{
inst_len = __get_instruction_length_from_list(
- v, list_a, ARR_SIZE(list_a), &buffer[index], &match);
+ v, list_a, ARRAY_SIZE(list_a), &buffer[index], &match);
}
else /* type == TYPE_MOV_FROM_CR */
{
inst_len = __get_instruction_length_from_list(
- v, list_b, ARR_SIZE(list_b), &buffer[index], &match);
+ v, list_b, ARRAY_SIZE(list_b), &buffer[index], &match);
}

ASSERT(inst_len > 0);
@@ -2008,7 +1816,8 @@ static int svm_cr_access(struct vcpu *v,

HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long) vmcb->rip);

- switch (match)
+ switch ( match )
+
{
case INSTR_MOV2CR:
gpreg = decode_src_reg(prefix, buffer[index+2]);
@@ -2025,18 +1834,18 @@ static int svm_cr_access(struct vcpu *v,
setup_fpu(current);
vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
- v->arch.hvm_svm.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
+ v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
break;

case INSTR_LMSW:
gpreg = decode_src_reg(prefix, buffer[index+2]);
value = get_reg(gpreg, regs, vmcb) & 0xF;
- value = (v->arch.hvm_svm.cpu_shadow_cr0 & ~0xF) | value;
+ value = (v->arch.hvm_vcpu.guest_cr[0] & ~0xF) | value;
result = svm_set_cr0(value);
break;

case INSTR_SMSW:
- value = v->arch.hvm_svm.cpu_shadow_cr0 & 0xFFFF;
+ value = v->arch.hvm_vcpu.guest_cr[0] & 0xFFFF;
modrm = buffer[index+2];
addr_size = svm_guest_x86_mode(v);
if ( addr_size < 2 )
@@ -2099,9 +1908,8 @@ static int svm_cr_access(struct vcpu *v,

ASSERT(inst_len);

- __update_guest_eip(vmcb, inst_len);
-
- return result;
+ if ( result )
+ __update_guest_eip(vmcb, inst_len);
}

static void svm_do_msr_access(
@@ -2129,7 +1937,7 @@ static void svm_do_msr_access(
break;

case MSR_EFER:
- msr_content = v->arch.hvm_svm.cpu_shadow_efer;
+ msr_content = v->arch.hvm_vcpu.guest_efer;
break;

case MSR_K8_MC4_MISC: /* Threshold register */
@@ -2319,8 +2127,7 @@ void svm_handle_invlpg(const short invlp
HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));

paging_invlpg(v, g_vaddr);
- /* signal invplg to ASID handler */
- svm_asid_g_invlpg (v, g_vaddr);
+ svm_asid_g_invlpg(v, g_vaddr);
}


@@ -2335,29 +2142,28 @@ static int svm_reset_to_realmode(struct
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

- /* clear the vmcb and user regs */
memset(regs, 0, sizeof(struct cpu_user_regs));
-
- /* VMCB State */
+
vmcb->cr0 = X86_CR0_ET | X86_CR0_PG | X86_CR0_WP;
- v->arch.hvm_svm.cpu_shadow_cr0 = X86_CR0_ET;
+ v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;

vmcb->cr2 = 0;
vmcb->efer = EFER_SVME;

vmcb->cr4 = HVM_CR4_HOST_MASK;
- v->arch.hvm_svm.cpu_shadow_cr4 = 0;
-
- if ( paging_mode_hap(v->domain) ) {
- vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
- vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 |
- (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
+ v->arch.hvm_vcpu.guest_cr[4] = 0;
+
+ if ( paging_mode_hap(v->domain) )
+ {
+ vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+ vmcb->cr4 = (v->arch.hvm_vcpu.guest_cr[4] |
+ (HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
}

/* This will jump to ROMBIOS */
vmcb->rip = 0xFFF0;

- /* setup the segment registers and all their hidden states */
+ /* Set up the segment registers and all their hidden states. */
vmcb->cs.sel = 0xF000;
vmcb->cs.attr.bytes = 0x089b;
vmcb->cs.limit = 0xffff;
@@ -2495,7 +2301,7 @@ asmlinkage void svm_vmexit_handler(struc
break;
}

- v->arch.hvm_svm.cpu_cr2 = vmcb->cr2 = va;
+ v->arch.hvm_vcpu.guest_cr[2] = vmcb->cr2 = va;
svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
break;
}
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c Wed Aug 08 12:27:23 2007 +0100
@@ -111,7 +111,7 @@ static int construct_vmcb(struct vcpu *v
svm_segment_attributes_t attrib;

/* TLB control, and ASID assigment. */
- svm_asid_init_vcpu (v);
+ svm_asid_init_vcpu(v);

vmcb->general1_intercepts =
GENERAL1_INTERCEPT_INTR | GENERAL1_INTERCEPT_NMI |
@@ -218,25 +218,24 @@ static int construct_vmcb(struct vcpu *v

/* Guest CR0. */
vmcb->cr0 = read_cr0();
- arch_svm->cpu_shadow_cr0 = vmcb->cr0 & ~(X86_CR0_PG | X86_CR0_TS);
- vmcb->cr0 |= X86_CR0_WP;
+ v->arch.hvm_vcpu.guest_cr[0] = vmcb->cr0 & ~(X86_CR0_PG | X86_CR0_TS);

/* Guest CR4. */
- arch_svm->cpu_shadow_cr4 =
+ v->arch.hvm_vcpu.guest_cr[4] =
read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
- vmcb->cr4 = arch_svm->cpu_shadow_cr4 | HVM_CR4_HOST_MASK;
+ vmcb->cr4 = v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK;

paging_update_paging_modes(v);
- vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
+ vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];

if ( paging_mode_hap(v->domain) )
{
- vmcb->cr0 = arch_svm->cpu_shadow_cr0;
+ vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
vmcb->np_enable = 1; /* enable nested paging */
vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
- vmcb->cr4 = arch_svm->cpu_shadow_cr4 =
- (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
+ vmcb->cr4 = v->arch.hvm_vcpu.guest_cr[4] =
+ HVM_CR4_HOST_MASK & ~X86_CR4_PAE;
vmcb->exception_intercepts = HVM_TRAP_MASK;

/* No point in intercepting CR3/4 reads, because the hardware
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/vioapic.c Wed Aug 08 12:27:23 2007 +0100
@@ -43,10 +43,6 @@
/* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
#define IRQ0_SPECIAL_ROUTING 1

-#if defined(__ia64__)
-#define opt_hvm_debug_level opt_vmx_debug_level
-#endif
-
static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq);

static unsigned long vioapic_read_indirect(struct hvm_hw_vioapic *vioapic,
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Wed Aug 08 12:27:23 2007 +0100
@@ -506,17 +506,17 @@ static void construct_vmcs(struct vcpu *

/* Guest CR0. */
cr0 = read_cr0();
- v->arch.hvm_vmx.cpu_cr0 = cr0;
- __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
- v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
- __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+ v->arch.hvm_vcpu.hw_cr[0] = cr0;
+ __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+ v->arch.hvm_vcpu.guest_cr[0] = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);

/* Guest CR4. */
cr4 = read_cr4();
__vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
- v->arch.hvm_vmx.cpu_shadow_cr4 =
+ v->arch.hvm_vcpu.guest_cr[4] =
cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
- __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);

if ( cpu_has_vmx_tpr_shadow )
{
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Aug 08 12:27:23 2007 +0100
@@ -100,39 +100,11 @@ static void vmx_vcpu_destroy(struct vcpu
vmx_destroy_vmcs(v);
}

-static int vmx_paging_enabled(struct vcpu *v)
-{
- unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
- return (cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
-}
-
-static int vmx_pgbit_test(struct vcpu *v)
-{
- unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
- return cr0 & X86_CR0_PG;
-}
-
-static int vmx_pae_enabled(struct vcpu *v)
-{
- unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
- return vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE);
-}
-
-static int vmx_nx_enabled(struct vcpu *v)
-{
- return v->arch.hvm_vmx.efer & EFER_NX;
-}
-
#ifdef __x86_64__

static int vmx_lme_is_set(struct vcpu *v)
{
- return v->arch.hvm_vmx.efer & EFER_LME;
-}
-
-static int vmx_long_mode_enabled(struct vcpu *v)
-{
- return v->arch.hvm_vmx.efer & EFER_LMA;
+ return v->arch.hvm_vcpu.guest_efer & EFER_LME;
}

static void vmx_enable_long_mode(struct vcpu *v)
@@ -143,7 +115,7 @@ static void vmx_enable_long_mode(struct
vm_entry_value |= VM_ENTRY_IA32E_MODE;
__vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);

- v->arch.hvm_vmx.efer |= EFER_LMA;
+ v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
}

static void vmx_disable_long_mode(struct vcpu *v)
@@ -154,7 +126,7 @@ static void vmx_disable_long_mode(struct
vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
__vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);

- v->arch.hvm_vmx.efer &= ~EFER_LMA;
+ v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
}

static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
@@ -190,7 +162,7 @@ static enum handler_return long_mode_do_
switch ( ecx )
{
case MSR_EFER:
- msr_content = v->arch.hvm_vmx.efer;
+ msr_content = v->arch.hvm_vcpu.guest_efer;
break;

case MSR_FS_BASE:
@@ -204,7 +176,7 @@ static enum handler_return long_mode_do_
case MSR_SHADOW_GS_BASE:
msr_content = v->arch.hvm_vmx.shadow_gs;
check_long_mode:
- if ( !(vmx_long_mode_enabled(v)) )
+ if ( !(hvm_long_mode_enabled(v)) )
{
vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
return HNDL_exception_raised;
@@ -263,9 +235,9 @@ static enum handler_return long_mode_do_
}

if ( (msr_content & EFER_LME)
- && !(v->arch.hvm_vmx.efer & EFER_LME) )
- {
- if ( unlikely(vmx_paging_enabled(v)) )
+ && !(v->arch.hvm_vcpu.guest_efer & EFER_LME) )
+ {
+ if ( unlikely(hvm_paging_enabled(v)) )
{
gdprintk(XENLOG_WARNING,
"Trying to set EFER.LME with paging enabled\n");
@@ -273,9 +245,9 @@ static enum handler_return long_mode_do_
}
}
else if ( !(msr_content & EFER_LME)
- && (v->arch.hvm_vmx.efer & EFER_LME) )
- {
- if ( unlikely(vmx_paging_enabled(v)) )
+ && (v->arch.hvm_vcpu.guest_efer & EFER_LME) )
+ {
+ if ( unlikely(hvm_paging_enabled(v)) )
{
gdprintk(XENLOG_WARNING,
"Trying to clear EFER.LME with paging enabled\n");
@@ -283,17 +255,17 @@ static enum handler_return long_mode_do_
}
}

- if ( (msr_content ^ v->arch.hvm_vmx.efer) & (EFER_NX|EFER_SCE) )
+ if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & (EFER_NX|EFER_SCE) )
write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
(msr_content & (EFER_NX|EFER_SCE)));

- v->arch.hvm_vmx.efer = msr_content;
+ v->arch.hvm_vcpu.guest_efer = msr_content;
break;

case MSR_FS_BASE:
case MSR_GS_BASE:
case MSR_SHADOW_GS_BASE:
- if ( !vmx_long_mode_enabled(v) )
+ if ( !hvm_long_mode_enabled(v) )
goto gp_fault;

if ( !is_canonical_address(msr_content) )
@@ -394,21 +366,19 @@ static void vmx_restore_guest_msrs(struc
clear_bit(i, &guest_flags);
}

- if ( (v->arch.hvm_vmx.efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
+ if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
{
HVM_DBG_LOG(DBG_LEVEL_2,
"restore guest's EFER with value %lx",
- v->arch.hvm_vmx.efer);
+ v->arch.hvm_vcpu.guest_efer);
write_efer((read_efer() & ~(EFER_NX | EFER_SCE)) |
- (v->arch.hvm_vmx.efer & (EFER_NX | EFER_SCE)));
+ (v->arch.hvm_vcpu.guest_efer & (EFER_NX | EFER_SCE)));
}
}

#else /* __i386__ */

static int vmx_lme_is_set(struct vcpu *v)
-{ return 0; }
-static int vmx_long_mode_enabled(struct vcpu *v)
{ return 0; }
static void vmx_enable_long_mode(struct vcpu *v)
{ BUG(); }
@@ -427,13 +397,13 @@ static void vmx_restore_host_msrs(void)

static void vmx_restore_guest_msrs(struct vcpu *v)
{
- if ( (v->arch.hvm_vmx.efer ^ read_efer()) & EFER_NX )
+ if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & EFER_NX )
{
HVM_DBG_LOG(DBG_LEVEL_2,
"restore guest's EFER with value %lx",
- v->arch.hvm_vmx.efer);
+ v->arch.hvm_vcpu.guest_efer);
write_efer((read_efer() & ~EFER_NX) |
- (v->arch.hvm_vmx.efer & EFER_NX));
+ (v->arch.hvm_vcpu.guest_efer & EFER_NX));
}
}

@@ -444,7 +414,7 @@ static enum handler_return long_mode_do_

switch ( regs->ecx ) {
case MSR_EFER:
- msr_content = v->arch.hvm_vmx.efer;
+ msr_content = v->arch.hvm_vcpu.guest_efer;
break;

default:
@@ -475,10 +445,10 @@ static enum handler_return long_mode_do_
return HNDL_exception_raised;
}

- if ( (msr_content ^ v->arch.hvm_vmx.efer) & EFER_NX )
+ if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & EFER_NX )
write_efer((read_efer() & ~EFER_NX) | (msr_content & EFER_NX));

- v->arch.hvm_vmx.efer = msr_content;
+ v->arch.hvm_vcpu.guest_efer = msr_content;
break;

default:
@@ -501,12 +471,12 @@ static int vmx_guest_x86_mode(struct vcp

ASSERT(v == current);

- if ( unlikely(!(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_PE)) )
+ if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
return 0;
if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
return 1;
cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
- if ( vmx_long_mode_enabled(v) &&
+ if ( hvm_long_mode_enabled(v) &&
likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
return 8;
return (likely(cs_ar_bytes & X86_SEG_AR_DEF_OP_SIZE) ? 4 : 2);
@@ -551,12 +521,12 @@ void vmx_vmcs_save(struct vcpu *v, struc
c->rsp = __vmread(GUEST_RSP);
c->rflags = __vmread(GUEST_RFLAGS);

- c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
- c->cr2 = v->arch.hvm_vmx.cpu_cr2;
- c->cr3 = v->arch.hvm_vmx.cpu_cr3;
- c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
-
- c->msr_efer = v->arch.hvm_vmx.efer;
+ c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+ c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+ c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
+ c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
+
+ c->msr_efer = v->arch.hvm_vcpu.guest_efer;

#ifdef HVM_DEBUG_SUSPEND
printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
@@ -635,22 +605,22 @@ int vmx_vmcs_restore(struct vcpu *v, str
__vmwrite(GUEST_RSP, c->rsp);
__vmwrite(GUEST_RFLAGS, c->rflags);

- v->arch.hvm_vmx.cpu_cr0 = (c->cr0 | X86_CR0_PE | X86_CR0_PG |
- X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
- __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
- v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
- __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
-
- v->arch.hvm_vmx.cpu_cr2 = c->cr2;
-
- v->arch.hvm_vmx.efer = c->msr_efer;
+ v->arch.hvm_vcpu.hw_cr[0] = (c->cr0 | X86_CR0_PE | X86_CR0_PG |
+ X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
+ __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+ v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+
+ v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
+
+ v->arch.hvm_vcpu.guest_efer = c->msr_efer;

#ifdef HVM_DEBUG_SUSPEND
printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
__func__, c->cr3, c->cr0, c->cr4);
#endif

- if ( !vmx_paging_enabled(v) )
+ if ( !hvm_paging_enabled(v) )
{
HVM_DBG_LOG(DBG_LEVEL_VMMU, "%s: paging not enabled.", __func__);
goto skip_cr3;
@@ -672,14 +642,14 @@ int vmx_vmcs_restore(struct vcpu *v, str
put_page(mfn_to_page(old_base_mfn));

skip_cr3:
- v->arch.hvm_vmx.cpu_cr3 = c->cr3;
-
- if ( vmx_long_mode_enabled(v) )
+ v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
+
+ if ( hvm_long_mode_enabled(v) )
vmx_enable_long_mode(v);

__vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
- v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
- __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+ v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);

__vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
__vmwrite(GUEST_IDTR_BASE, c->idtr_base);
@@ -884,10 +854,10 @@ static void vmx_store_cpu_guest_regs(

if ( crs != NULL )
{
- crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
- crs[2] = v->arch.hvm_vmx.cpu_cr2;
- crs[3] = v->arch.hvm_vmx.cpu_cr3;
- crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
+ crs[0] = v->arch.hvm_vcpu.guest_cr[0];
+ crs[2] = v->arch.hvm_vcpu.guest_cr[2];
+ crs[3] = v->arch.hvm_vcpu.guest_cr[3];
+ crs[4] = v->arch.hvm_vcpu.guest_cr[4];
}

vmx_vmcs_exit(v);
@@ -928,24 +898,6 @@ static void vmx_load_cpu_guest_regs(stru
vmx_vmcs_exit(v);
}

-static unsigned long vmx_get_ctrl_reg(struct vcpu *v, unsigned int num)
-{
- switch ( num )
- {
- case 0:
- return v->arch.hvm_vmx.cpu_cr0;
- case 2:
- return v->arch.hvm_vmx.cpu_cr2;
- case 3:
- return v->arch.hvm_vmx.cpu_cr3;
- case 4:
- return v->arch.hvm_vmx.cpu_shadow_cr4;
- default:
- BUG();
- }
- return 0; /* dummy */
-}
-
static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
unsigned long base = 0;
@@ -953,7 +905,7 @@ static unsigned long vmx_get_segment_bas

ASSERT(v == current);

- if ( vmx_long_mode_enabled(v) &&
+ if ( hvm_long_mode_enabled(v) &&
(__vmread(GUEST_CS_AR_BYTES) & X86_SEG_AR_CS_LM_ACTIVE) )
long_mode = 1;

@@ -1059,10 +1011,10 @@ static void vmx_stts(struct vcpu *v)
* then this is not necessary: no FPU activity can occur until the guest
* clears CR0.TS, and we will initialise the FPU when that happens.
*/
- if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
- {
- v->arch.hvm_vmx.cpu_cr0 |= X86_CR0_TS;
- __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+ if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ {
+ v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
+ __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
__vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
}
}
@@ -1135,11 +1087,25 @@ static void vmx_update_host_cr3(struct v
vmx_vmcs_exit(v);
}

-static void vmx_update_guest_cr3(struct vcpu *v)
+static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
{
ASSERT((v == current) || !vcpu_runnable(v));
+
vmx_vmcs_enter(v);
- __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
+
+ switch ( cr )
+ {
+ case 3:
+ __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
+ break;
+ case 4:
+ __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
+ break;
+ default:
+ BUG();
+ }
+
vmx_vmcs_exit(v);
}

@@ -1156,7 +1122,7 @@ static void vmx_inject_exception(
struct vcpu *v = current;
vmx_inject_hw_exception(v, trapnr, errcode);
if ( trapnr == TRAP_page_fault )
- v->arch.hvm_vmx.cpu_cr2 = cr2;
+ v->arch.hvm_vcpu.guest_cr[2] = cr2;
}

static void vmx_update_vtpr(struct vcpu *v, unsigned long value)
@@ -1200,17 +1166,12 @@ static struct hvm_function_table vmx_fun
.load_cpu_guest_regs = vmx_load_cpu_guest_regs,
.save_cpu_ctxt = vmx_save_vmcs_ctxt,
.load_cpu_ctxt = vmx_load_vmcs_ctxt,
- .paging_enabled = vmx_paging_enabled,
- .long_mode_enabled = vmx_long_mode_enabled,
- .pae_enabled = vmx_pae_enabled,
- .nx_enabled = vmx_nx_enabled,
.interrupts_enabled = vmx_interrupts_enabled,
.guest_x86_mode = vmx_guest_x86_mode,
- .get_guest_ctrl_reg = vmx_get_ctrl_reg,
.get_segment_base = vmx_get_segment_base,
.get_segment_register = vmx_get_segment_register,
.update_host_cr3 = vmx_update_host_cr3,
- .update_guest_cr3 = vmx_update_guest_cr3,
+ .update_guest_cr = vmx_update_guest_cr,
.flush_guest_tlbs = vmx_flush_guest_tlbs,
.update_vtpr = vmx_update_vtpr,
.stts = vmx_stts,
@@ -1315,10 +1276,10 @@ static void vmx_do_no_device_fault(void)
__vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);

/* Disable TS in guest CR0 unless the guest wants the exception too. */
- if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
- {
- v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS;
- __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+ if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+ {
+ v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
+ __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
}
}

@@ -1773,7 +1734,7 @@ static void vmx_do_str_pio(unsigned long

sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
ar_bytes = __vmread(GUEST_CS_AR_BYTES);
- if ( vmx_long_mode_enabled(current) &&
+ if ( hvm_long_mode_enabled(current) &&
(ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
long_mode = 1;
addr = __vmread(GUEST_LINEAR_ADDRESS);
@@ -1900,9 +1861,9 @@ static void vmx_world_save(struct vcpu *
c->esp = __vmread(GUEST_RSP);
c->eflags = __vmread(GUEST_RFLAGS) & ~X86_EFLAGS_RF;

- c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
- c->cr3 = v->arch.hvm_vmx.cpu_cr3;
- c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
+ c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+ c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
+ c->cr4 = v->arch.hvm_vcpu.guest_cr[4];

c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
c->idtr_base = __vmread(GUEST_IDTR_BASE);
@@ -1959,13 +1920,13 @@ static int vmx_world_restore(struct vcpu
__vmwrite(GUEST_RSP, c->esp);
__vmwrite(GUEST_RFLAGS, c->eflags);

- v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
- __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
-
- if ( !vmx_paging_enabled(v) )
+ v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+
+ if ( !hvm_paging_enabled(v) )
goto skip_cr3;

- if ( c->cr3 == v->arch.hvm_vmx.cpu_cr3 )
+ if ( c->cr3 == v->arch.hvm_vcpu.guest_cr[3] )
{
/*
* This is simple TLB flush, implying the guest has
@@ -1990,18 +1951,18 @@ static int vmx_world_restore(struct vcpu
v->arch.guest_table = pagetable_from_pfn(mfn);
if ( old_base_mfn )
put_page(mfn_to_page(old_base_mfn));
- v->arch.hvm_vmx.cpu_cr3 = c->cr3;
+ v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
}

skip_cr3:
- if ( !vmx_paging_enabled(v) )
+ if ( !hvm_paging_enabled(v) )
HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
else
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);

__vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
- v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
- __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+ v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);

__vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
__vmwrite(GUEST_IDTR_BASE, c->idtr_base);
@@ -2184,22 +2145,22 @@ static int vmx_set_cr0(unsigned long val
__vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
}

- old_cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
+ old_cr0 = v->arch.hvm_vcpu.guest_cr[0];
paging_enabled = old_cr0 & X86_CR0_PG;

- v->arch.hvm_vmx.cpu_cr0 = (value | X86_CR0_PE | X86_CR0_PG
- | X86_CR0_NE | X86_CR0_WP);
- __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
-
- v->arch.hvm_vmx.cpu_shadow_cr0 = value;
- __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+ v->arch.hvm_vcpu.hw_cr[0] = (value | X86_CR0_PE | X86_CR0_PG |
+ X86_CR0_NE | X86_CR0_WP);
+ __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+
+ v->arch.hvm_vcpu.guest_cr[0] = value;
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);

/* Trying to enable paging. */
if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
{
- if ( vmx_lme_is_set(v) && !vmx_long_mode_enabled(v) )
- {
- if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
+ if ( vmx_lme_is_set(v) && !hvm_long_mode_enabled(v) )
+ {
+ if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
"with EFER.LME set but not CR4.PAE");
@@ -2214,11 +2175,11 @@ static int vmx_set_cr0(unsigned long val
/*
* The guest CR3 must be pointing to the guest physical.
*/
- mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
+ mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
{
gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
- v->arch.hvm_vmx.cpu_cr3, mfn);
+ v->arch.hvm_vcpu.guest_cr[3], mfn);
domain_crash(v->domain);
return 0;
}
@@ -2232,7 +2193,7 @@ static int vmx_set_cr0(unsigned long val
put_page(mfn_to_page(old_base_mfn));

HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
- v->arch.hvm_vmx.cpu_cr3, mfn);
+ v->arch.hvm_vcpu.guest_cr[3], mfn);

paging_update_paging_modes(v);
}
@@ -2242,13 +2203,13 @@ static int vmx_set_cr0(unsigned long val
paging_enabled )
{
/* When CR0.PG is cleared, LMA is cleared immediately. */
- if ( vmx_long_mode_enabled(v) )
+ if ( hvm_long_mode_enabled(v) )
vmx_disable_long_mode(v);

- if ( v->arch.hvm_vmx.cpu_cr3 )
+ if ( v->arch.hvm_vcpu.guest_cr[3] )
{
put_page(mfn_to_page(get_mfn_from_gpfn(
- v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
+ v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
v->arch.guest_table = pagetable_null();
}
}
@@ -2316,12 +2277,9 @@ static int vmx_set_cr0(unsigned long val
CASE_ ## T ## ET_REG(R15, r15)
#endif

-/*
- * Write to control registers
- */
static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
{
- unsigned long value, old_cr, old_base_mfn, mfn;
+ unsigned long value;
struct vcpu *v = current;
struct vlapic *vlapic = vcpu_vlapic(v);

@@ -2353,108 +2311,10 @@ static int mov_to_cr(int gp, int cr, str
return vmx_set_cr0(value);

case 3:
- /*
- * If paging is not enabled yet, simply copy the value to CR3.
- */
- if ( !vmx_paging_enabled(v) )
- {
- v->arch.hvm_vmx.cpu_cr3 = value;
- break;
- }
-
- /*
- * We make a new one if the shadow does not exist.
- */
- if ( value == v->arch.hvm_vmx.cpu_cr3 ) {
- /*
- * This is simple TLB flush, implying the guest has
- * removed some translation or changed page attributes.
- * We simply invalidate the shadow.
- */
- mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
- if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
- goto bad_cr3;
- paging_update_cr3(v);
- } else {
- /*
- * If different, make a shadow. Check if the PDBR is valid
- * first.
- */
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
- mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
- if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
- goto bad_cr3;
- old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
- v->arch.guest_table = pagetable_from_pfn(mfn);
- if ( old_base_mfn )
- put_page(mfn_to_page(old_base_mfn));
- v->arch.hvm_vmx.cpu_cr3 = value;
- update_cr3(v);
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
- }
- break;
-
- case 4: /* CR4 */
- old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
-
- if ( value & HVM_CR4_GUEST_RESERVED_BITS )
- {
- HVM_DBG_LOG(DBG_LEVEL_1,
- "Guest attempts to set reserved bit in CR4: %lx",
- value);
- vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
- return 0;
- }
-
- if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
- {
- if ( vmx_pgbit_test(v) )
- {
-#if CONFIG_PAGING_LEVELS >= 3
- /* The guest is a 32-bit PAE guest. */
- unsigned long mfn, old_base_mfn;
- mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
- if ( !mfn_valid(mfn) ||
- !get_page(mfn_to_page(mfn), v->domain) )
- goto bad_cr3;
-
- /*
- * Now arch.guest_table points to machine physical.
- */
- old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
- v->arch.guest_table = pagetable_from_pfn(mfn);
- if ( old_base_mfn )
- put_page(mfn_to_page(old_base_mfn));
-
- HVM_DBG_LOG(DBG_LEVEL_VMMU,
- "Update CR3 value = %lx, mfn = %lx",
- v->arch.hvm_vmx.cpu_cr3, mfn);
-#endif
- }
- }
- else if ( !(value & X86_CR4_PAE) )
- {
- if ( unlikely(vmx_long_mode_enabled(v)) )
- {
- HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
- "EFER.LMA is set");
- vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
- return 0;
- }
- }
-
- __vmwrite(GUEST_CR4, value | HVM_CR4_HOST_MASK);
- v->arch.hvm_vmx.cpu_shadow_cr4 = value;
- __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
-
- /*
- * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
- * all TLB entries except global entries.
- */
- if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
- paging_update_paging_modes(v);
-
- break;
+ return hvm_set_cr3(value);
+
+ case 4:
+ return hvm_set_cr4(value);

case 8:
vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
@@ -2462,14 +2322,11 @@ static int mov_to_cr(int gp, int cr, str

default:
gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
- domain_crash(v->domain);
- return 0;
+ goto exit_and_crash;
}

return 1;

- bad_cr3:
- gdprintk(XENLOG_ERR, "Invalid CR3\n");
exit_and_crash:
domain_crash(v->domain);
return 0;
@@ -2487,7 +2344,7 @@ static void mov_from_cr(int cr, int gp,
switch ( cr )
{
case 3:
- value = (unsigned long)v->arch.hvm_vmx.cpu_cr3;
+ value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
break;
case 8:
value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
@@ -2530,7 +2387,8 @@ static int vmx_cr_access(unsigned long e
unsigned long value;
struct vcpu *v = current;

- switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE ) {
+ switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE )
+ {
case TYPE_MOV_TO_CR:
gp = exit_qualification & CONTROL_REG_ACCESS_REG;
cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
@@ -2545,14 +2403,14 @@ static int vmx_cr_access(unsigned long e
setup_fpu(v);
__vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);

- v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS; /* clear TS */
- __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
-
- v->arch.hvm_vmx.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
- __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+ v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS; /* clear TS */
+ __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+
+ v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
break;
case TYPE_LMSW:
- value = v->arch.hvm_vmx.cpu_shadow_cr0;
+ value = v->arch.hvm_vcpu.guest_cr[0];
value = (value & ~0xF) |
(((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
return vmx_set_cr0(value);
@@ -2943,7 +2801,7 @@ asmlinkage void vmx_vmexit_handler(struc
break;
}

- v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
+ v->arch.hvm_vcpu.guest_cr[2] = exit_qualification;
vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
break;
case TRAP_nmi:
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S Wed Aug 08 12:27:23 2007 +0100
@@ -74,7 +74,7 @@ ENTRY(vmx_asm_do_vmentry)
jnz vmx_process_softirqs

call vmx_intr_assist
- movl VCPU_vmx_cr2(%ebx),%eax
+ movl VCPU_hvm_guest_cr2(%ebx),%eax
movl %eax,%cr2
call vmx_trace_vmentry

diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S Wed Aug 08 12:27:23 2007 +0100
@@ -88,7 +88,7 @@ ENTRY(vmx_asm_do_vmentry)
jnz vmx_process_softirqs

call vmx_intr_assist
- movq VCPU_vmx_cr2(%rbx),%rax
+ movq VCPU_hvm_guest_cr2(%rbx),%rax
movq %rax,%cr2
call vmx_trace_vmentry

diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/mm.c Wed Aug 08 12:27:23 2007 +0100
@@ -394,8 +394,8 @@ void write_ptbase(struct vcpu *v)
write_cr3(v->arch.cr3);
}

-/* Should be called after CR3 is updated.
- * Updates vcpu->arch.cr3 and, for HVM guests, vcpu->arch.hvm_vcpu.cpu_cr3.
+/*
+ * Should be called after CR3 is updated.
*
* Uses values found in vcpu->arch.(guest_table and guest_table_user), and
* for HVM guests, arch.monitor_table and hvm's guest CR3.
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/mm/hap/guest_walk.c Wed Aug 08 12:27:23 2007 +0100
@@ -62,7 +62,7 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
struct vcpu *v, unsigned long gva)
{
- unsigned long gcr3 = hvm_get_guest_ctrl_reg(v, 3);
+ unsigned long gcr3 = v->arch.hvm_vcpu.guest_cr[3];
int mode = GUEST_PAGING_LEVELS;
int lev, index;
paddr_t gpa = 0;
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Wed Aug 08 12:27:23 2007 +0100
@@ -603,47 +603,36 @@ static int hap_invlpg(struct vcpu *v, un
return 0;
}

-/*
- * HAP guests do not need to take any action on CR3 writes (they are still
- * intercepted, so that Xen's copy of the guest's CR3 can be kept in sync.)
- */
static void hap_update_cr3(struct vcpu *v, int do_locking)
{
+ v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
+ hvm_update_guest_cr(v, 3);
}

static void hap_update_paging_modes(struct vcpu *v)
{
- struct domain *d;
-
- d = v->domain;
+ struct domain *d = v->domain;
+
hap_lock(d);

- /* update guest paging mode. Note that we rely on hvm functions to detect
- * guest's paging mode. So, make sure the shadow registers (CR0, CR4, EFER)
- * reflect guest's status correctly.
- */
- if ( hvm_paging_enabled(v) )
- {
- if ( hvm_long_mode_enabled(v) )
- v->arch.paging.mode = &hap_paging_long_mode;
- else if ( hvm_pae_enabled(v) )
- v->arch.paging.mode = &hap_paging_pae_mode;
- else
- v->arch.paging.mode = &hap_paging_protected_mode;
- }
- else
- {
- v->arch.paging.mode = &hap_paging_real_mode;
- }
-
- v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
+ v->arch.paging.mode =
+ !hvm_paging_enabled(v) ? &hap_paging_real_mode :
+ hvm_long_mode_enabled(v) ? &hap_paging_long_mode :
+ hvm_pae_enabled(v) ? &hap_paging_pae_mode :
+ &hap_paging_protected_mode;
+
+ v->arch.paging.translate_enabled = hvm_paging_enabled(v);

if ( pagetable_is_null(v->arch.monitor_table) )
{
mfn_t mmfn = hap_make_monitor_table(v);
v->arch.monitor_table = pagetable_from_mfn(mmfn);
make_cr3(v, mfn_x(mmfn));
- }
+ hvm_update_host_cr3(v);
+ }
+
+ /* CR3 is effectively updated by a mode change. Flush ASIDs, etc. */
+ hap_update_cr3(v, 0);

hap_unlock(d);
}
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c Wed Aug 08 12:27:23 2007 +0100
@@ -2266,7 +2266,7 @@ static void sh_update_paging_modes(struc
ASSERT(shadow_mode_translate(d));
ASSERT(shadow_mode_external(d));

- v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
+ v->arch.paging.translate_enabled = hvm_paging_enabled(v);
if ( !v->arch.paging.translate_enabled )
{
/* Set v->arch.guest_table to use the p2m map, and choose
@@ -2347,7 +2347,7 @@ static void sh_update_paging_modes(struc
SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d g=%u s=%u "
"(was g=%u s=%u)\n",
d->domain_id, v->vcpu_id,
- is_hvm_domain(d) ? !!hvm_paging_enabled(v) : 1,
+ is_hvm_domain(d) ? hvm_paging_enabled(v) : 1,
v->arch.paging.mode->guest_levels,
v->arch.paging.mode->shadow.shadow_levels,
old_mode ? old_mode->guest_levels : 0,
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c Wed Aug 08 12:27:23 2007 +0100
@@ -175,7 +175,7 @@ guest_supports_superpages(struct vcpu *v
/* The _PAGE_PSE bit must be honoured in HVM guests, whenever
* CR4.PSE is set or the guest is in PAE or long mode */
return (is_hvm_vcpu(v) && (GUEST_PAGING_LEVELS != 2
- || (hvm_get_guest_ctrl_reg(v, 4) & X86_CR4_PSE)));
+ || (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PSE)));
}

static inline int
@@ -3483,7 +3483,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
* Paravirtual guests should set v->arch.guest_table (and guest_table_user,
* if appropriate).
* HVM guests should also make sure hvm_get_guest_cntl_reg(v, 3) works;
- * this function will call hvm_update_guest_cr3() to tell them where the
+ * this function will call hvm_update_guest_cr(v, 3) to tell them where the
* shadow tables are.
* If do_locking != 0, assume we are being called from outside the
* shadow code, and must take and release the shadow lock; otherwise
@@ -3525,7 +3525,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
// Is paging enabled on this vcpu?
if ( paging_vcpu_mode_translate(v) )
{
- gfn = _gfn(paddr_to_pfn(hvm_get_guest_ctrl_reg(v, 3)));
+ gfn = _gfn(paddr_to_pfn(v->arch.hvm_vcpu.guest_cr[3]));
gmfn = vcpu_gfn_to_mfn(v, gfn);
ASSERT(mfn_valid(gmfn));
ASSERT(pagetable_get_pfn(v->arch.guest_table) == mfn_x(gmfn));
@@ -3576,11 +3576,11 @@ sh_update_cr3(struct vcpu *v, int do_loc

if ( shadow_mode_external(d) && paging_vcpu_mode_translate(v) )
/* Paging enabled: find where in the page the l3 table is */
- guest_idx = guest_index((void *)hvm_get_guest_ctrl_reg(v, 3));
- else
- /* Paging disabled or PV: l3 is at the start of a page */
- guest_idx = 0;
-
+ guest_idx = guest_index((void *)v->arch.hvm_vcpu.guest_cr[3]);
+ else
+ /* Paging disabled or PV: l3 is at the start of a page */
+ guest_idx = 0;
+
// Ignore the low 2 bits of guest_idx -- they are really just
// cache control.
guest_idx &= ~3;
@@ -3718,18 +3718,21 @@ sh_update_cr3(struct vcpu *v, int do_loc


///
- /// v->arch.hvm_vcpu.hw_cr3
+ /// v->arch.hvm_vcpu.hw_cr[3]
///
if ( shadow_mode_external(d) )
{
ASSERT(is_hvm_domain(d));
#if SHADOW_PAGING_LEVELS == 3
/* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
- hvm_update_guest_cr3(v, virt_to_maddr(&v->arch.paging.shadow.l3table));
+ v->arch.hvm_vcpu.hw_cr[3] =
+ virt_to_maddr(&v->arch.paging.shadow.l3table);
#else
/* 2-on-2 or 4-on-4: Just use the shadow top-level directly */
- hvm_update_guest_cr3(v, pagetable_get_paddr(v->arch.shadow_table[0]));
-#endif
+ v->arch.hvm_vcpu.hw_cr[3] =
+ pagetable_get_paddr(v->arch.shadow_table[0]);
+#endif
+ hvm_update_guest_cr(v, 3);
}

/* Fix up the linear pagetable mappings */
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/x86_32/asm-offsets.c Wed Aug 08 12:27:23 2007 +0100
@@ -85,7 +85,7 @@ void __dummy__(void)
BLANK();

OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
- OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
+ OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
BLANK();

OFFSET(VMCB_rax, struct vmcb_struct, rax);
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/x86_64/asm-offsets.c Wed Aug 08 12:27:23 2007 +0100
@@ -88,7 +88,7 @@ void __dummy__(void)
BLANK();

OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
- OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
+ OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
BLANK();

OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h Wed Aug 08 12:27:23 2007 +0100
@@ -95,36 +95,26 @@ struct hvm_function_table {

/*
* Examine specifics of the guest state:
- * 1) determine whether paging is enabled,
- * 2) determine whether long mode is enabled,
- * 3) determine whether PAE paging is enabled,
- * 4) determine whether NX is enabled,
- * 5) determine whether interrupts are enabled or not,
- * 6) determine the mode the guest is running in,
- * 7) return the current guest control-register value
- * 8) return the current guest segment descriptor base
- * 9) return the current guest segment descriptor
- */
- int (*paging_enabled)(struct vcpu *v);
- int (*long_mode_enabled)(struct vcpu *v);
- int (*pae_enabled)(struct vcpu *v);
- int (*nx_enabled)(struct vcpu *v);
+ * 1) determine whether interrupts are enabled or not
+ * 2) determine the mode the guest is running in
+ * 3) return the current guest segment descriptor base
+ * 4) return the current guest segment descriptor
+ */
int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack);
int (*guest_x86_mode)(struct vcpu *v);
- unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg);

/*
- * Re-set the value of CR3 that Xen runs on when handling VM exits
+ * Re-set the value of CR3 that Xen runs on when handling VM exits.
*/
void (*update_host_cr3)(struct vcpu *v);

/*
- * Called to inform HVM layer that a guest cr3 has changed
- */
- void (*update_guest_cr3)(struct vcpu *v);
+ * Called to inform HVM layer that a guest control register has changed.
+ */
+ void (*update_guest_cr)(struct vcpu *v, unsigned int cr);

/*
* Called to ensure than all guest-specific mappings in a tagged TLB
@@ -189,38 +179,24 @@ void hvm_set_guest_time(struct vcpu *v,
void hvm_set_guest_time(struct vcpu *v, u64 gtime);
u64 hvm_get_guest_time(struct vcpu *v);

-static inline int
-hvm_paging_enabled(struct vcpu *v)
-{
- return hvm_funcs.paging_enabled(v);
-}
+#define hvm_paging_enabled(v) \
+ (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
+#define hvm_pae_enabled(v) \
+ (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
+#define hvm_nx_enabled(v) \
+ (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))

#ifdef __x86_64__
-static inline int
-hvm_long_mode_enabled(struct vcpu *v)
-{
- return hvm_funcs.long_mode_enabled(v);
-}
+#define hvm_long_mode_enabled(v) \
+ ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
#else
#define hvm_long_mode_enabled(v) (v,0)
#endif

static inline int
-hvm_pae_enabled(struct vcpu *v)
-{
- return hvm_funcs.pae_enabled(v);
-}
-
-static inline int
hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
{
return hvm_funcs.interrupts_enabled(v, type);
-}
-
-static inline int
-hvm_nx_enabled(struct vcpu *v)
-{
- return hvm_funcs.nx_enabled(v);
}

static inline int
@@ -244,7 +220,10 @@ hvm_update_vtpr(struct vcpu *v, unsigned
hvm_funcs.update_vtpr(v, value);
}

-void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);
+static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
+{
+ hvm_funcs.update_guest_cr(v, cr);
+}

static inline void
hvm_flush_guest_tlbs(void)
@@ -255,12 +234,6 @@ hvm_flush_guest_tlbs(void)

void hvm_hypercall_page_initialise(struct domain *d,
void *hypercall_page);
-
-static inline unsigned long
-hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
-{
- return hvm_funcs.get_guest_ctrl_reg(v, num);
-}

static inline unsigned long
hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/support.h Wed Aug 08 12:27:23 2007 +0100
@@ -234,4 +234,7 @@ void hvm_hlt(unsigned long rflags);
void hvm_hlt(unsigned long rflags);
void hvm_triple_fault(void);

+int hvm_set_cr3(unsigned long value);
+int hvm_set_cr4(unsigned long value);
+
#endif /* __ASM_X86_HVM_SUPPORT_H__ */
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/svm/asid.h
--- a/xen/include/asm-x86/hvm/svm/asid.h Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/asid.h Wed Aug 08 12:27:23 2007 +0100
@@ -32,20 +32,6 @@ void svm_asid_inv_asid(struct vcpu *v);
void svm_asid_inv_asid(struct vcpu *v);
void svm_asid_inc_generation(void);

-/*
- * ASID related, guest triggered events.
- */
-
-static inline void svm_asid_g_update_paging(struct vcpu *v)
-{
- svm_asid_inv_asid(v);
-}
-
-static inline void svm_asid_g_mov_to_cr3(struct vcpu *v)
-{
- svm_asid_inv_asid(v);
-}
-
static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
{
#if 0
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h Wed Aug 08 12:27:23 2007 +0100
@@ -440,11 +440,6 @@ struct arch_svm_struct {
u32 *msrpm;
int launch_core;
bool_t vmcb_in_sync; /* VMCB sync'ed with VMSAVE? */
- unsigned long cpu_shadow_cr0; /* Guest value for CR0 */
- unsigned long cpu_shadow_cr4; /* Guest value for CR4 */
- unsigned long cpu_shadow_efer; /* Guest value for EFER */
- unsigned long cpu_cr2;
- unsigned long cpu_cr3;
};

struct vmcb_struct *alloc_vmcb(void);
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h Wed Aug 08 12:27:23 2007 +0100
@@ -29,7 +29,17 @@
#define HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI 1

struct hvm_vcpu {
- unsigned long hw_cr3; /* value we give to HW to use */
+ /* Guest control-register and EFER values, just as the guest sees them. */
+ unsigned long guest_cr[5];
+ unsigned long guest_efer;
+
+ /*
+ * Processor-visible CR0-4 while guest executes.
+ * Only CR3 is guaranteed to be valid: all other array entries are private
+ * to the specific HVM implementation (e.g., VMX, SVM).
+ */
+ unsigned long hw_cr[5];
+
struct hvm_io_op io_op;
struct vlapic vlapic;
s64 cache_tsc_offset;
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Wed Aug 08 12:27:23 2007 +0100
@@ -67,17 +67,11 @@ struct arch_vmx_struct {
/* Cache of cpu execution control. */
u32 exec_control;

- unsigned long cpu_cr0; /* copy of guest CR0 */
- unsigned long cpu_shadow_cr0; /* copy of guest read shadow CR0 */
- unsigned long cpu_shadow_cr4; /* copy of guest read shadow CR4 */
- unsigned long cpu_cr2; /* save CR2 */
- unsigned long cpu_cr3;
#ifdef __x86_64__
struct vmx_msr_state msr_state;
unsigned long shadow_gs;
unsigned long cstar;
#endif
- unsigned long efer;

/* Following fields are all specific to vmxassist. */
unsigned long vmxassist_enabled:1;
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Wed Aug 08 12:27:23 2007 +0100
@@ -279,8 +279,8 @@ static inline void __vmx_inject_exceptio

__vmwrite(VM_ENTRY_INTR_INFO, intr_fields);

- if (trap == TRAP_page_fault)
- HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vmx.cpu_cr2, error_code);
+ if ( trap == TRAP_page_fault )
+ HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
else
HVMTRACE_2D(INJ_EXC, v, trap, error_code);
}

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog