author     iap10@freefall.cl.cam.ac.uk <iap10@freefall.cl.cam.ac.uk>  2005-06-21 07:02:30 +0000
committer  iap10@freefall.cl.cam.ac.uk <iap10@freefall.cl.cam.ac.uk>  2005-06-21 07:02:30 +0000
commit     22e32f8c3c64f489370ec47dbec54e4b43fbb7cb (patch)
tree       3a01cde6054b87a4f61789610a3b44bc605f869e
parent     0338f79f59e6ab68f5d3c37fa7a1108f0bc70918 (diff)
parent     677f37671b794a0f0e39fad4ecc6679e4f738809 (diff)
bitkeeper revision 1.1720 (42b7bb86ag6KD5OEx2v6YdSnS1BhGQ)
Merge freefall.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk into freefall.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bk
-rw-r--r--  .rootkeys | 31
-rw-r--r--  BitKeeper/etc/logging_ok | 1
-rw-r--r--  Config.mk | 3
-rw-r--r--  buildconfigs/Rules.mk | 3
-rw-r--r--  docs/misc/shype4xen_readme.txt | 580
-rw-r--r--  linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c | 10
-rw-r--r--  tools/Makefile | 1
-rw-r--r--  tools/libxc/xc.h | 2
-rw-r--r--  tools/libxc/xc_domain.c | 3
-rw-r--r--  tools/misc/policyprocessor/SecurityLabel.java | 34
-rw-r--r--  tools/misc/policyprocessor/SecurityPolicySpec.xsd | 115
-rw-r--r--  tools/misc/policyprocessor/SsidsEntry.java | 29
-rw-r--r--  tools/misc/policyprocessor/XmlToBin.java | 1588
-rw-r--r--  tools/misc/policyprocessor/XmlToBinInterface.java | 135
-rw-r--r--  tools/misc/policyprocessor/myHandler.java | 47
-rw-r--r--  tools/misc/policyprocessor/readme.install | 33
-rw-r--r--  tools/misc/policyprocessor/readme.xen | 65
-rw-r--r--  tools/misc/policyprocessor/xen_sample_def.xml | 46
-rw-r--r--  tools/misc/policyprocessor/xen_sample_policy.xml | 58
-rw-r--r--  tools/policy/Makefile | 36
-rw-r--r--  tools/policy/policy_tool.c | 557
-rw-r--r--  tools/python/xen/lowlevel/xc/xc.c | 10
-rw-r--r--  tools/python/xen/lowlevel/xs/xs.c | 76
-rw-r--r--  tools/python/xen/xend/XendDomainInfo.py | 20
-rw-r--r--  tools/python/xen/xend/image.py | 6
-rw-r--r--  tools/python/xen/xend/server/SrvDomainDir.py | 1
-rwxr-xr-x  tools/python/xen/xend/server/blkif.py | 5
-rwxr-xr-x  tools/python/xen/xend/server/netif.py | 2
-rw-r--r--  tools/python/xen/xend/xenstore/xsnode.py | 2
-rw-r--r--  tools/python/xen/xm/create.py | 7
-rw-r--r--  tools/python/xen/xm/main.py | 10
-rw-r--r--  tools/python/xen/xm/opts.py | 7
-rw-r--r--  xen/Makefile | 4
-rw-r--r--  xen/Rules.mk | 1
-rw-r--r--  xen/acm/Makefile | 15
-rw-r--r--  xen/acm/acm_chinesewall_hooks.c | 503
-rw-r--r--  xen/acm/acm_core.c | 205
-rw-r--r--  xen/acm/acm_null_hooks.c | 76
-rw-r--r--  xen/acm/acm_policy.c | 197
-rw-r--r--  xen/acm/acm_simple_type_enforcement_hooks.c | 638
-rw-r--r--  xen/arch/ia64/Makefile | 2
-rw-r--r--  xen/arch/ia64/asm-offsets.c | 3
-rw-r--r--  xen/arch/ia64/dom0_ops.c | 58
-rw-r--r--  xen/arch/ia64/domain.c | 226
-rw-r--r--  xen/arch/ia64/hypercall.c | 27
-rw-r--r--  xen/arch/ia64/hyperprivop.S | 54
-rw-r--r--  xen/arch/ia64/ivt.S | 49
-rw-r--r--  xen/arch/ia64/patch/linux-2.6.11/io.h | 2
-rw-r--r--  xen/arch/ia64/patch/linux-2.6.11/ptrace.h | 8
-rw-r--r--  xen/arch/ia64/patch/linux-2.6.11/uaccess.h | 22
-rw-r--r--  xen/arch/ia64/privop.c | 53
-rw-r--r--  xen/arch/ia64/process.c | 50
-rw-r--r--  xen/arch/ia64/regionreg.c | 10
-rw-r--r--  xen/arch/ia64/tools/mkbuildtree | 2
-rw-r--r--  xen/arch/ia64/vcpu.c | 22
-rw-r--r--  xen/arch/ia64/vhpt.c | 31
-rw-r--r--  xen/arch/ia64/vmmu.c | 76
-rw-r--r--  xen/arch/ia64/vmx_ivt.S | 84
-rw-r--r--  xen/arch/ia64/vmx_minstate.h | 8
-rw-r--r--  xen/arch/ia64/vmx_process.c | 1
-rw-r--r--  xen/arch/ia64/vtlb.c | 96
-rw-r--r--  xen/arch/ia64/xenmem.c | 2
-rw-r--r--  xen/arch/ia64/xenmisc.c | 8
-rw-r--r--  xen/arch/x86/cpu/amd.c | 29
-rw-r--r--  xen/arch/x86/cpu/common.c | 49
-rw-r--r--  xen/arch/x86/cpu/cpu.h | 1
-rw-r--r--  xen/arch/x86/cpu/intel.c | 23
-rw-r--r--  xen/arch/x86/dom0_ops.c | 4
-rw-r--r--  xen/arch/x86/setup.c | 7
-rw-r--r--  xen/arch/x86/smpboot.c | 32
-rw-r--r--  xen/arch/x86/x86_32/entry.S | 1
-rw-r--r--  xen/common/Makefile | 4
-rw-r--r--  xen/common/dom0_ops.c | 19
-rw-r--r--  xen/common/event_channel.c | 4
-rw-r--r--  xen/common/grant_table.c | 6
-rw-r--r--  xen/common/kernel.c | 41
-rw-r--r--  xen/common/policy_ops.c | 117
-rw-r--r--  xen/include/acm/acm_core.h | 117
-rw-r--r--  xen/include/acm/acm_endian.h | 88
-rw-r--r--  xen/include/acm/acm_hooks.h | 337
-rw-r--r--  xen/include/asm-ia64/config.h | 5
-rw-r--r--  xen/include/asm-ia64/domain.h | 15
-rw-r--r--  xen/include/asm-ia64/event.h | 16
-rw-r--r--  xen/include/asm-ia64/mm.h | 140
-rw-r--r--  xen/include/asm-ia64/tlb.h | 10
-rw-r--r--  xen/include/asm-ia64/vcpu.h | 4
-rw-r--r--  xen/include/asm-ia64/vhpt.h | 17
-rw-r--r--  xen/include/asm-ia64/vmmu.h | 31
-rw-r--r--  xen/include/asm-ia64/vmx_platform.h | 2
-rw-r--r--  xen/include/asm-ia64/vmx_ptrace.h | 97
-rw-r--r--  xen/include/asm-ia64/vmx_vpd.h | 1
-rw-r--r--  xen/include/asm-x86/event.h | 16
-rw-r--r--  xen/include/asm-x86/processor.h | 1
-rw-r--r--  xen/include/asm-x86/smp.h | 2
-rw-r--r--  xen/include/public/acm.h | 161
-rw-r--r--  xen/include/public/acm_dom0_setup.h | 34
-rw-r--r--  xen/include/public/arch-ia64.h | 138
-rw-r--r--  xen/include/public/arch-x86_32.h | 3
-rw-r--r--  xen/include/public/arch-x86_64.h | 3
-rw-r--r--  xen/include/public/dom0_ops.h | 3
-rw-r--r--  xen/include/public/policy_ops.h | 74
-rw-r--r--  xen/include/public/version.h | 30
-rw-r--r--  xen/include/public/xen.h | 4
-rw-r--r--  xen/include/xen/event.h | 2
-rw-r--r--  xen/include/xen/sched.h | 2
-rw-r--r--  xen/include/xen/smp.h | 2
-rw-r--r--  xen/include/xen/string.h | 5
107 files changed, 7147 insertions, 606 deletions
diff --git a/.rootkeys b/.rootkeys
index d2c3de4b04..f88922acce 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -21,6 +21,7 @@
420b949cy9ZGzED74Fz_DaWlK7tT4g docs/misc/crashdb.txt
4251a1f82AexscYEiF4Iku8Gc_kWfQ docs/misc/grant-tables.txt
424d462b5GuApQ_NyMsRFt9LbrsWow docs/misc/sedf_scheduler_mini-HOWTO.txt
+42b7434c-M2l4Og0klGf6xSAARqa2w docs/misc/shype4xen_readme.txt
40d6ccbfKKBq8jE0ula4eHEzBiQuDA docs/misc/xen_config.html
410a4c2bAO_m_l4RsiiPHnZ4ixHWbQ docs/misc/xend.tex
3f9e7d564bWFB-Czjv1qdmE6o0GqNg docs/src/interface.tex
@@ -777,6 +778,16 @@
40c9c469kT0H9COWzA4XzPBjWK0WsA tools/misc/netfix
4022a73cEKvrYe_DVZW2JlAxobg9wg tools/misc/nsplitd/Makefile
4022a73cKms4Oq030x2JBzUB426lAQ tools/misc/nsplitd/nsplitd.c
+42b74436oXEaaUH_dPcGFviMiwNgCQ tools/misc/policyprocessor/SecurityLabel.java
+42b74436fIW8ZI3pUpu13-Ox6G2cOA tools/misc/policyprocessor/SecurityPolicySpec.xsd
+42b74436T4CN4HMWsuaHD2zS8jY1BA tools/misc/policyprocessor/SsidsEntry.java
+42b74436Dk3WKJl6-SyP3LEBo3DXkQ tools/misc/policyprocessor/XmlToBin.java
+42b74436ABj4SOVBWqY_IEIboFUkeA tools/misc/policyprocessor/XmlToBinInterface.java
+42b7443684kBOrEBKFod4fGvnJ-rdA tools/misc/policyprocessor/myHandler.java
+42b74436JjvZmOp2DfMb-TnpGZXQ8w tools/misc/policyprocessor/readme.install
+42b74436-0Ig0yb-w1BYyCAFVTwqUg tools/misc/policyprocessor/readme.xen
+42b74436WAJ6lmTO3foadk2527PFBQ tools/misc/policyprocessor/xen_sample_def.xml
+42b744365VrTALmqRroQOBZ9EopUsw tools/misc/policyprocessor/xen_sample_policy.xml
42308df9dv_ZuP49nNPIROEMQ3F_LA tools/misc/xc_shadow.c
3f5ef5a2ir1kVAthS14Dc5QIRCEFWg tools/misc/xen-clone
3f5ef5a2dTZP0nnsFoeq2jRf3mWDDg tools/misc/xen-clone.README
@@ -785,6 +796,8 @@
41adc641dV-0cDLSyzMs5BT8nL7v3Q tools/misc/xenperf.c
4056f5155QYZdsk-1fLdjsZPFTnlhg tools/misc/xensymoops
40cf2937dqM1jWW87O5OoOYND8leuA tools/misc/xm
+42b742f6JFcp6LFpYu-B4AEsfQwSFw tools/policy/Makefile
+42b742f66XOdRMrwaHvbCdSSQyCrFw tools/policy/policy_tool.c
4270cc81g3nSNYCZ1ryCMDEbLtMtbQ tools/pygrub/Makefile
4270deeccyRsJn6jLnRh9odRtMW9SA tools/pygrub/README
4270cc81EIl7NyaS3Av6IPRk2c2a6Q tools/pygrub/setup.py
@@ -1101,6 +1114,12 @@
3f72f1bdJPsV3JCnBqs9ddL9tr6D2g xen/COPYING
3ddb79bcbOVHh38VJzc97-JEGD4dJQ xen/Makefile
3ddb79bcWnTwYsQRWl_PaneJfa6p0w xen/Rules.mk
+42b742f6XHTfIEm_hUPtzjKr37LVhw xen/acm/Makefile
+42b742f6tHzn0fZWH3TjPva8gbqpow xen/acm/acm_chinesewall_hooks.c
+42b742f6bM8kZwuIUbepHZ8SQQkjJA xen/acm/acm_core.c
+42b742f6cwfrPubqH47gQpke8xkYSA xen/acm/acm_null_hooks.c
+42b742f69qSxm5MM-wtPaWtCqyI3KA xen/acm/acm_policy.c
+42b742f6VbmdlwekQRMhXugjcu9QXg xen/acm/acm_simple_type_enforcement_hooks.c
421098b25A0RvuYN3rP28ga3_FN3_Q xen/arch/ia64/Makefile
421098b2okIeYXS9w9avmSozls61xA xen/arch/ia64/Rules.mk
421098b21p12UcKjHBrLh_LjlvNEwA xen/arch/ia64/acpi.c
@@ -1146,6 +1165,7 @@
425ae516juUB257qrwUdsL9AsswrqQ xen/arch/ia64/patch/linux-2.6.11/time.c
425ae5167zQn7zYcgKtDUDX2v-e8mw xen/arch/ia64/patch/linux-2.6.11/tlb.c
425ae5162bIl2Dgd19x-FceB4L9oGw xen/arch/ia64/patch/linux-2.6.11/types.h
+42ae01f01KDfSgVQnscwJ0psRmEaCw xen/arch/ia64/patch/linux-2.6.11/uaccess.h
425ae516cFUNY2jHD46bujcF5NJheA xen/arch/ia64/patch/linux-2.6.11/unaligned.c
421098b39QFMC-1t1r38CA7NxAYBPA xen/arch/ia64/patch/linux-2.6.7/bootmem.h
421098b3SIA1vZX9fFUjo1T3o_jMCQ xen/arch/ia64/patch/linux-2.6.7/current.h
@@ -1323,6 +1343,7 @@
41a61536SZbR6cj1ukWTb0DYU-vz9w xen/common/multicall.c
3ddb79bdD4SLmmdMD7yLW5HcUWucXw xen/common/page_alloc.c
3e54c38dkHAev597bPr71-hGzTdocg xen/common/perfc.c
+42b742f6mgq9puEr7lUrLST0VEpsig xen/common/policy_ops.c
40589968dD2D1aejwSOvrROg7fOvGQ xen/common/sched_bvt.c
41ebbfe9oF1BF3cH5v7yE3eOL9uPbA xen/common/sched_sedf.c
3e397e6619PgAfBbw2XFbXkewvUWgw xen/common/schedule.c
@@ -1338,6 +1359,9 @@
4049e6bfNSIq7s7OV-Bd69QD0RpR2Q xen/drivers/char/console.c
4298e018XQtZkCdufpyFimOGZqqsFA xen/drivers/char/ns16550.c
3e4a8cb7nMChlro4wvOBo76n__iCFA xen/drivers/char/serial.c
+42b742f6OteAMPWnoqxqfRX3yxD0yw xen/include/acm/acm_core.h
+42b742f6XfIijctEwA0YWL2BoWtDNg xen/include/acm/acm_endian.h
+42b742f6jXvp1vdbU2v2WJjTPku65A xen/include/acm/acm_hooks.h
40715b2cFpte_UNWnBZW0Du7z9AhTQ xen/include/acpi/acconfig.h
40715b2ctNvVZ058w8eM8DR9hOat_A xen/include/acpi/acexcep.h
40715b2com8I01qcHcAw47e93XsCqQ xen/include/acpi/acglobal.h
@@ -1364,6 +1388,7 @@
421098b6ZcIrn_gdqjUtdJyCE0YkZQ xen/include/asm-ia64/debugger.h
421098b6z0zSuW1rcSJK1gR8RUi-fw xen/include/asm-ia64/dom_fw.h
421098b6Nn0I7hGB8Mkd1Cis0KMkhA xen/include/asm-ia64/domain.h
+42b1d2d0rkNCmG2nFOnL-OfhJG9mDw xen/include/asm-ia64/event.h
4241e880hAyo_dk0PPDYj3LsMIvf-Q xen/include/asm-ia64/flushtlb.h
421098b6X3Fs2yht42TE2ufgKqt2Fw xen/include/asm-ia64/ia64_int.h
421098b7psFAn8kbeR-vcRCdc860Vw xen/include/asm-ia64/init.h
@@ -1388,7 +1413,6 @@
428b9f38is0zTsIm96_BKo4MLw0SzQ xen/include/asm-ia64/vmx_pal_vsa.h
428b9f38iDqbugHUheJrcTCD7zlb4g xen/include/asm-ia64/vmx_phy_mode.h
428b9f38grd_B0AGB1yp0Gi2befHaQ xen/include/asm-ia64/vmx_platform.h
-428b9f38lm0ntDBusHggeQXkx1-1HQ xen/include/asm-ia64/vmx_ptrace.h
428b9f38XgwHchZEpOzRtWfz0agFNQ xen/include/asm-ia64/vmx_vcpu.h
428b9f38tDTTJbkoONcAB9ODP8CiVg xen/include/asm-ia64/vmx_vpd.h
428b9f38_o0U5uJqmxZf_bqi6_PqVw xen/include/asm-ia64/vtm.h
@@ -1412,6 +1436,7 @@
40715b2dTokMLYGSuD58BnxOqyWVew xen/include/asm-x86/div64.h
4204e7acwzqgXyTAPKa1nM-L7Ec0Qw xen/include/asm-x86/domain.h
41d3eaaeIBzW621S1oa0c2yk7X43qQ xen/include/asm-x86/e820.h
+42b1d2caFkOByU5n4LuMnT05f3kJFg xen/include/asm-x86/event.h
3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen/include/asm-x86/fixmap.h
3e2d29944GI24gf7vOP_7x8EyuqxeA xen/include/asm-x86/flushtlb.h
4294b5eep4lWuDtYUR74gYwt-_FnHA xen/include/asm-x86/genapic.h
@@ -1487,6 +1512,8 @@
404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h
422f27c8RHFkePhD34VIEpMMqofZcA xen/include/asm-x86/x86_emulate.h
400304fcmRQmDdFYEzDh0wcBba9alg xen/include/public/COPYING
+42b742f6duiOTlZvysQkRYZHYBXqvg xen/include/public/acm.h
+42b742f7TIMsQgUaNDJXp3QlBve2SQ xen/include/public/acm_dom0_setup.h
421098b7OKb9YH_EUA_UpCxBjaqtgA xen/include/public/arch-ia64.h
404f1bc68SXxmv0zQpXBWGrCzSyp8w xen/include/public/arch-x86_32.h
404f1bc7IwU-qnH8mJeVu0YsNGMrcw xen/include/public/arch-x86_64.h
@@ -1500,8 +1527,10 @@
41d40e9b8zCk5VDqhVbuQyhc7G3lqA xen/include/public/io/ring.h
41ee5e8c6mLxIx82KPsbpt_uts_vSA xen/include/public/io/usbif.h
4051db79512nOCGweabrFWO2M2h5ng xen/include/public/physdev.h
+42b742f7Lzy8SKKG25L_-fgk5FHA2Q xen/include/public/policy_ops.h
40589968wmhPmV5-ENbBYmMjnedgKw xen/include/public/sched_ctl.h
404f3d2eR2Owk-ZcGOx9ULGHg3nrww xen/include/public/trace.h
+42b5a5f2QC1IxeuwCwwsOEhvcJ2BJg xen/include/public/version.h
4266bd01Ul-pC01ZVvBkhBnv5eqzvw xen/include/public/vmx_assist.h
3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/public/xen.h
3e397e66m2tO3s-J8Jnr7Ws_tGoPTg xen/include/xen/ac_timer.h
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index 2443869070..87c83d88c0 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -39,6 +39,7 @@ iap10@labyrinth.cl.cam.ac.uk
iap10@nidd.cl.cam.ac.uk
iap10@pb001.cl.cam.ac.uk
iap10@pb007.cl.cam.ac.uk
+iap10@spot.cl.cam.ac.uk
iap10@striker.cl.cam.ac.uk
iap10@tetris.cl.cam.ac.uk
jrb44@plym.cl.cam.ac.uk
diff --git a/Config.mk b/Config.mk
index b3320b2711..390a9b3abe 100644
--- a/Config.mk
+++ b/Config.mk
@@ -31,3 +31,6 @@ endif
LDFLAGS += $(foreach i, $(EXTRA_LIB), -L$(i))
CFLAGS += $(foreach i, $(EXTRA_INCLUDES), -I$(i))
+
+# Choose the best mirror to download linux kernel
+KERNEL_REPO = http://www.kernel.org
diff --git a/buildconfigs/Rules.mk b/buildconfigs/Rules.mk
index fe02b1e11b..f2e65f0046 100644
--- a/buildconfigs/Rules.mk
+++ b/buildconfigs/Rules.mk
@@ -27,7 +27,7 @@ vpath linux-%.tar.bz2 $(LINUX_SRC_PATH)
linux-%.tar.bz2: override _LINUX_VDIR = $(word 1,$(subst ., ,$*)).$(word 2,$(subst ., ,$*))
linux-%.tar.bz2:
@echo "Cannot find $@ in path $(LINUX_SRC_PATH)"
- wget http://www.kernel.org/pub/linux/kernel/v$(_LINUX_VDIR)/$@ -O./$@
+ wget $(KERNEL_REPO)/pub/linux/kernel/v$(_LINUX_VDIR)/$@ -O./$@
# Expand NetBSD release to NetBSD version
NETBSD_RELEASE ?= 2.0
@@ -57,6 +57,7 @@ $(patsubst %,pristine-%/.valid-pristine,$(ALLSPARSETREES)) : pristine-%/.valid-p
mkdir -p tmp-pristine-$*
touch tmp-pristine-$*/.bk_skip
tar -C tmp-pristine-$* -jxf $<
+ -@rm tmp-pristine-$*/pax_global_header
mv tmp-pristine-$*/* $(@D)
@rm -rf tmp-pristine-$*
touch $@ # update timestamp to avoid rebuild
diff --git a/docs/misc/shype4xen_readme.txt b/docs/misc/shype4xen_readme.txt
new file mode 100644
index 0000000000..e2c09a0153
--- /dev/null
+++ b/docs/misc/shype4xen_readme.txt
@@ -0,0 +1,580 @@
+Copyright: IBM Corporation (C)
+20 June 2005
+Author: Reiner Sailer
+
+This document is a very short introduction to the sHype access control
+security architecture implementation and how it appears to users. It
+is a very preliminary draft for the courageous ones to get "their feet wet"
+and to be able to give feedback (via the xen-devel/xense-devel mailing lists).
+
+Install:
+
+cd into xeno-unstable.bk
+(use --dry-run option if you want to test the patch only)
+patch -p1 -g0 < *tools.diff
+patch -p1 -g0 < *xen.diff
+
+(no rejects, probably some line offsets)
+
+make uninstall; make mrproper; make; ./install.sh should install the default
+sHype into Xen (rebuild your initrd images if necessary). Reboot.
+
+Debug output: there are two triggers for debug output:
+a) General sHype debug:
+ xeno-unstable.bk/xen/include/public/acm.h
+ undefine ACM_DEBUG to switch this debug off
+
+b) sHype enforcement hook trace: This prints a small trace for each enforcement
+hook that is executed. The trigger is in
+ xeno-unstable.bk/xen/include/acm/acm_hooks.h
+ undefine ACM_TRACE_MODE to switch this debug off
+
+1. The default NULL policy
+***************************
+When you apply the patches and start up Xen, you should at first not notice any
+difference because the default policy is the "NULL" policy, which as the name
+implies does not enforce anything.
+
+However, when you try
+
+[root@laptop policy]# xm list
+Name Id Mem(MB) CPU State Time(s) Console SSID-REF
+Domain-0 0 620 0 r---- 25.6 default
+
+You will notice a new parameter "SSID-REF" displayed for domains. This
+parameter describes the subject security identifier reference of the domain. It
+is shown as "default" since there is no policy to be enforced.
+
+To display the currently enforced policy, use the policy tool under xeno-
+unstable.bk/tools/policy: policy_tool getpolicy. You should see output like the
+one below.
+
+[root@laptop policy]#./policy_tool getpolicy
+
+Policy dump:
+============
+Magic = 1debc.
+PolVer = aaaa0000.
+Len = 14.
+Primary = NULL policy (c=0, off=14).
+Secondary = NULL policy (c=0, off=14).
+No primary policy (NULL).
+No secondary policy (NULL).
+
+Policy dump End.
+
+Since this is a dump of a binary policy, it's not pretty. The important parts
+are the "Primary" and "Secondary" policy fields, both set to "NULL policy". sHype
+currently allows setting two independent policies; hence the two SSID-REF parts
+shown in 'xm list'. Here, "primary" only means that this policy is
+checked first; the secondary policy is checked if the primary results in
+"permitted access". The result of the combined policy is "permitted" if both
+policies return permitted (the NULL policy always returns permitted). The result is
+"denied" if at least one of the policies returns "denied". Look into xeno-
+unstable.bk/xen/include/acm/acm_hooks.h for the general hook structure
+integrating the policy decisions (if you like; you won't need it for the rest
+of this README).
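+
+As a minimal sketch (illustrative only; the names below are made up, not taken
+from the hypervisor source), each hook combines the two policies like this:
+
+    /* the primary policy is consulted first; the secondary only matters if
+     * the primary permits (the NULL policy always permits) */
+    int combined_decision(int primary_permits, int secondary_permits)
+    {
+        if (!primary_permits)
+            return 0;        /* denied by the primary policy   */
+        if (!secondary_permits)
+            return 0;        /* denied by the secondary policy */
+        return 1;            /* permitted by both              */
+    }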
+
+2. Setting Chinese Wall and Simple Type Enforcement policies:
+*************************************************************
+
+We'll get to the point quickly. However, in order to understand what we are doing,
+we must at least understand the purpose of the policies that we are going to
+enforce. The two policies presented here are just examples, and the
+implementation makes it easy to add new policies.
+
+2.1. Chinese Wall policy: "decides whether a domain can be started based on
+this domain's ssidref and the ssidrefs of the currently running domains".
+Generally, the Chinese Wall policy allows specifying certain types (or classes
+or categories, whichever word you prefer) that conflict; we usually assign a
+type to a workload, and the set of types of the workloads running in a domain
+makes up the type set of this domain. Each domain is assigned a set of types
+through its SSID-REF (we register Chinese Wall as the primary policy, so the
+ssidref used for determining the Chinese Wall types is the one annotated with
+"p:" in xm list), since each SSID-REF points at a set of types. We'll see how
+SSID-REFs are represented in Xen later when we look at the policy. (A good
+read on Chinese Wall is Brewer/Nash, "The Chinese Wall Security Policy", 1989.)
+
+So let's assume the Chinese Wall policy we are running distinguishes 10 types:
+t0 ... t9. Let us assume further that each SSID-REF points to a set that
+includes exactly one type (attached to domains that run workloads of a single
+type). SSID-REF 0 points to {t0}, ssidref 1 points to {t1} ... 9 points to
+{t9}. [This is actually the example policy we are going to push into xen later]
+
+Now the Chinese Wall policy allows you to define "conflict type sets", and it
+guarantees that, for any conflict set, at most one of its types is "running" at any time.
+As an example, we have defined two conflict sets: {t2, t3} and {t0, t5, t6}.
+With these conflict sets specified, sHype ensures that at most one type of each set
+is running (either t2 or t3 but not both; either t0 or t5 or t6 but no more than
+one of them).
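+
+A minimal sketch of that admission check (illustrative only; this is not the
+actual hypervisor code and the names are made up):
+
+    /* deny domain creation if any Chinese Wall type of the new domain is
+     * already blocked by the aggregate conflict set of the running domains */
+    int chwall_admission_ok(const int *new_dom_types,
+                            const int *conflict_aggregate, int max_types)
+    {
+        int i;
+        for (i = 0; i < max_types; i++)
+            if (new_dom_types[i] && conflict_aggregate[i])
+                return 0;   /* denied */
+        return 1;           /* permitted; running types/aggregate get updated */
+    }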
+
+The effect is that administrators can define which workload types cannot run
+simultaneously on a single Xen system. This is useful to limit the covert
+timing channels between such payloads or to ensure that payloads don't
+interfere with each other through existing resource dependencies.
+
+2.2. Simple Type Enforcement (ste) policy: "decides whether two domains can
+share data, e.g., set up event channels or grant tables to each other, based on
+the two domains' ssidrefs." This, as the name says, is a simple policy. Think of
+each type as a single color. Each domain has one or more colors, i.e., the
+domain's ssid for the ste policy points to a set with one or more types
+set. Let us assume in our example policy we distinguish 5 colors (types)
+and define 5 different ssids referenced by ssidref=0..4. Each ssid except ssid(0)
+shall have exactly one type set, i.e., describes a single color. Only ssid(0) has
+all types set, i.e., has all defined colors.
+
+The ste policy enforces sharing restrictions by requiring that two domains that
+want to establish an event channel or grant pages to each other have a common
+color. Currently all domains communicate through DOM0 by default; i.e., Domain0
+will necessarily have all colors to be able to create domains (thus, we will
+assign ssidref(0) to Domain0 in our example below).
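+
+A minimal sketch of the sharing check (again illustrative only, made-up names):
+
+    /* permit an event channel or grant table mapping only if the two
+     * domains share at least one ste type (color) */
+    int ste_share_ok(const int *dom1_types, const int *dom2_types, int max_types)
+    {
+        int i;
+        for (i = 0; i < max_types; i++)
+            if (dom1_types[i] && dom2_types[i])
+                return 1;   /* permitted: common color found */
+        return 0;           /* denied: no common color */
+    }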
+
+More complex mandatory access control policies governing sharing will follow;
+such policies are more sophisticated than the "color" scheme above, allowing
+more flexible (and complex :-)) access control decisions than "share a color" or
+"don't share a color", and will be able to express finer-grained policies.
+
+
+2.3 Binary Policy:
+In the future, we will have a policy tool that takes as input a more human-readable
+policy description, using types such as development, home-banking, donated-
+Grid, CorpA-Payload ... and translates the respective policy into what we see
+today as the binary policy, using 1s and 0s and sets of them. For now, we must
+live with the binary policy when working with sHype.
+
+
+2.4 Exemplary use of a real sHype policy on Xen. To activate a real policy,
+edit the file (yes, this will soon be a compile option):
+ xeno-unstable.bk/xen/include/public/acm.h
+ Change: #define ACM_USE_SECURITY_POLICY ACM_NULL_POLICY
+ To : #define ACM_USE_SECURITY_POLICY ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY
+ cd xeno-unstable.bk
+ make mrproper
+ make uninstall (manually remove /etc/xen.old if necessary)
+ make
+ ./install.sh (recreate your kernel initrd's if necessary)
+ Reboot into new xen.gz
+
+After booting, check out 'xm dmesg'; it should show, somewhere in the middle:
+
+(XEN) acm_init: Enforcing Primary CHINESE WALL policy, Secondary SIMPLE TYPE
+ENFORCEMENT policy.
+
+Even though you can activate those policies in any combination and also
+independently, the policy tool currently only supports setting the policy for
+the above combination.
+
+Now look at the minimal startup policy with:
+ xeno-unstable.bk/tools/policy/policy_tool getpolicy
+
+You should see something like:
+
+[root@laptop policy]# ./policy_tool getpolicy
+
+Policy dump:
+============
+Magic = 1debc.
+PolVer = aaaa0000.
+Len = 36.
+Primary = CHINESE WALL policy (c=1, off=14).
+Secondary = SIMPLE TYPE ENFORCEMENT policy (c=2, off=2c).
+
+
+Chinese Wall policy:
+====================
+Max Types = 1.
+Max Ssidrefs = 1.
+Max ConfSets = 1.
+Ssidrefs Off = 10.
+Conflicts Off = 12.
+Runing T. Off = 14.
+C. Agg. Off = 16.
+
+SSID To CHWALL-Type matrix:
+
+ ssidref 0: 00
+
+Confict Sets:
+
+ c-set 0: 00
+
+Running
+Types: 00
+
+Conflict
+Aggregate Set: 00
+
+
+Simple Type Enforcement policy:
+===============================
+Max Types = 1.
+Max Ssidrefs = 1.
+Ssidrefs Off = 8.
+
+SSID To STE-Type matrix:
+
+ ssidref 0: 01
+
+
+Policy dump End.
+
+This is a minimal policy (of little use), except that it prevents starting any
+domain that does not have its ssidref set to 0x0. The Chinese Wall policy has
+nothing to enforce, and the ste policy only knows one type, which is set for the
+only defined ssidref.
+
+The item that defines the ssidref in a domain configuration is:
+
+ssidref = 0x12345678
+
+Here ssidref is interpreted as a 32-bit number: the lower 16 bits become
+the ssidref for the primary policy and the upper 16 bits become the ssidref for
+the secondary policy. sHype currently supports two policies, but this is an
+implementation decision and can be extended if necessary.
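+
+For example (a sketch only, not code from the tree), splitting a configured
+ssidref into the two per-policy ssidrefs amounts to:
+
+    unsigned int   ssidref           = 0x00020001;
+    unsigned short primary_ssidref   = ssidref & 0xffff;         /* 0x0001: Chinese Wall ("p:")            */
+    unsigned short secondary_ssidref = (ssidref >> 16) & 0xffff; /* 0x0002: Simple Type Enforcement ("s:") */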
+
+This reference defines the security information of a domain. The meaning of the
+SSID-REF depends on the policy, so we explain it when we explain the real
+policies.
+
+
+Setting a new Security Policy:
+******************************
+The policy tool, with all its current limitations, has one usable example policy
+compiled in. Please try the setpolicy command now:
+ xeno-unstable.bk/tools/policy/policy_tool setpolicy
+
+You should see a dump of the policy you are setting. It should say at the very
+end:
+
+Policy successfully set.
+
+Now try to dump the currently enforced policy, which is the policy we have just
+set, together with the dynamic security state information of this policy
+(the "<<<" markers below add some additional explanations):
+
+[root@laptop policy]# ./policy_tool getpolicy
+
+Policy dump:
+============
+Magic = 1debc.
+PolVer = aaaa0000.
+Len = 112.
+Primary = CHINESE WALL policy (c=1, off=14).
+Secondary = SIMPLE TYPE ENFORCEMENT policy (c=2, off=d8).
+
+
+Chinese Wall policy:
+====================
+Max Types = a.
+Max Ssidrefs = 5.
+Max ConfSets = 2.
+Ssidrefs Off = 10.
+Conflicts Off = 74.
+Runing T. Off = 9c.
+C. Agg. Off = b0.
+
+SSID To CHWALL-Type matrix:
+
+ ssidref 0: 01 00 00 00 00 00 00 00 00 00 <<< type0 is set for ssidref0
+ ssidref 1: 00 01 00 00 00 00 00 00 00 00
+ ssidref 2: 00 00 01 00 00 00 00 00 00 00
+ ssidref 3: 00 00 00 01 00 00 00 00 00 00
+ ssidref 4: 00 00 00 00 01 00 00 00 00 00 <<< type4 is set for ssidref4
+ <<< types 5-9 are unused
+Confict Sets:
+
+ c-set 0: 00 00 01 01 00 00 00 00 00 00 <<< type2 and type3 never run together
+ c-set 1: 01 00 00 00 00 01 01 00 00 00 <<< only one of types 0, 5 or 6
+ <<< can run simultaneously
+Running
+Types: 01 00 00 00 00 00 00 00 00 00 <<< ref-count for types of running domains
+
+Conflict
+Aggregate Set: 00 00 00 00 00 01 01 00 00 00 <<< aggregated set of types that
+ <<< cannot run because they
+ <<< are in conflict set 1 and
+ <<< (domain 0 is running w t0)
+
+
+Simple Type Enforcement policy:
+===============================
+Max Types = 5.
+Max Ssidrefs = 5.
+Ssidrefs Off = 8.
+
+SSID To STE-Type matrix:
+
+ ssidref 0: 01 01 01 01 01 <<< ssidref0 points to a set that
+ <<< has all types set (colors)
+ ssidref 1: 00 01 00 00 00 <<< ssidref1 has color1 set
+ ssidref 2: 00 00 01 00 00 <<< ...
+ ssidref 3: 00 00 00 01 00
+ ssidref 4: 00 00 00 00 01
+
+
+Policy dump End.
+
+
+This is a small example policy with which we will demonstrate the enforcement.
+
+Starting Domains with policy enforcement
+========================================
+Now let us play with this policy.
+
+Define 3 or 4 domain configurations. I use the following config using a ramdisk
+only and about 8MBytes of memory for each DomU (test purposes):
+
+#-------configuration xmsec1-------------------------
+kernel = "/boot/vmlinuz-2.6.11-xenU"
+ramdisk="/boot/U1_ramdisk.img"
+#security reference identifier
+ssidref= 0x00010001
+memory = 10
+name = "xmsec1"
+cpu = -1 # leave to Xen to pick
+# Number of network interfaces. Default is 1.
+nics=1
+dhcp="dhcp"
+#-----------------------------------------------------
+
+xmsec2 and xmsec3 look the same except for the name and the ssidref line. Use
+your domain config file and add "ssidref = 0x00010001" to the first (xmsec1),
+"ssidref= 0x00020002" to the second (call it xmsec2), and "ssidref=0x00030003"
+to the third (we will call this one xmsec3).
+
+First start xmsec1: xm create -c xmsec1 (succeeds)
+
+Then
+[root@laptop policy]# xm list
+Name Id Mem(MB) CPU State Time(s) Console SSID-REF
+Domain-0 0 620 0 r---- 42.3 s:00/p:00
+xmnosec 1 9 0 -b--- 0.3 9601 s:00/p:05
+xmsec1 2 9 0 -b--- 0.2 9602 s:01/p:01
+
+This shows a new domain xmsec1 running with primary (here: Chinese Wall) ssidref 1
+and secondary (here: simple type enforcement) ssidref 1. The two ssidrefs are
+independent and can differ for a domain.
+
+[root@laptop policy]# ./policy_tool getpolicy
+
+Policy dump:
+============
+Magic = 1debc.
+PolVer = aaaa0000.
+Len = 112.
+Primary = CHINESE WALL policy (c=1, off=14).
+Secondary = SIMPLE TYPE ENFORCEMENT policy (c=2, off=d8).
+
+
+Chinese Wall policy:
+====================
+Max Types = a.
+Max Ssidrefs = 5.
+Max ConfSets = 2.
+Ssidrefs Off = 10.
+Conflicts Off = 74.
+Runing T. Off = 9c.
+C. Agg. Off = b0.
+
+SSID To CHWALL-Type matrix:
+
+ ssidref 0: 01 00 00 00 00 00 00 00 00 00
+ ssidref 1: 00 01 00 00 00 00 00 00 00 00
+ ssidref 2: 00 00 01 00 00 00 00 00 00 00
+ ssidref 3: 00 00 00 01 00 00 00 00 00 00
+ ssidref 4: 00 00 00 00 01 00 00 00 00 00
+
+Confict Sets:
+
+ c-set 0: 00 00 01 01 00 00 00 00 00 00
+ c-set 1: 01 00 00 00 00 01 01 00 00 00 <<< t1 is not part of any c-set
+
+Running
+Types: 01 01 00 00 00 00 00 00 00 00 <<< xmsec1 has ssidref 1->type1
+ ^^ <<< ref-count at position 1 incr
+Conflict
+Aggregate Set: 00 00 00 00 00 01 01 00 00 00 <<< domain 1 was allowed to
+ <<< start since type 1 was not
+ <<< in conflict with running
+ <<< types
+
+Simple Type Enforcement policy:
+===============================
+Max Types = 5.
+Max Ssidrefs = 5.
+Ssidrefs Off = 8.
+
+SSID To STE-Type matrix:
+
+ ssidref 0: 01 01 01 01 01 <<< the ste policy maintains no running state; we
+ ssidref 1: 00 01 00 00 00 <-- <<< see that domain xmsec1 has ste
+ ssidref 2: 00 00 01 00 00 <<< ssidref1->type1 and has this type in
+ ssidref 3: 00 00 00 01 00 <<< common with dom0
+ ssidref 4: 00 00 00 00 01
+
+
+Policy dump End.
+
+Look at the sHype output in 'xm dmesg':
+
+[root@laptop xen]# xm dmesg
+.
+.
+[somewhere near the very end]
+(XEN) chwall_init_domain_ssid: determined chwall_ssidref to 1.
+(XEN) ste_init_domain_ssid.
+(XEN) ste_init_domain_ssid: determined ste_ssidref to 1.
+(XEN) acm_init_domain_ssid: Instantiated individual ssid for domain 0x01.
+(XEN) chwall_post_domain_create.
+(XEN) ste_pre_eventchannel_interdomain.
+(XEN) ste_pre_eventchannel_interdomain: (evtchn 0 --> 1) common type #01.
+(XEN) shype_authorize_domops.
+(XEN) ste_pre_eventchannel_interdomain.
+(XEN) ste_pre_eventchannel_interdomain: (evtchn 0 --> 1) common type #01.
+(XEN) ste_pre_eventchannel_interdomain.
+(XEN) ste_pre_eventchannel_interdomain: (evtchn 0 --> 1) common type #01.
+
+
+You can see that the Chinese Wall policy does not complain and that the ste
+policy makes three access control decisions for the three event channels set up
+between domain 0 and the new domain 1. Each time, the two domains share
+type1 and setting up the event channel is permitted.
+
+
+Starting up a second domain xmsec2:
+
+[root@laptop xen]# xm create -c xmsec2
+Using config file "xmsec2".
+Started domain xmsec2, console on port 9602
+************ REMOTE CONSOLE: CTRL-] TO QUIT ********
+Linux version 2.6.11-xenU (root@laptop.home.org) (gcc version 3.4.2 20041017
+(Red Hat 3.4.2-6.fc3)) #1 Wed Mar 30 13:14:31 EST 2005
+.
+.
+.
+[root@laptop policy]# xm list
+Name Id Mem(MB) CPU State Time(s) Console SSID-REF
+Domain-0 0 620 0 r---- 71.7 s:00/p:00
+xmsec1 1 9 0 -b--- 0.3 9601 s:01/p:01
+xmsec2 2 7 0 -b--- 0.3 9602 s:02/p:02 << our domain runs both policies with ssidref 2
+
+
+[root@laptop policy]# ./policy_tool getpolicy
+
+Policy dump:
+============
+Magic = 1debc.
+PolVer = aaaa0000.
+Len = 112.
+Primary = CHINESE WALL policy (c=1, off=14).
+Secondary = SIMPLE TYPE ENFORCEMENT policy (c=2, off=d8).
+
+
+Chinese Wall policy:
+====================
+Max Types = a.
+Max Ssidrefs = 5.
+Max ConfSets = 2.
+Ssidrefs Off = 10.
+Conflicts Off = 74.
+Runing T. Off = 9c.
+C. Agg. Off = b0.
+
+SSID To CHWALL-Type matrix:
+
+ ssidref 0: 01 00 00 00 00 00 00 00 00 00
+ ssidref 1: 00 01 00 00 00 00 00 00 00 00
+ ssidref 2: 00 00 01 00 00 00 00 00 00 00 <<< our domain has type 2 set
+ ssidref 3: 00 00 00 01 00 00 00 00 00 00
+ ssidref 4: 00 00 00 00 01 00 00 00 00 00
+
+Confict Sets:
+
+ c-set 0: 00 00 01 01 00 00 00 00 00 00 <<< t2 is in c-set0 with type 3
+ c-set 1: 01 00 00 00 00 01 01 00 00 00
+
+Running
+Types: 01 01 01 00 00 00 00 00 00 00 <<< t2 is running since the
+ ^^ <<< current aggregate conflict
+ <<< set (see above) does not
+ <<< include type 2
+Conflict
+Aggregate Set: 00 00 00 01 00 01 01 00 00 00 <<< type 3 is added to the
+ <<< conflict aggregate
+
+
+Simple Type Enforcement policy:
+===============================
+Max Types = 5.
+Max Ssidrefs = 5.
+Ssidrefs Off = 8.
+
+SSID To STE-Type matrix:
+
+ ssidref 0: 01 01 01 01 01
+ ssidref 1: 00 01 00 00 00
+ ssidref 2: 00 00 01 00 00
+ ssidref 3: 00 00 00 01 00
+ ssidref 4: 00 00 00 00 01
+
+
+Policy dump End.
+
+
+The sHype xen dmesg output looks similar to the one above when starting the
+first domain.
+
+Now we start xmsec3, which has ssidref 3. It therefore tries to run as type3, which
+conflicts with the running type2 (from xmsec2). As expected, creating this domain
+fails for security policy enforcement reasons.
+
+[root@laptop xen]# xm create -c xmsec3
+Using config file "xmsec3".
+Error: Error creating domain: (22, 'Invalid argument')
+[root@laptop xen]#
+
+[root@laptop xen]# xm dmesg
+.
+.
+[somewhere near the very end]
+(XEN) chwall_pre_domain_create.
+(XEN) chwall_pre_domain_create: CHINESE WALL CONFLICT in type 03.
+
+xmsec3's ssidref 3 points to type3, which is in the current conflict aggregate
+set. This domain cannot start until domain xmsec2 is destroyed, at which time
+the aggregate conflict set is reduced and type3 is excluded from it. Then,
+xmsec3 can start. Of course, afterwards, xmsec2 cannot be restarted. Try it.
+
+3. Policy tool
+**************
+tools/policy/policy_tool.c
+
+a) ./policy_tool getpolicy
+ prints the currently enforced policy
+ (see for example section 1.)
+
+b) ./policy_tool setpolicy
+ sets a predefined and hardcoded security
+ policy (the one described in section 2.)
+
+c) ./policy_tool dumpstats
+ prints some status information about the caching
+ of access control decisions (number of cache hits
+ and number of policy evaluations for grant_table
+ and event channels).
+
+d) ./policy_tool loadpolicy <binary_policy_file>
+ sets the policy defined in the <binary_policy_file>.
+ Please use the policy processor that is posted to this
+ mailing list to create such a binary policy from an XML
+ policy description.
+
+4. Policy interface:
+********************
+The policy interface works in network byte order (big endian). The reason for this
+is that policy files and policy management should be portable and platform-independent.
+
+Our policy interface enables managers to create a single binary policy file in a trusted
+environment and distribute it to multiple systems for enforcement.
+
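+A minimal sketch of what that means for a u16 field of the binary policy
+(illustrative only):
+
+    /* write a 16-bit value into the policy buffer in network byte order */
+    void write_u16_be(unsigned char *buf, int off, unsigned short val)
+    {
+        buf[off]     = (val >> 8) & 0xff;   /* most significant byte first */
+        buf[off + 1] =  val       & 0xff;
+    }
+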
+====================end-of file======================================= \ No newline at end of file
diff --git a/linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c b/linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c
index c1cf253510..208b00353f 100644
--- a/linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c
+++ b/linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c
@@ -623,7 +623,7 @@ static int netif_poll(struct net_device *dev, int *pbudget)
/* Only copy the packet if it fits in the current MTU. */
if (skb->len <= (dev->mtu + ETH_HLEN)) {
if ((skb->tail > skb->end) && net_ratelimit())
- printk(KERN_INFO "Received packet needs %d bytes more "
+ printk(KERN_INFO "Received packet needs %zd bytes more "
"headroom.\n", skb->tail - skb->end);
if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
@@ -967,9 +967,9 @@ static int create_netdev(int handle, struct net_device **val)
/* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
for (i = 0; i <= NETIF_TX_RING_SIZE; i++)
- np->tx_skbs[i] = (void *)(i+1);
+ np->tx_skbs[i] = (void *)((unsigned long) i+1);
for (i = 0; i <= NETIF_RX_RING_SIZE; i++)
- np->rx_skbs[i] = (void *)(i+1);
+ np->rx_skbs[i] = (void *)((unsigned long) i+1);
dev->open = network_open;
dev->hard_start_xmit = network_start_xmit;
@@ -1343,7 +1343,7 @@ static int xennet_proc_read(
{
struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
struct net_private *np = netdev_priv(dev);
- int len = 0, which_target = (int)data & 3;
+ int len = 0, which_target = (unsigned long) data & 3;
switch (which_target)
{
@@ -1368,7 +1368,7 @@ static int xennet_proc_write(
{
struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
struct net_private *np = netdev_priv(dev);
- int which_target = (int)data & 3;
+ int which_target = (unsigned long) data & 3;
char string[64];
long target;
diff --git a/tools/Makefile b/tools/Makefile
index 3a38e899de..00eb4991cc 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -12,6 +12,7 @@ SUBDIRS += xcs
SUBDIRS += xcutils
SUBDIRS += pygrub
SUBDIRS += firmware
+SUBDIRS += policy
.PHONY: all install clean check check_clean ioemu eioemuinstall ioemuclean
diff --git a/tools/libxc/xc.h b/tools/libxc/xc.h
index 3123d6e5a4..09eff6675f 100644
--- a/tools/libxc/xc.h
+++ b/tools/libxc/xc.h
@@ -110,6 +110,7 @@ int xc_waitdomain_core(int domain,
typedef struct {
u32 domid;
+ u32 ssidref;
unsigned int dying:1, crashed:1, shutdown:1,
paused:1, blocked:1, running:1;
unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
@@ -124,6 +125,7 @@ typedef struct {
typedef dom0_getdomaininfo_t xc_domaininfo_t;
int xc_domain_create(int xc_handle,
+ u32 ssidref,
u32 *pdomid);
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 8f0bba3216..2edf11c39d 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -9,6 +9,7 @@
#include "xc_private.h"
int xc_domain_create(int xc_handle,
+ u32 ssidref,
u32 *pdomid)
{
int err;
@@ -16,6 +17,7 @@ int xc_domain_create(int xc_handle,
op.cmd = DOM0_CREATEDOMAIN;
op.u.createdomain.domain = (domid_t)*pdomid;
+ op.u.createdomain.ssidref = ssidref;
if ( (err = do_dom0_op(xc_handle, &op)) != 0 )
return err;
@@ -101,6 +103,7 @@ int xc_domain_getinfo(int xc_handle,
info->crashed = 1;
}
+ info->ssidref = op.u.getdomaininfo.ssidref;
info->nr_pages = op.u.getdomaininfo.tot_pages;
info->max_memkb = op.u.getdomaininfo.max_pages<<(PAGE_SHIFT);
info->shared_info_frame = op.u.getdomaininfo.shared_info_frame;
diff --git a/tools/misc/policyprocessor/SecurityLabel.java b/tools/misc/policyprocessor/SecurityLabel.java
new file mode 100644
index 0000000000..c7ffbcaaeb
--- /dev/null
+++ b/tools/misc/policyprocessor/SecurityLabel.java
@@ -0,0 +1,34 @@
+/**
+ * (C) Copyright IBM Corp. 2005
+ *
+ * $Id: SecurityLabel.java,v 1.2 2005/06/17 20:00:04 rvaldez Exp $
+ *
+ * Author: Ray Valdez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * SecurityLabel Class.
+ *
+ * <p>
+ *
+ * Keeps track of types.
+ *
+ * <p>
+ *
+ *
+ */
+import java.util.*;
+public class SecurityLabel
+{
+ Vector ids;
+ Vector vlans;
+ Vector slots;
+ Vector steTypes;
+ int steSsidPosition;
+ Vector chwIDs;
+ Vector chwTypes;
+ int chwSsidPosition;
+}
diff --git a/tools/misc/policyprocessor/SecurityPolicySpec.xsd b/tools/misc/policyprocessor/SecurityPolicySpec.xsd
new file mode 100644
index 0000000000..bb7265e6b0
--- /dev/null
+++ b/tools/misc/policyprocessor/SecurityPolicySpec.xsd
@@ -0,0 +1,115 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Author: Ray Valdez, rvaldez@us.ibm.com -->
+<!-- xml schema definition for xen xml policies -->
+<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+targetNamespace="http://www.ibm.com"
+xmlns="http://www.ibm.com"
+elementFormDefault="qualified">
+
+<xsd:element name="TE" type="xsd:string" />
+<xsd:element name="ChWall" type="xsd:string" />
+
+<xsd:element name="Definition">
+ <xsd:complexType>
+ <xsd:sequence>
+
+ <!-- simple type enforcement -->
+ <xsd:element name="Types" minOccurs ="0" maxOccurs="1">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element ref="TE" minOccurs ="1" maxOccurs ="unbounded"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <!-- chinese wall -->
+ <!-- type definition -->
+ <xsd:element name="ChWallTypes" minOccurs ="0" maxOccurs="1">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element ref="ChWall" minOccurs ="1" maxOccurs ="unbounded"/>
+
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <!-- conflict set -->
+ <xsd:element name="ConflictSet" minOccurs ="0" maxOccurs="unbounded">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element ref="ChWall" minOccurs ="2" maxOccurs ="unbounded"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ </xsd:sequence>
+ </xsd:complexType>
+</xsd:element>
+
+<xsd:element name="Policy">
+ <xsd:complexType>
+ <xsd:sequence>
+
+ <xsd:element name="PolicyHeader">
+ <xsd:complexType>
+ <xsd:all>
+ <xsd:element name = "Name" type="xsd:string"/>
+ <xsd:element name = "DateTime" type="xsd:dateTime"/>
+ <xsd:element name = "Tag" minOccurs ="1" maxOccurs ="1" type="xsd:string"/>
+ <xsd:element name = "TypeDefinition">
+ <xsd:complexType>
+ <xsd:all>
+ <xsd:element name = "url" type="xsd:string"/>
+ <xsd:element name = "hash" minOccurs ="0" maxOccurs ="1" type="xsd:string"/>
+ </xsd:all>
+ </xsd:complexType>
+ </xsd:element>
+
+ </xsd:all>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="VM" minOccurs ="1" maxOccurs="unbounded">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="id" type="xsd:integer"/>
+ <xsd:element ref="TE" minOccurs="0" maxOccurs="unbounded" />
+ <xsd:element ref="ChWall" minOccurs ="0" maxOccurs="unbounded"/>
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="Vlan" minOccurs ="0" maxOccurs="unbounded">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="vid" type="xsd:integer"/>
+ <xsd:element ref="TE" minOccurs="1" maxOccurs="unbounded" />
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+ <xsd:element name="Slot" minOccurs ="0" maxOccurs="unbounded">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="bus" type="xsd:integer"/>
+ <xsd:element name="slot" type="xsd:integer"/>
+ <xsd:element ref="TE" minOccurs="1" maxOccurs="unbounded" />
+ </xsd:sequence>
+ </xsd:complexType>
+ </xsd:element>
+
+
+ </xsd:sequence>
+ </xsd:complexType>
+</xsd:element>
+
+<!-- root element -->
+<xsd:element name="SecurityPolicySpec">
+ <xsd:complexType>
+ <xsd:choice>
+ <xsd:element ref="Definition" minOccurs ="1" maxOccurs="unbounded"/>
+ <xsd:element ref="Policy" minOccurs ="1" maxOccurs="unbounded"/>
+ </xsd:choice>
+ </xsd:complexType>
+</xsd:element>
+</xsd:schema>
diff --git a/tools/misc/policyprocessor/SsidsEntry.java b/tools/misc/policyprocessor/SsidsEntry.java
new file mode 100644
index 0000000000..e178d9e6a3
--- /dev/null
+++ b/tools/misc/policyprocessor/SsidsEntry.java
@@ -0,0 +1,29 @@
+/**
+ * (C) Copyright IBM Corp. 2005
+ *
+ * $Id: SsidsEntry.java,v 1.2 2005/06/17 20:02:40 rvaldez Exp $
+ *
+ * Author: Ray Valdez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * SsidsEntry Class.
+ * <p>
+ *
+ * Holds ssid information.
+ *
+ * <p>
+ *
+ *
+ */
+public class SsidsEntry
+ {
+ int id; /* used for partition and vlan */
+ int bus; /* used for slots */
+ int slot;
+ int ste = 0xffffffff;
+ int chw = 0xffffffff;
+ }
diff --git a/tools/misc/policyprocessor/XmlToBin.java b/tools/misc/policyprocessor/XmlToBin.java
new file mode 100644
index 0000000000..1b21b41535
--- /dev/null
+++ b/tools/misc/policyprocessor/XmlToBin.java
@@ -0,0 +1,1588 @@
+/**
+ * (C) Copyright IBM Corp. 2005
+ *
+ * $Id: XmlToBin.java,v 1.2 2005/06/17 20:00:04 rvaldez Exp $
+ *
+ * Author: Ray Valdez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * XmlToBin Class.
+ * <p>
+ *
+ * Translates an XML representation of an sHype policy into a binary
+ * format. The class processes an XML policy file based on element tags
+ * defined in a schema definition file: SecurityPolicySpec.xsd.
+ *
+ * XmlToBin Command line Options:
+ *
+ * -i inputFile: name of policyfile (.xml)
+ * -o outputFile: name of binary policy file (Big Endian)
+ * -xssid SsidFile: xen ssids to types text file
+ * -xssidconf SsidConf: xen conflict ssids to types text file
+ * -debug turn on debug messages
+ * -help help. This printout
+ *
+ * <p>
+ *
+ *
+ */
+import java.util.*;
+import java.io.*;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.Attr;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.NamedNodeMap;
+import org.xml.sax.*;
+import javax.xml.parsers.*;
+import org.xml.sax.helpers.*;
+
+public class XmlToBin
+ implements XmlToBinInterface
+{
+ class SlotInfo {
+ String bus;
+ String slot;
+ }
+
+ boolean LittleEndian = false;
+ boolean debug = false;
+
+ static final String JAXP_SCHEMA_LANGUAGE = "http://java.sun.com/xml/jaxp/properties/schemaLanguage";
+
+ static final String W3C_XML_SCHEMA = "http://www.w3.org/2001/XMLSchema";
+
+ public static void printUsage()
+ {
+ System.out.println("XmlToBin Command line Options: ");
+ System.out.println("\t-i\t\tinputFile:\tname of policyfile (.xml)");
+ System.out.println("\t-o\t\toutputFile:\tname of binary policy file (Big Endian)");
+ System.out.println("\t-xssid\t\tSsidFile:\tXen ssids to named types text file");
+ System.out.println("\t-xssidconf\tSsidConfFile:\tXen conflict ssids to named types text file");
+ System.out.println("\t-debug\t\t\t\tturn on debug messages");
+ System.out.println("\t-help\t\t\t\thelp. This printout");
+ return;
+ }
+
+ public void printDebug(String message)
+ {
+ if (debug)
+ System.out.println(message);
+ }
+
+ public void writeBinPolicy(byte[] binPolicy, String outputFileName)
+ throws Exception
+ {
+ if (debug)
+ printHex(binPolicy,binPolicy.length);
+
+ DataOutputStream writeObj = new DataOutputStream(
+ new FileOutputStream(outputFileName));
+
+ writeObj.write(binPolicy);
+ writeObj.flush();
+ writeObj.close();
+ System.out.println(" wBP:: wrote outputfile: " + outputFileName);
+
+ return;
+ }
+
+ public void writeXenTypeVectorFile(Vector list, String outputFileName)
+ throws Exception
+ {
+ PrintWriter out;
+
+ if (0 == list.size())
+ {
+ printDebug(" wSTF : size of input is zero when writing :" + outputFileName);
+ return;
+ }
+ out = new PrintWriter(
+ new BufferedWriter(
+ new FileWriter(outputFileName)));
+
+
+ for (int i = 0; i < list.size(); i++)
+ {
+ Vector ee = (Vector) list.elementAt(i);
+ out.println(i + " " +ee.toString());
+ }
+ out.close();
+
+ return;
+ }
+
+ public void writeXenTypeFile(Vector list, String outputFileName, boolean slabel)
+ throws Exception
+ {
+ Vector entry;
+ String strTypes = "";
+ SecurityLabel ee;
+ PrintWriter out;
+
+ if (0 == list.size())
+ {
+ printDebug(" wSTF : size of input is zero when writing :" + outputFileName);
+ return;
+ }
+ out = new PrintWriter(
+ new BufferedWriter(
+ new FileWriter(outputFileName)));
+
+ for (int i = 0; i < list.size(); i++)
+ {
+ ee = (SecurityLabel) list.elementAt(i);
+
+ if (slabel)
+ {
+ entry = ee.steTypes;
+ } else {
+
+ entry = ee.chwTypes;
+ }
+ if (null == entry) continue;
+
+ Enumeration e = entry.elements();
+ while (e.hasMoreElements())
+ {
+ String typeName = (String) e.nextElement();
+ strTypes = strTypes + " " + typeName;
+ }
+ printDebug(" WXTF:: ssid : "+i +" :"+strTypes);
+ out.println(i +" "+strTypes);
+ strTypes = "";
+ }
+ out.close();
+
+ return;
+ }
+
+ public void setDebug(boolean value)
+ {
+ debug=value;
+ }
+
+ public void setEndian(boolean value)
+ {
+ LittleEndian = value;
+ }
+
+ public byte[] generateVlanSsids(Vector bagOfSsids)
+ throws Exception
+ {
+ /**
+ typedef struct {
+ u16 vlan;
+ u16 ssid_ste;
+ } acm_vlan_entry_t;
+ **/
+
+ Hashtable vlanSsid = new Hashtable();
+ printDebug(" gVS::Size of bagOfSsids: "+ bagOfSsids.size());
+
+ /* Get the number of partitions */
+ for (int i = 0; i < bagOfSsids.size(); i++)
+ {
+ SecurityLabel entry = (SecurityLabel) bagOfSsids.elementAt(i);
+
+ if (null == entry.vlans)
+ continue;
+
+ Enumeration e = entry.vlans.elements();
+ while (e.hasMoreElements())
+ {
+ String id = (String) e.nextElement();
+ printDebug(" gVS:: vlan: " + id + "has ste ssid: " + entry.steSsidPosition);
+ if (-1 == entry.steSsidPosition)
+ continue;
+
+ /* Only use ste for vlan */
+ SsidsEntry ssidsObj = new SsidsEntry();
+
+ ssidsObj.id = Integer.parseInt(id);
+ ssidsObj.ste = entry.steSsidPosition;
+
+ if (vlanSsid.contains(id))
+ printDebug(" gVS:: Error already in the Hash part:" + ssidsObj.id);
+ else
+ vlanSsid.put(id, ssidsObj);
+ printDebug(" gVS:: added part: " + id + "has ste ssid: " + entry.steSsidPosition);
+ }
+ }
+
+ /* allocate array */
+ int numOfVlan = vlanSsid.size();
+ int totalSize = (numOfVlan * vlanEntrySz);
+
+ if (0 == numOfVlan)
+ {
+ printDebug(" gVS:: vlan: binary ==> zero");
+ return new byte[0];
+ }
+
+ byte[] vlanArray = new byte[totalSize];
+
+ int index = 0;
+
+ Enumeration e = vlanSsid.elements();
+ while (e.hasMoreElements())
+ {
+ SsidsEntry entry = (SsidsEntry) e.nextElement();
+ printDebug(" gVS:: part: " + entry.id + " ste ssid: " + entry.ste);
+
+ /* Write id */
+ writeShortToStream(vlanArray,(short)entry.id,index);
+ index = index + u16Size;
+
+ /* write ste ssid */
+ writeShortToStream(vlanArray,(short) entry.ste,index);
+ index = index + u16Size;
+ }
+
+ printDebug(" gVS:: vlan: num of vlans " + numOfVlan);
+ printDebug(" gVS:: vlan: binary ==> Length "+ vlanArray.length);
+
+ if (debug)
+ printHex(vlanArray,vlanArray.length);
+ printDebug("\n");
+
+ return vlanArray;
+ }
+
+ public byte[] generateSlotSsids(Vector bagOfSsids)
+ throws Exception
+ {
+ /**
+ typedef struct {
+ u16 slot_max;
+ u16 slot_offset;
+ } acm_slot_buffer_t;
+
+ typedef struct {
+ u16 bus;
+ u16 slot;
+ u16 ssid_ste;
+ } acm_slot_entry_t;
+ **/
+ Hashtable slotSsid = new Hashtable();
+ printDebug(" gSS::Size of bagOfSsids: "+ bagOfSsids.size());
+
+ /* Find the number of VMs */
+ for (int i = 0; i < bagOfSsids.size(); i++)
+ {
+ SecurityLabel entry = (SecurityLabel) bagOfSsids.elementAt(i);
+
+ if (null == entry.slots)
+ continue;
+
+ Enumeration e = entry.slots.elements();
+ while (e.hasMoreElements())
+ {
+ SlotInfo item = (SlotInfo) e.nextElement();
+ printDebug(" gSS:: bus slot: " + item.bus + " "+ item.slot + " " + entry.steSsidPosition);
+ if (-1 == entry.steSsidPosition)
+ continue;
+
+ SsidsEntry ssidsObj = new SsidsEntry();
+
+ String id = item.bus +" "+item.slot;
+ ssidsObj.bus = Integer.parseInt(item.bus);
+ ssidsObj.slot = Integer.parseInt(item.slot);
+ /* set ste ssid */
+ ssidsObj.ste = entry.steSsidPosition;
+
+ if (slotSsid.contains(id))
+ printDebug(" gSS:: Error already in the Hash part:" + id);
+ else
+ slotSsid.put(id, ssidsObj);
+
+ printDebug(" gSS:: added slot: " + id + "has ste ssid: " + entry.steSsidPosition);
+ }
+ }
+
+ /* allocate array */
+ int numOfSlot = slotSsid.size();
+
+ if (0 == numOfSlot)
+ {
+ printDebug(" gVS:: slot: binary ==> zero");
+ return new byte[0];
+ }
+
+ int totalSize = (numOfSlot * slotEntrySz);
+
+ byte[] slotArray = new byte[totalSize];
+
+ int index = 0;
+
+ Enumeration e = slotSsid.elements();
+ while (e.hasMoreElements())
+ {
+ SsidsEntry entry = (SsidsEntry) e.nextElement();
+ System.out.println(" gSS:: bus slot: " + entry.bus + " " + entry.slot + " ste ssid: " + entry.ste);
+
+ /* Write bus */
+ writeShortToStream(slotArray,(short)entry.bus,index);
+ index = index + u16Size;
+
+ /* Write slot */
+ writeShortToStream(slotArray,(short)entry.slot,index);
+ index = index + u16Size;
+
+ /* Write ste ssid */
+ writeShortToStream(slotArray,(short) entry.ste,index);
+ index = index + u16Size;
+
+ }
+
+ printDebug(" gSS:: slot: num of vlans " + numOfSlot);
+ printDebug(" gSS:: slot: binary ==> Length "+ slotArray.length);
+
+ if (debug)
+ printHex(slotArray,slotArray.length);
+ printDebug("\n");
+
+ return slotArray;
+
+ }
+
+ public byte[] generatePartSsids(Vector bagOfSsids, Vector bagOfChwSsids)
+ throws Exception
+ {
+ /**
+ typedef struct {
+ u16 id;
+ u16 ssid_ste;
+ u16 ssid_chwall;
+ } acm_partition_entry_t;
+
+ **/
+ Hashtable partSsid = new Hashtable();
+ printDebug(" gPS::Size of bagOfSsids: "+ bagOfSsids.size());
+
+ /* Find the number of VMs */
+ for (int i = 0; i < bagOfSsids.size(); i++)
+ {
+ SecurityLabel entry = (SecurityLabel) bagOfSsids.elementAt(i);
+
+ if (null == entry.ids)
+ continue;
+
+ Enumeration e = entry.ids.elements();
+ while (e.hasMoreElements())
+ {
+ String id = (String) e.nextElement();
+ printDebug(" gPS:: part: " + id + "has ste ssid: " + entry.steSsidPosition);
+ if (-1 == entry.steSsidPosition)
+ continue;
+
+ SsidsEntry ssidsObj = new SsidsEntry();
+
+ ssidsObj.id = Integer.parseInt(id);
+ ssidsObj.ste = entry.steSsidPosition;
+
+ if (partSsid.contains(id))
+ printDebug(" gPS:: Error already in the Hash part:" + ssidsObj.id);
+ else
+ partSsid.put(id, ssidsObj);
+ printDebug(" gPS:: added part: " + id + "has ste ssid: " + entry.steSsidPosition);
+ }
+
+ }
+
+ for (int i = 0; i < bagOfChwSsids.size(); i++)
+ {
+ SecurityLabel entry = (SecurityLabel) bagOfChwSsids.elementAt(i);
+
+ Enumeration e = entry.chwIDs.elements();
+ while (e.hasMoreElements())
+ {
+ String id = (String) e.nextElement();
+ printDebug(" gPS:: part: " + id + "has chw ssid: " + entry.chwSsidPosition);
+ if (partSsid.containsKey(id))
+ {
+ SsidsEntry item = (SsidsEntry) partSsid.get(id);
+ item.chw = entry.chwSsidPosition;
+ printDebug(" gPS:: added :" + item.id +" chw: " + item.chw);
+ }
+ else
+ {
+ printDebug(" gPS:: creating :" + id +" chw: " + entry.chwSsidPosition);
+ SsidsEntry ssidsObj = new SsidsEntry();
+ ssidsObj.id = Integer.parseInt(id);
+ ssidsObj.chw = entry.chwSsidPosition;
+ partSsid.put(id, ssidsObj);
+
+ }
+ }
+ }
+
+ /* Allocate array */
+ int numOfPar = partSsid.size();
+ int totalSize = (numOfPar * partitionEntrySz);
+
+ if (0 == numOfPar)
+ {
+ printDebug(" gPS:: part: binary ==> zero");
+ return new byte[0];
+ }
+
+ byte[] partArray = new byte[totalSize];
+
+ int index = 0;
+
+ Enumeration e = partSsid.elements();
+ while (e.hasMoreElements())
+ {
+ SsidsEntry entry = (SsidsEntry) e.nextElement();
+ printDebug(" gPS:: part: " + entry.id + " ste ssid: " + entry.ste + " chw ssid: "+ entry.chw);
+
+ /* Write id */
+ writeShortToStream(partArray,(short)entry.id,index);
+ index = index + u16Size;
+
+ /* Write ste ssid */
+ writeShortToStream(partArray,(short) entry.ste,index);
+ index = index + u16Size;
+
+ /* Write chw ssid */
+ writeShortToStream(partArray,(short) entry.chw,index);
+ index = index + u16Size;
+ }
+
+ printDebug(" gPS:: part: num of partitions " + numOfPar);
+ printDebug(" gPS:: part: binary ==> Length " + partArray.length);
+
+ if (debug)
+ printHex(partArray,partArray.length);
+ printDebug("\n");
+
+ return partArray;
+ }
+
+ public byte[] GenBinaryPolicyBuffer(byte[] chwPolicy, byte[] stePolicy, byte [] partMap, byte[] vlanMap, byte[] slotMap)
+ {
+ byte[] binBuffer;
+ short chwSize =0;
+ short steSize =0;
+ int index = 0;
+
+ /* Builds data structure acm_policy_buffer_t */
+ /* Get number of colorTypes */
+ if (null != chwPolicy)
+ chwSize = (short) chwPolicy.length;
+
+ if (null != stePolicy)
+ steSize = (short) stePolicy.length;
+
+ int totalDataSize = chwSize + steSize + resourceOffsetSz + 3 *(2 * u16Size);
+
+ /* Add vlan and slot */
+ totalDataSize = totalDataSize +partMap.length + vlanMap.length + slotMap.length;
+ binBuffer = new byte[binaryBufferHeaderSz +totalDataSize];
+
+
+ try {
+ /* Write magic */
+ writeIntToStream(binBuffer,ACM_MAGIC,index);
+ index = u32Size;
+
+ /* Write policy version */
+ writeIntToStream(binBuffer,POLICY_INTERFACE_VERSION,index);
+ index = index + u32Size;
+
+ /* write len */
+ writeIntToStream(binBuffer,binBuffer.length,index);
+ index = index + u32Size;
+
+ } catch (IOException ee) {
+ System.out.println(" GBPB:: got exception : " + ee);
+ return null;
+ }
+
+ int offset, address;
+ address = index;
+
+ if (null != partMap)
+ offset = binaryBufferHeaderSz + resourceOffsetSz;
+ else
+ offset = binaryBufferHeaderSz;
+
+ try {
+
+ if (null == chwPolicy || null == stePolicy)
+ {
+ writeShortToStream(binBuffer,ACM_NULL_POLICY,index);
+ index = index + u16Size;
+
+ writeShortToStream(binBuffer,(short) 0,index);
+ index = index + u16Size;
+
+ writeShortToStream(binBuffer,ACM_NULL_POLICY,index);
+ index = index + u16Size;
+
+ writeShortToStream(binBuffer,(short) 0,index);
+ index = index + u16Size;
+
+ }
+ index = address;
+ if (null != chwPolicy)
+ {
+
+ /* Write policy name */
+ writeShortToStream(binBuffer,ACM_CHINESE_WALL_POLICY,index);
+ index = index + u16Size;
+
+ /* Write offset */
+ writeShortToStream(binBuffer,(short) offset,index);
+ index = index + u16Size;
+
+ /* Write payload. No need increment index */
+ address = offset;
+ System.arraycopy(chwPolicy, 0, binBuffer,address, chwPolicy.length);
+ address = address + chwPolicy.length;
+
+ if (null != stePolicy)
+ {
+ /* Write policy name */
+ writeShortToStream(binBuffer,ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,index);
+ index = index + u16Size;
+
+ /* Write offset */
+ writeShortToStream(binBuffer,(short) address,index);
+ index = index + u16Size;
+
+ /* Copy array */
+ System.arraycopy(stePolicy, 0, binBuffer,address, stePolicy.length);
+ /* Update address */
+ address = address + stePolicy.length;
+ } else {
+ /* Skip writing policy name and offset */
+ index = index + 2 * u16Size;
+
+ }
+
+ } else {
+
+ if (null != stePolicy)
+ {
+ /* Write policy name */
+ writeShortToStream(binBuffer,ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,index);
+ index = index + u16Size;
+
+ /* Write offset */
+ address = offset;
+ writeShortToStream(binBuffer, (short) offset,index);
+ index = index + u16Size;
+
+ /* Copy array */
+ System.arraycopy(stePolicy, 0, binBuffer,address, stePolicy.length);
+ /* Update address */
+ address = address + stePolicy.length;
+
+ /* Increment index, since there is no secondary */
+ index = index + secondaryPolicyCodeSz + secondaryBufferOffsetSz;
+
+ }
+
+ }
+ int size;
+ /* Assumes that the policy always defines at least one partition */
+ if ( 0 < partMap.length)
+ {
+ writeShortToStream(binBuffer, (short) address,index);
+ index = address;
+
+ /* Compute num of VMs */
+ size = partMap.length / (3 * u16Size);
+
+ writeShortToStream(binBuffer, (short)size,index);
+ index = index + u16Size;
+
+ /* part, vlan and slot: each one consists of two entries */
+ offset = 3 * (2 * u16Size);
+ writeShortToStream(binBuffer, (short) offset,index);
+
+ /* Write partition array at offset */
+ System.arraycopy(partMap, 0, binBuffer,(offset + address), partMap.length);
+ index = index + u16Size;
+ offset = offset + partMap.length;
+ }
+
+ if ( 0 < vlanMap.length)
+ {
+ size = vlanMap.length / (2 * u16Size);
+ writeShortToStream(binBuffer, (short) size,index);
+ index = index + u16Size;
+
+ writeShortToStream(binBuffer, (short) offset,index);
+ index = index + u16Size;
+ System.arraycopy(vlanMap, 0, binBuffer,(offset + address), vlanMap.length);
+ } else {
+ /* Write vlan max */
+ writeShortToStream(binBuffer, (short) 0,index);
+ index = index + u16Size;
+
+ /* Write vlan offset */
+ writeShortToStream(binBuffer, (short) 0,index);
+ index = index + u16Size;
+
+ }
+
+ offset = offset + vlanMap.length;
+ if ( 0 < slotMap.length)
+ {
+ size = slotMap.length / (3 * u16Size);
+ writeShortToStream(binBuffer, (short) size,index);
+ index = index + u16Size;
+
+ writeShortToStream(binBuffer, (short) offset,index);
+ index = index + u16Size;
+ System.arraycopy(slotMap, 0, binBuffer,(offset + address), slotMap.length);
+ }
+
+ } catch (IOException ee)
+ {
+ System.out.println(" GBPB:: got exception : " + ee);
+ return null;
+ }
+
+ printDebug(" GBP:: Binary Policy ==> length " + binBuffer.length);
+ if (debug)
+ printHex(binBuffer,binBuffer.length);
+
+ return binBuffer;
+ }
+
+ public byte[] generateChwBuffer(Vector Ssids, Vector ConflictSsids, Vector ColorTypes)
+ {
+ byte[] chwBuffer;
+ int index = 0;
+ int position = 0;
+
+ /* Get number of rTypes */
+ short maxTypes = (short) ColorTypes.size();
+
+ /* Get number of SSids entry */
+ short maxSsids = (short) Ssids.size();
+
+ /* Get number of conflict sets */
+ short maxConflict = (short) ConflictSsids.size();
+
+
+ if (maxTypes * maxSsids == 0)
+ return null;
+ /*
+ data structure acm_chwall_policy_buffer_t;
+
+ uint16 policy_code;
+ uint16 chwall_max_types;
+ uint16 chwall_max_ssidrefs;
+ uint16 chwall_max_conflictsets;
+ uint16 chwall_ssid_offset;
+ uint16 chwall_conflict_sets_offset;
+ uint16 chwall_running_types_offset;
+ uint16 chwall_conflict_aggregate_offset;
+ */
+ int totalBytes = chwHeaderSize + u16Size *(maxTypes * (maxSsids + maxConflict));
+
+ chwBuffer = new byte[ totalBytes ];
+ int address = chwHeaderSize + (u16Size * maxTypes * maxSsids );
+
+ printDebug(" gCB:: chwall totalbytes : "+totalBytes);
+
+ try {
+ index = 0;
+ writeShortToStream(chwBuffer,ACM_CHINESE_WALL_POLICY,index);
+ index = u16Size;
+
+ writeShortToStream(chwBuffer,maxTypes,index);
+ index = index + u16Size;
+
+ writeShortToStream(chwBuffer,maxSsids,index);
+ index = index + u16Size;
+
+ writeShortToStream(chwBuffer,maxConflict,index);
+ index = index + u16Size;
+
+ /* Write chwall_ssid_offset */
+ writeShortToStream(chwBuffer,chwHeaderSize,index);
+ index = index + u16Size;
+
+ /* Write chwall_conflict_sets_offset */
+ writeShortToStream(chwBuffer,(short) address,index);
+ index = index + u16Size;
+
+ /* Write chwall_running_types_offset */
+ writeShortToStream(chwBuffer,(short) 0,index);
+ index = index + u16Size;
+
+ /* Write chwall_conflict_aggregate_offset */
+ writeShortToStream(chwBuffer,(short) 0,index);
+ index = index + u16Size;
+
+ } catch (IOException ee) {
+ System.out.println(" gCB:: got exception : " + ee);
+ return null;
+ }
+ int markPos = 0;
+
+ /* Create the SSids entry */
+ for (int i = 0; i < maxSsids; i++)
+ {
+
+ SecurityLabel ssidEntry = (SecurityLabel) Ssids.elementAt(i);
+ /* Get chwall types */
+ ssidEntry.chwSsidPosition = i;
+ Enumeration e = ssidEntry.chwTypes.elements();
+ while (e.hasMoreElements())
+ {
+ String typeName = (String) e.nextElement();
+ printDebug(" gCB:: Ssid "+ i+ ": has type : " + typeName);
+ position = ColorTypes.indexOf(typeName);
+
+ if (position < 0)
+ {
+ System.out.println (" gCB:: Error type : " + typeName + " not found in ColorTypes");
+ return null;
+ }
+ printDebug(" GCB:: type : " + typeName + " found in ColorTypes at position: " + position);
+ markPos = ((i * maxTypes + position) * u16Size) + index;
+
+ try {
+ writeShortToStream(chwBuffer,markSymbol,markPos);
+ } catch (IOException ee) {
+ System.out.println(" gCB:: got exception : ");
+ return null;
+ }
+ }
+ }
+
+ if (debug)
+ printHex(chwBuffer,chwBuffer.length);
+
+ /* Add conflict set */
+ index = address;
+ for (int i = 0; i < maxConflict; i++)
+ {
+ /* Get ste types */
+ Vector entry = (Vector) ConflictSsids.elementAt(i);
+ Enumeration e = entry.elements();
+ while (e.hasMoreElements())
+ {
+ String typeName = (String) e.nextElement();
+ printDebug (" GCB:: conflict Ssid "+ i+ ": has type : " + typeName);
+ position = ColorTypes.indexOf(typeName);
+
+ if (position < 0)
+ {
+ System.out.println (" GCB:: Error type : " + typeName + " not found in ColorTypes");
+ return null;
+ }
+ printDebug(" GCB:: type : " + typeName + " found in ColorTypes at position: " + position);
+ markPos = ((i * maxTypes + position) * u16Size) + index;
+
+ try {
+ writeShortToStream(chwBuffer,markSymbol,markPos);
+ } catch (IOException ee) {
+ System.out.println(" GCB:: got exception : ");
+ return null;
+ }
+ }
+
+ }
+ printDebug(" gSB:: chw binary ==> Length " + chwBuffer.length);
+ if (debug)
+ printHex(chwBuffer,chwBuffer.length);
+ printDebug("\n");
+
+ return chwBuffer;
+ }
+
+/**********************************************************************
+ Generate byte representation of policy using type information
+ <p>
+ @param Ssids Vector
+ @param ColorTypes Vector
+ <p>
+ @return byte representation of the simple type enforcement policy
+**********************************************************************/
+ public byte[] generateSteBuffer(Vector Ssids, Vector ColorTypes)
+ {
+ byte[] steBuffer;
+ int index = 0;
+ int position = 0;
+
+ /* Get number of colorTypes */
+ short numColorTypes = (short) ColorTypes.size();
+
+ /* Get number of SSids entry */
+ short numSsids = (short) Ssids.size();
+
+ if (numColorTypes * numSsids == 0)
+ return null;
+
+ /* data structure: acm_ste_policy_buffer_t
+ *
+ * policy code (uint16) >
+ * max_types (uint16) >
+ * max_ssidrefs (uint16) > steHeaderSize
+ * ssid_offset (uint16) >
+ * DATA (colorTypes(size) * Ssids(size) * uint16)
+ *
+ * total bytes: steHeaderSize + colorTypes(size) * Ssids(size) * u16Size
+ *
+ */
+ steBuffer = new byte[ steHeaderSize + (numColorTypes * numSsids) * 2];
+
+ try {
+
+ index = 0;
+ writeShortToStream(steBuffer,ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,index);
+ index = u16Size;
+
+ writeShortToStream(steBuffer,numColorTypes,index);
+ index = index + u16Size;
+
+ writeShortToStream(steBuffer,numSsids,index);
+ index = index + u16Size;
+
+ writeShortToStream(steBuffer,(short)steHeaderSize,index);
+ index = index + u16Size;
+
+ } catch (IOException ee) {
+ System.out.println(" gSB:: got exception : " + ee);
+ return null;
+ }
+ int markPos = 0;
+ for (int i = 0; i < numSsids; i++)
+ {
+
+ SecurityLabel ssidEntry = (SecurityLabel) Ssids.elementAt(i);
+ ssidEntry.steSsidPosition = i;
+ /* Get ste types */
+ Enumeration e = ssidEntry.steTypes.elements();
+ while (e.hasMoreElements())
+ {
+ String typeName = (String) e.nextElement();
+ printDebug (" gSB:: Ssid "+ i+ ": has type : " + typeName);
+ position = ColorTypes.indexOf(typeName);
+
+ if (position < 0)
+ {
+ printDebug(" gSB:: Error type : " + typeName + " not found in ColorTypes");
+ return null;
+ }
+ printDebug(" gSB:: type : " + typeName + " found in ColorTypes at position: " + position);
+ markPos = ((i * numColorTypes + position) * u16Size) + index;
+
+ try {
+ writeShortToStream(steBuffer,markSymbol,markPos);
+ } catch (IOException ee)
+ {
+ System.out.println(" gSB:: got exception : ");
+ return null;
+ }
+ }
+
+ }
+
+ printDebug(" gSB:: ste binary ==> Length " + steBuffer.length);
+ if (debug)
+ printHex(steBuffer,steBuffer.length);
+ printDebug("\n");
+
+ return steBuffer;
+ }
+
+ public static void printHex(byte [] dataArray, int length)
+ {
+ char[] hexChars = {'0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
+ int hexIndex;
+ int value;
+ int arraylength;
+
+ arraylength = length;
+
+ if (dataArray == null)
+ {
+ System.err.println("printHex: input byte array is null");
+ return;
+ }
+
+ if (length > dataArray.length || length < 0)
+ arraylength = dataArray.length;
+
+ System.out.print("\n\t");
+
+ int i;
+ for(i = 0; i < arraylength; )
+ {
+ value = dataArray[i] & 0xFF;
+ hexIndex = (value >>> 4);
+ System.out.print(hexChars[hexIndex]);
+ hexIndex = (value & 0x0F);
+ System.out.print(hexChars[hexIndex]);
+
+ i++;
+ /* if done, print a final newline */
+ if (i == arraylength) {
+ if (arraylength < dataArray.length) {
+ System.out.print("...");
+ }
+ System.out.println();
+ }
+ else if ((i % 24) == 0) {
+ System.out.print("\n\t");
+ }
+ else if ((i % 4) == 0) {
+ System.out.print(" ");
+ }
+ }
+
+ return;
+ }
+
+
+ private void writeShortToStream(byte[] stream, short value, int index)
+ throws IOException
+ {
+ int byteVal;
+
+ if (index + 2 > stream.length)
+ {
+ throw new IOException("Writing beyond stream length: " +
+ stream.length + " writing at locations from: " + index + " to " + (index + 4));
+ }
+
+ if (!LittleEndian)
+ {
+
+ byteVal = value >> 8;
+ stream[index ] = (byte) byteVal;
+
+ byteVal = value;
+ stream[index + 1] = (byte) byteVal;
+ } else {
+ stream[index] = (byte) ((value & 0x00ff) );
+ stream[index + 1] = (byte) ((value & 0xff00) >> 8);
+ }
+ return;
+ }
+
+ private void writeIntToStream(byte[] stream, int value, int index)
+ throws IOException
+ {
+ int byteVal;
+
+ if (4 > stream.length)
+ {
+ throw new IOException("writeIntToStream: stream length less than 4 bytes " +
+ stream.length);
+ }
+
+ /* Do not Write beyond range */
+ if (index + 4 > stream.length)
+ {
+ throw new IOException("writeIntToStream: writing beyond stream length: " +
+ stream.length + " writing at locations from: " + index + " to " + (index + 4));
+ }
+ if (!LittleEndian)
+ {
+ byteVal = value >>> 24;
+ stream[index] = (byte) byteVal;
+
+ byteVal = value >> 16;
+ stream[index + 1] = (byte) byteVal;
+
+ byteVal = value >> 8;
+ stream[index + 2] = (byte) byteVal;
+
+ byteVal = value;
+ stream[index + 3] = (byte) byteVal;
+ } else {
+ stream[index] = (byte) value;
+ stream[index + 1] = (byte) ((value & 0x0000ff00) >> 8);
+ stream[index + 2] = (byte) ((value & 0x00ff0000) >> 16);
+ stream[index + 3] = (byte) ( value >>> 24);
+ }
+ return;
+ }
+
+ public Document getDomTree(String xmlFileName)
+ throws Exception, SAXException, ParserConfigurationException
+ {
+ javax.xml.parsers.DocumentBuilderFactory dbf =
+ javax.xml.parsers.DocumentBuilderFactory.newInstance();
+
+ /* Turn on namespace aware and validation */
+ dbf.setNamespaceAware(true);
+ dbf.setValidating(true);
+ dbf.setAttribute(JAXP_SCHEMA_LANGUAGE,W3C_XML_SCHEMA);
+
+ /* Checks that the document is well-formed */
+ javax.xml.parsers.DocumentBuilder db = dbf.newDocumentBuilder();
+
+ myHandler errHandler= new myHandler();
+ db.setErrorHandler(errHandler);
+ Document doc = db.parse(xmlFileName);
+
+ /* Checks for validation errors */
+ if (errHandler.isValid)
+ printDebug(" gDT:: Xml file: " + xmlFileName + " is valid");
+ else
+ throw new Exception("Xml file: " + xmlFileName + " is NOT valid");
+
+ return doc;
+ }
+
+ public void processDomTree(
+ Document doc,
+ Vector bagOfSsids,
+ Vector bagOfTypes,
+ Vector bagOfChwSsids,
+ Vector bagOfChwTypes,
+ Vector bagOfConflictSsids)
+ throws Exception, SAXException, ParserConfigurationException
+ {
+ boolean found;
+
+ /* print the root Element */
+ Element root = doc.getDocumentElement();
+ printDebug ("\n pDT:: Document Element: Name = " + root.getNodeName() + ",Value = " + root.getNodeValue());
+
+ /* Go through the list of the root Element's Attributes */
+ NamedNodeMap nnm = root.getAttributes();
+ printDebug (" pDT:: # of Attributes: " + nnm.getLength());
+ for (int i = 0; i < nnm.getLength(); i++)
+ {
+ Node n = nnm.item (i);
+ printDebug (" pDT:: Attribute: Name = " + n.getNodeName() + ", Value = "
+ + n.getNodeValue());
+ }
+
+ /* Retrieve the policy definition */
+ NodeList elementList = root.getElementsByTagName ("url");
+ String definitionFileName = elementList.item(0).getFirstChild().getNodeValue();
+
+ String definitionHash = null;
+
+ /* Note that SecurityPolicySpec.xsd allows the hash element to be omitted */
+ elementList = root.getElementsByTagName ("hash");
+ if (0 != elementList.getLength())
+ definitionHash = elementList.item(0).getFirstChild().getNodeValue();
+
+ Document definitionDoc = pGetDomDefinition(definitionFileName,definitionHash);
+ pGetTypes(definitionDoc,bagOfTypes, bagOfChwTypes, bagOfConflictSsids);
+
+
+ /* Get VM security information */
+ elementList = root.getElementsByTagName ("VM");
+ printDebug ("\n pDT:: partition length of NodeList:" + elementList.getLength());
+
+
+ for (int x = 0; x < elementList.getLength(); x++)
+ {
+ found = false;
+
+ Node node = elementList.item (x);
+
+ if (node.getNodeType() == Node.ELEMENT_NODE)
+ {
+ printDebug (" pDT:: child: " + x + " is an element node" );
+ Element e1 = (Element) node;
+
+ /* Get id */
+ NodeList elist = e1.getElementsByTagName ("id");
+ String idStr = elist.item(0).getFirstChild().getNodeValue();
+ printDebug (" pDT:: id:" + idStr);
+
+ /* Get TE */
+ Vector colorTypes = new Vector();
+ pConflictEntries(e1, "TE", bagOfTypes, colorTypes);
+
+ Enumeration e = bagOfSsids.elements();
+ while (e.hasMoreElements())
+ {
+ SecurityLabel elem = (SecurityLabel) e.nextElement();
+ if ( elem.steTypes.size() == colorTypes.size() && elem.steTypes.containsAll(colorTypes))
+ {
+ found = true;
+ elem.ids.add(idStr);
+ }
+
+ }
+ if (!found && (0 < colorTypes.size()))
+ {
+ SecurityLabel entry = new SecurityLabel();
+ entry.steTypes = colorTypes;
+ entry.ids = new Vector();
+ entry.ids.add(idStr);
+ bagOfSsids.add(entry);
+ }
+
+ /* Get Chinese wall type */
+ Vector chwTypes = new Vector();
+ pConflictEntries(e1, "ChWall", bagOfChwTypes, chwTypes);
+
+ found = false;
+ e = bagOfChwSsids.elements();
+
+ while (e.hasMoreElements())
+ {
+ SecurityLabel elem = (SecurityLabel) e.nextElement();
+ if ( elem.chwTypes.size() == chwTypes.size() && elem.chwTypes.containsAll(chwTypes))
+ {
+ found = true;
+ elem.chwIDs.add(idStr);
+ }
+
+ }
+
+ if (!found && (0 < chwTypes.size()))
+ {
+ SecurityLabel entry = new SecurityLabel();
+ entry.chwTypes = chwTypes;
+ entry.chwIDs = new Vector();
+ entry.chwIDs.add(idStr);
+ bagOfChwSsids.add(entry);
+ }
+ }
+ }
+ return;
+ }
+
+ public Document pGetDomDefinition(
+ String definitionFileName,
+ String definitionHash)
+ throws Exception, SAXException, ParserConfigurationException
+ {
+ printDebug("\n pGDD:: definition file name: " + definitionFileName);
+ printDebug("\n pGDD:: definition file hash: " + definitionHash);
+
+ Document doc = getDomTree(definitionFileName);
+ return doc;
+ }
+
+ public void pGetTypes(
+ Document defDoc,
+ Vector bagOfTypes,
+ Vector bagOfChwTypes,
+ Vector bagOfConflictSsids)
+ throws Exception
+ {
+
+
+ if (null == defDoc)
+ throw new Exception(" pGT:: definition file DOM is null ");
+
+ Element root = defDoc.getDocumentElement();
+
+ /* Get list of TE types */
+ NodeList elementList = root.getElementsByTagName ("Types");
+ printDebug ("\n pGT:: Types length of NodeList:" + elementList.getLength());
+ Element e1 = (Element) elementList.item (0);
+ pGetEntries(e1,"TE",bagOfTypes);
+
+ /* Get list of Chinese Wall types */
+ elementList = root.getElementsByTagName ("ChWallTypes");
+ printDebug ("\n pGT:: ChwTypes length of NodeList:" + elementList.getLength());
+ if (0 == elementList.getLength())
+ {
+ printDebug ("\n pGT:: ChWallTypes has zero length: :" + elementList.getLength());
+ } else {
+ e1 = (Element) elementList.item (0);
+ pGetEntries(e1,"ChWall",bagOfChwTypes);
+ }
+ printDebug (" pGT:: Total number of unique chw types: " + bagOfChwTypes.size());
+
+ /* Get Chinese type conflict sets */
+ elementList = root.getElementsByTagName ("ConflictSet");
+ printDebug ("\n pGT:: Conflict sets length of NodeList:" + elementList.getLength());
+ for (int x = 0; x < elementList.getLength(); x++)
+ {
+ Vector conflictEntry = new Vector();
+ e1 = (Element) elementList.item (x);
+ printDebug ("\n pGT:: Conflict sets : " + x);
+
+ pConflictEntries(e1, "ChWall", bagOfChwTypes, conflictEntry);
+
+ if (conflictEntry.size() > 0)
+ {
+ boolean found = false;
+ Enumeration e = bagOfConflictSsids.elements();
+
+ while (e.hasMoreElements())
+ {
+ Vector elem = (Vector) e.nextElement();
+ if (elem.size() == conflictEntry.size() && elem.containsAll(conflictEntry))
+ {
+ found = true;
+ }
+
+ }
+ if (!found)
+ {
+ bagOfConflictSsids.add(conflictEntry);
+ }
+ }
+ }
+
+ }
+
+ public void pGetEntries(Element doc, String tag, Vector typeBag)
+ throws Exception
+ {
+
+ if (null == doc)
+ throw new Exception(" pGE:: Element doc is null");
+
+ if (null == typeBag)
+ throw new Exception(" pGE:: typeBag is null");
+
+ NodeList elist = doc.getElementsByTagName (tag);
+ for (int j = 0; j < elist.getLength(); j++)
+ {
+ Node knode = elist.item (j);
+ Node childNode = knode.getFirstChild();
+ String value = childNode.getNodeValue();
+
+ printDebug (" pGT:: "+ tag +" type: " + value);
+
+ /* Check if value is known */
+ if (!typeBag.contains(value))
+ typeBag.addElement(value);
+ }
+ }
+
+ public void pConflictEntries(Element doc, String tag, Vector typeBag, Vector conflictEntry)
+ throws Exception
+ {
+
+ if (null == doc)
+ throw new Exception(" pCE:: Element doc is null");
+
+ if (null == typeBag)
+ throw new Exception(" pCE:: typeBag is null");
+
+ if (null == conflictEntry)
+ throw new Exception(" pCE:: conflictEntry is null");
+
+
+ NodeList elist = doc.getElementsByTagName (tag);
+
+ for (int j = 0; j < elist.getLength(); j++)
+ {
+ Node knode = elist.item (j);
+ Node childNode = knode.getFirstChild();
+ String value = childNode.getNodeValue();
+
+ printDebug (" pGE:: "+ tag +" type: " + value);
+
+ /* Check if value is known */
+ if (!typeBag.contains(value))
+ throw new Exception(" pCE:: found undefined type set " + value);
+
+ if (!conflictEntry.contains(value))
+ conflictEntry.addElement(value);
+
+ }
+ }
+
+ public void processDomTreeVlanSlot(
+ Document doc,
+ Vector bagOfSsids,
+ Vector bagOfTypes)
+ throws Exception
+ {
+ boolean found;
+
+ printDebug(" pDTVS::Size of bagOfSsids: "+ bagOfSsids.size());
+ Element root = doc.getDocumentElement();
+
+ NodeList elementList = root.getElementsByTagName ("Vlan");
+ printDebug("\n pDTVS:: Vlan length of NodeList:" + elementList.getLength());
+
+ for (int x = 0; x < elementList.getLength(); x++)
+ {
+ found = false;
+
+ Node node = elementList.item (x);
+
+ if (node.getNodeType() == Node.ELEMENT_NODE)
+ {
+ printDebug(" pDTVS:: child: " + x + " is an element node" );
+ Element e1 = (Element) node;
+
+ /* Get vid */
+ NodeList elist = e1.getElementsByTagName ("vid");
+ String idStr = elist.item(0).getFirstChild().getNodeValue();
+ printDebug ("pDTVS:: vid:" + idStr);
+
+ /* Get TE */
+ elist = e1.getElementsByTagName ("TE");
+ printDebug ("pDTVS:: Total ste types: " + elist.getLength());
+
+ Vector colorTypes = new Vector();
+ for (int j = 0; j < elist.getLength(); j++)
+ {
+ Node knode = elist.item (j);
+ Node childNode = knode.getFirstChild();
+ String value = childNode.getNodeValue();
+
+ printDebug (" pDT:: My color is: " + value);
+ if (!bagOfTypes.contains(value))
+ {
+ throw new IOException("pDT:: Vlan: " + idStr+ " has unknown type : "+ value);
+ }
+
+ if (!colorTypes.contains(value))
+ colorTypes.addElement(value);
+ }
+ Enumeration e = bagOfSsids.elements();
+ while (e.hasMoreElements())
+ {
+ SecurityLabel elem = (SecurityLabel) e.nextElement();
+ if ( elem.steTypes.size() == colorTypes.size() && elem.steTypes.containsAll(colorTypes))
+ {
+ found = true;
+ if (null == elem.vlans)
+ elem.vlans = new Vector();
+ elem.vlans.add(idStr);
+ }
+
+ }
+ if (!found && (0 < colorTypes.size()))
+ {
+ SecurityLabel entry = new SecurityLabel();
+ entry.steTypes = colorTypes;
+ entry.vlans = new Vector();
+ entry.vlans.add(idStr);
+ bagOfSsids.add(entry);
+ }
+
+ }
+ }
+ printDebug(" pDTVS::After slot Size of bagOfSsids: "+ bagOfSsids.size());
+
+ elementList = root.getElementsByTagName ("Slot");
+ printDebug ("\n pDTVS:: Slot length of NodeList:" + elementList.getLength());
+
+ for (int x = 0; x < elementList.getLength(); x++)
+ {
+ found = false;
+
+ Node node = elementList.item (x);
+
+ if (node.getNodeType() == Node.ELEMENT_NODE)
+ {
+ printDebug(" pDT:: child: " + x + " is an element node" );
+ Element e1 = (Element) node;
+
+
+ /* Get slot and bus */
+ SlotInfo item = new SlotInfo();
+
+ NodeList elist = e1.getElementsByTagName ("bus");
+ item.bus = elist.item(0).getFirstChild().getNodeValue();
+ elist = e1.getElementsByTagName ("slot");
+ item.slot = elist.item(0).getFirstChild().getNodeValue();
+ printDebug ("pDT:: bus and slot:" + item.bus + " "+ item.slot);
+
+ /* Get TE */
+ elist = e1.getElementsByTagName ("TE");
+ printDebug ("pDT:: Total ste types: " + elist.getLength());
+
+ Vector colorTypes = new Vector();
+ for (int j = 0; j < elist.getLength(); j++)
+ {
+ Node knode = elist.item (j);
+ Node childNode = knode.getFirstChild();
+ String value = childNode.getNodeValue();
+
+ printDebug ("pDT:: My color is: " + value);
+ if (!bagOfTypes.contains(value))
+ {
+ throw new IOException("pDT:: bus: " + item.bus + " slot: "+ item.slot + " has unknown type : "+ value);
+ }
+
+ if (!colorTypes.contains(value))
+ colorTypes.addElement(value);
+ }
+
+ Enumeration e = bagOfSsids.elements();
+ while (e.hasMoreElements())
+ {
+ SecurityLabel elem = (SecurityLabel) e.nextElement();
+ if ( elem.steTypes.size() == colorTypes.size() && elem.steTypes.containsAll(colorTypes))
+ {
+ found = true;
+ if (null == elem.slots)
+ elem.slots = new Vector();
+ elem.slots.add(item);
+
+ }
+
+ }
+
+ if (!found && (0 < colorTypes.size()))
+ {
+ SecurityLabel entry = new SecurityLabel();
+ entry.steTypes = colorTypes;
+ entry.slots = new Vector();
+ entry.slots.add(item);
+ bagOfSsids.add(entry);
+ }
+
+ }
+ }
+ return;
+ }
+
+ public static void main (String[] args)
+ {
+ String xmlFileName = null; /* policy file */
+ String outputFileName = null; /* binary policy file */
+ String xenSsidOutputFileName = null; /* output file: ssids to named types */
+ /* output file: conflict ssids to named types */
+ String xenSsidConfOutputFileName = null;
+
+ XmlToBin genObj = new XmlToBin();
+
+
+ for (int i = 0 ; i < args.length ; i++) {
+
+ if ( args[i].equals("-help")) {
+ printUsage();
+ System.exit(1);
+
+ } else if ( args[i].equals("-i")) {
+ i++;
+ if (i < args.length) {
+ xmlFileName = args[i];
+ } else {
+ System.out.println("-i argument needs parameter");
+ System.exit(1);
+ }
+
+ } else if ( args[i].equals("-o")) {
+ i++;
+ if (i < args.length) {
+ outputFileName = args[i];
+ } else {
+ System.out.println("-o argument needs parameter");
+ System.exit(1);
+ }
+
+ } else if ( args[i].equals("-xssid")) {
+ i++;
+ if (i < args.length) {
+ xenSsidOutputFileName = args[i];
+ } else {
+ System.out.println("-xssid argument needs parameter");
+ System.exit(1);
+ }
+
+ } else if ( args[i].equals("-xssidconf")) {
+ i++;
+ if (i < args.length) {
+ xenSsidConfOutputFileName = args[i];
+ } else {
+ System.out.println("-xssidconf argument needs parameter");
+ System.exit(1);
+ }
+ } else if ( args[i].equals("-debug")) { /* turn on debug msg */
+ genObj.setDebug(true);
+ } else {
+ System.out.println("bad command line argument: " + args[i]);
+ printUsage();
+ System.exit(1);
+ }
+
+ }
+
+ if (xmlFileName == null)
+ {
+ System.out.println("Need to specify input file -i option");
+ printUsage();
+ System.exit(1);
+ }
+
+
+ try
+ {
+ /* Parse and validate */
+ Document doc = genObj.getDomTree(xmlFileName);
+
+ /* Vectors to hold sets of types */
+ Vector bagOfSsids = new Vector();
+ Vector bagOfTypes = new Vector();
+ Vector bagOfChwSsids = new Vector();
+ Vector bagOfChwTypes = new Vector();
+ Vector bagOfConflictSsids = new Vector();
+
+ Vector vlanMapSsids = new Vector();
+ Vector slotMapSsids = new Vector();
+
+ genObj.processDomTree(doc, bagOfSsids, bagOfTypes, bagOfChwSsids, bagOfChwTypes, bagOfConflictSsids);
+
+ genObj.processDomTreeVlanSlot(doc, bagOfSsids, bagOfTypes);
+
+ /* Get binary representation of policies */
+ byte[] stePolicy = genObj.generateSteBuffer(bagOfSsids, bagOfTypes);
+ byte[] chwPolicy = genObj.generateChwBuffer(bagOfChwSsids, bagOfConflictSsids,bagOfChwTypes);
+
+ byte[] binPolicy = null;
+ byte[] binaryPartionSsid = null;
+ byte[] binaryVlanSsid = null;
+ byte[] binarySlotSsid = null;
+
+ /* Get binary representation of partition to ssid mapping */
+ binaryPartionSsid = genObj.generatePartSsids(bagOfSsids,bagOfChwSsids);
+
+ /* Get binary representation of vlan to ssid mapping */
+ binaryVlanSsid = genObj.generateVlanSsids(bagOfSsids);
+
+ /* Get binary representation of slot to ssid mapping */
+ binarySlotSsid = genObj.generateSlotSsids(bagOfSsids);
+
+ /* Generate binary representation: policy, partition, slot and vlan */
+ binPolicy = genObj.GenBinaryPolicyBuffer(chwPolicy,stePolicy, binaryPartionSsid, binaryVlanSsid, binarySlotSsid);
+
+
+ /* Write binary policy into file */
+ if (null != outputFileName)
+ {
+ genObj.writeBinPolicy(binPolicy, outputFileName);
+ } else {
+ System.out.println (" No binary policy generated, outputFileName: " + outputFileName);
+ }
+
+ /* Print total number of types */
+ System.out.println (" Total number of unique ste types: " + bagOfTypes.size());
+ System.out.println (" Total number of Ssids : " + bagOfSsids.size());
+ System.out.println (" Total number of unique chw types: " + bagOfChwTypes.size());
+ System.out.println (" Total number of conflict ssids : " + bagOfConflictSsids.size());
+ System.out.println (" Total number of chw Ssids : " + bagOfChwSsids.size());
+
+ if (null != xenSsidOutputFileName)
+ genObj.writeXenTypeFile(bagOfSsids, xenSsidOutputFileName, true);
+
+ if (null != xenSsidConfOutputFileName)
+ genObj.writeXenTypeFile(bagOfChwSsids, xenSsidConfOutputFileName, false);
+ }
+ catch (Exception e)
+ {
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/tools/misc/policyprocessor/XmlToBinInterface.java b/tools/misc/policyprocessor/XmlToBinInterface.java
new file mode 100644
index 0000000000..ec63416519
--- /dev/null
+++ b/tools/misc/policyprocessor/XmlToBinInterface.java
@@ -0,0 +1,135 @@
+/**
+ * (C) Copyright IBM Corp. 2005
+ *
+ * $Id: XmlToBinInterface.java,v 1.2 2005/06/17 20:00:04 rvaldez Exp $
+ *
+ * Author: Ray Valdez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * XmlToBinInterface Class.
+ * <p>
+ *
+ * Defines constants used by XmlToBin.
+ *
+ * <p>
+ *
+ * policy binary structures
+ *
+ * typedef struct {
+ * u32 magic;
+ *
+ * u32 policyversion;
+ * u32 len;
+ *
+ * u16 primary_policy_code;
+ * u16 primary_buffer_offset;
+ * u16 secondary_policy_code;
+ * u16 secondary_buffer_offset;
+ * u16 resource_offset;
+ *
+ * } acm_policy_buffer_t;
+ *
+ * typedef struct {
+ * u16 policy_code;
+ * u16 ste_max_types;
+ * u16 ste_max_ssidrefs;
+ * u16 ste_ssid_offset;
+ * } acm_ste_policy_buffer_t;
+ *
+ * typedef struct {
+ * uint16 policy_code;
+ * uint16 chwall_max_types;
+ * uint16 chwall_max_ssidrefs;
+ * uint16 chwall_max_conflictsets;
+ * uint16 chwall_ssid_offset;
+ * uint16 chwall_conflict_sets_offset;
+ * uint16 chwall_running_types_offset;
+ * uint16 chwall_conflict_aggregate_offset;
+ * } acm_chwall_policy_buffer_t;
+ *
+ * typedef struct {
+ * u16 partition_max;
+ * u16 partition_offset;
+ * u16 vlan_max;
+ * u16 vlan_offset;
+ * u16 slot_max;
+ * u16 slot_offset;
+ * } acm_resource_buffer_t;
+ *
+ * typedef struct {
+ * u16 id;
+ * u16 ssid_ste;
+ * u16 ssid_chwall;
+ * } acm_partition_entry_t;
+ *
+ * typedef struct {
+ * u16 vlan;
+ * u16 ssid_ste;
+ * } acm_vlan_entry_t;
+ *
+ * typedef struct {
+ * u16 bus;
+ * u16 slot;
+ * u16 ssid_ste;
+ * } acm_slot_entry_t;
+ *
+ *
+ *
+ */
+public interface XmlToBinInterface
+{
+ /* policy code (uint16) */
+ final int policyCodeSize = 2;
+
+ /* max_types (uint16) */
+ final int maxTypesSize = 2;
+
+ /* max_ssidrefs (uint16) */
+ final int maxSsidrefSize = 2;
+
+ /* ssid_offset (uint32) */
+ final int ssidOffsetSize = 2;
+
+ final short markSymbol = 0x0001;
+
+ final int u32Size = 4;
+ final int u16Size = 2;
+
+ /* num of bytes for acm_ste_policy_buffer_t */
+ final short steHeaderSize = (4 * u16Size);
+ /* num of bytes for acm_chwall_policy_buffer_t */
+ final short chwHeaderSize = (8 * u16Size);
+
+ final short primaryPolicyCodeSize = u16Size;
+ final short primaryBufferOffsetSize = u16Size ;
+
+ final int secondaryPolicyCodeSz = u16Size;
+ final int secondaryBufferOffsetSz = u16Size;
+ final short resourceOffsetSz = u16Size;
+
+ final short partitionBufferSz = (2 * u16Size);
+ final short partitionEntrySz = (3 * u16Size);
+
+ final short slotBufferSz = (2 * u16Size);
+ final short slotEntrySz = (3 * u16Size);
+
+ final short vlanBufferSz = (2 * u16Size);
+ final short vlanEntrySz = (2 * u16Size);
+
+ final short binaryBufferHeaderSz = (3 * u32Size + 4* u16Size);
+
+ /* copied directly from policy_ops.h */
+ final int POLICY_INTERFACE_VERSION = 0xAAAA0000;
+
+ /* copied directly from acm.h */
+ final int ACM_MAGIC = 0x0001debc;
+ final short ACM_NULL_POLICY = 0;
+ final short ACM_CHINESE_WALL_POLICY = 1;
+ final short ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY = 2;
+ final short ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY = 3;
+ final short ACM_EMPTY_POLICY = 4;
+}
diff --git a/tools/misc/policyprocessor/myHandler.java b/tools/misc/policyprocessor/myHandler.java
new file mode 100644
index 0000000000..b972c20605
--- /dev/null
+++ b/tools/misc/policyprocessor/myHandler.java
@@ -0,0 +1,47 @@
+/**
+ * (C) Copyright IBM Corp. 2005
+ *
+ * $Id: myHandler.java,v 1.2 2005/06/17 20:00:04 rvaldez Exp $
+ *
+ * Author: Ray Valdez
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * myHandler Class.
+ *
+ * <p>
+ *
+ * A simple error handler used to detect XML validation and parsing errors.
+ *
+ * <p>
+ *
+ *
+ */
+import org.xml.sax.helpers.*;
+import org.xml.sax.SAXParseException;
+
+class myHandler extends DefaultHandler
+{
+ public boolean isValid = true;
+
+ /* Notification of a recoverable error. */
+ public void error(SAXParseException se)
+ {
+ isValid = false;
+ }
+
+ /* Notification of a non-recoverable error. */
+ public void fatalError(SAXParseException se)
+ {
+ isValid = false;
+ }
+
+ /* Notification of a warning. */
+ public void warning(SAXParseException se)
+ {
+ isValid = false;
+ }
+}
diff --git a/tools/misc/policyprocessor/readme.install b/tools/misc/policyprocessor/readme.install
new file mode 100644
index 0000000000..058ab8212a
--- /dev/null
+++ b/tools/misc/policyprocessor/readme.install
@@ -0,0 +1,33 @@
+# Author: Ray Valdez, rvaldez@us.ibm.com
+# Version: 1.0
+#
+# install readme
+#
+PREREQUISITES:
+
+Prior to installation of the policy processor tool (XmlToBin) you must have...
+
+ 1. Java version 1.4.2
+ 2. xmlParserAPIs.jar and xercesImpl.jar
+
+Java 1.4.2 can be obtained from the Sun Developer Network web site at
+http://java.sun.com/j2se/1.4.2/download.html.
+
+The xmlParserAPIs and xercesImpl jars can be obtained from
+http://www.apache.org/dist/xml/xerces-j (Xerces-J-bin.2.6.2.tar.gz,
+for example).
+
+The tool has been tested with J2SE v1.4.2_08 JRE on Linux (32-bit
+INTEL).
+
+INSTALLATION
+
+1. Set PATH to include $HOME_JAVA/bin and $HOME_JAVA/jre/bin
+ where $HOME_JAVA is your java installation directory
+
+2. Compile XmlToBin:
+ javac XmlToBin.java
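+
+   If the compiler or JVM cannot resolve the XML parser classes, the
+   Xerces jars may need to be added to the CLASSPATH first (same
+   setting as in readme.xen), e.g.:
+
+   export CLASSPATH=$XERCES_HOME/xercesImpl.jar:$XERCES_HOME/xmlParserAPIs.jar:.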
+
+USAGE
+
+ See readme.xen
diff --git a/tools/misc/policyprocessor/readme.xen b/tools/misc/policyprocessor/readme.xen
new file mode 100644
index 0000000000..e7f9fa4c9a
--- /dev/null
+++ b/tools/misc/policyprocessor/readme.xen
@@ -0,0 +1,65 @@
+# Author: Ray Valdez, rvaldez@us.ibm.com
+# Version: 1.0
+#
+# This readme describes the policy processor tool for sHype.
+#
+
+Java program:
+
+ java XmlToBin -i <file.xml> [-o <file.bin>] [-xssid <SsidFile>] [-xssidconf <SsidConf>] [-debug]
+
+ Command line options:
+
+ -i inputFile: name of the xml policy file (.xml)
+ -o outputFile: name of the binary policy file (big endian)
+ -xssid SsidFile: text file mapping xen ssids to named types
+ -xssidconf SsidConf: text file mapping xen conflict ssids to named types
+ -debug: turn on debug messages
+ -help: print this usage information
+
+Where:
+
+file.xml is the (input) xml policy file to be parsed and validated;
+its syntax is defined in SecurityPolicySpec.xsd. file.bin is the
+(output) binary policy file generated by XmlToBin. This binary policy
+can be activated in sHype and is laid out in network byte order
+(i.e., big endian). The SsidFile contains the mapping of type
+enforcement (TE) ssids to the named types, and the SsidConf file
+contains the mapping of Chinese Wall (ChWall) ssids to conflict named
+types. Both the SsidFile and SsidConf files are used by Xen.
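+
+For example, to generate both the binary policy and the two mapping
+files in one run (the output file names here are only examples):
+
+ java XmlToBin -i xen_sample_policy.xml -o xen_sample_policy.bin \
+      -xssid xen_sample_ssids.txt -xssidconf xen_sample_conflicts.txt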
+
+Xml Schema and policy:
+
+The SecurityPolicySpec.xsd schema defines the syntax of a policy file
+and declares the tags that XmlToBin keys on (TE, ChWall, id, vid,
+etc.) to generate the binary policy file. The xml files that describe
+a policy are deliberately simple; most semantic checking of a policy
+is performed by XmlToBin. A type, for example, is just a string: no
+fixed type values are defined in the Xml.
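+
+For example, a VM entry in the policy Xml (this one is taken from the
+xen_sample_policy.xml shipped with this tool) looks like:
+
+  <VM>
+    <id> 1 </id>
+    <TE>R-Company-development</TE>
+    <ChWall>R-Company</ChWall>
+  </VM>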
+
+A policy consists of two Xml files: a definition file and a policy
+file. The definition Xml declares the types that may be used in the
+policy Xml. The policy Xml assigns labels to subjects/objects (e.g.,
+VMs) and contains an explicit reference to its definition Xml (e.g.,
+<url>xen_sample_def.xml</url>). The policy Xml is the file passed on
+the command line.
+
+
+Files:
+
+*.java - policy processor source
+xen_sample_policy.xml - sample xml policy file
+xen_sample_def.xml - sample user defined types
+SecurityPolicySpec.xsd - schema definition file
+
+
+To generate the sample binary policy:
+
+export CLASSPATH=$XERCES_HOME/xercesImpl.jar:$XERCES_HOME/xmlParserAPIs.jar:.
+
+java XmlToBin -i xen_sample_policy.xml -o xen_sample_policy.bin
+
+where $XERCES_HOME is the installation directory of Apache Xerces-J.
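+
+The resulting binary policy can then be loaded into Xen with the
+policy_tool utility built from tools/policy in this tree, e.g.:
+
+ policy_tool loadpolicy xen_sample_policy.bin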
+
+
diff --git a/tools/misc/policyprocessor/xen_sample_def.xml b/tools/misc/policyprocessor/xen_sample_def.xml
new file mode 100644
index 0000000000..e64fbd068f
--- /dev/null
+++ b/tools/misc/policyprocessor/xen_sample_def.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!-- Author: Ray Valdez, rvaldez@us.ibm.com -->
+<!-- example policy type definition -->
+<SecurityPolicySpec
+xmlns="http://www.ibm.com"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://www.ibm.com SecurityPolicySpec.xsd">
+
+<Definition>
+<!-- an example of a simple type enforcement type definition -->
+ <Types>
+ <TE>LOCAL-management</TE>
+ <TE>R-Company-development</TE>
+ <TE>S-Company-order</TE>
+ <TE>T-Company-advertising</TE>
+ <TE>U-Company-computing</TE>
+ <!-- TE nondevelopment -->
+ </Types>
+
+<!-- an example of a chinese wall type definition along with conflict sets-->
+ <ChWallTypes>
+ <ChWall>Q-Company</ChWall>
+ <ChWall>R-Company</ChWall>
+ <ChWall>S-Company</ChWall>
+ <ChWall>T-Company</ChWall>
+ <ChWall>U-Company</ChWall>
+ <ChWall>V-Company</ChWall>
+ <ChWall>W-Company</ChWall>
+ <ChWall>X-Company</ChWall>
+ <ChWall>Y-Company</ChWall>
+ <ChWall>Z-Company</ChWall>
+ </ChWallTypes>
+
+ <ConflictSet>
+ <ChWall>T-Company</ChWall>
+ <ChWall>S-Company</ChWall>
+ </ConflictSet>
+
+ <ConflictSet>
+ <ChWall>Q-Company</ChWall>
+ <ChWall>V-Company</ChWall>
+ <ChWall>W-Company</ChWall>
+ </ConflictSet>
+
+</Definition>
+</SecurityPolicySpec>
diff --git a/tools/misc/policyprocessor/xen_sample_policy.xml b/tools/misc/policyprocessor/xen_sample_policy.xml
new file mode 100644
index 0000000000..cebb303c4f
--- /dev/null
+++ b/tools/misc/policyprocessor/xen_sample_policy.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0"?>
+<!-- Author: Ray Valdez, rvaldez@us.ibm.com -->
+<!-- example xen policy file -->
+
+<SecurityPolicySpec
+xmlns="http://www.ibm.com"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://www.ibm.com SecurityPolicySpec.xsd">
+<Policy>
+ <PolicyHeader>
+ <Name>xen sample policy</Name>
+ <DateTime>2005-05-20T16:56:00</DateTime>
+ <Tag>foobar</Tag>
+ <TypeDefinition>
+ <url>xen_sample_def.xml</url>
+ <hash>abcdef123456abcdef</hash>
+ </TypeDefinition>
+ </PolicyHeader>
+
+ <VM>
+ <id> 0 </id>
+ <TE>LOCAL-management</TE>
+ <TE>R-Company-development</TE>
+ <TE>S-Company-order</TE>
+ <TE>T-Company-advertising</TE>
+ <TE>U-Company-computing</TE>
+ <ChWall>Q-Company</ChWall>
+ </VM>
+
+ <VM>
+ <id> 1 </id>
+ <TE>R-Company-development</TE>
+ <ChWall>R-Company</ChWall>
+ </VM>
+
+ <VM>
+ <id> 2 </id>
+ <TE>S-Company-order</TE>
+ <ChWall>S-Company</ChWall>
+
+ </VM>
+
+ <VM>
+ <id> 3 </id>
+ <TE>T-Company-advertising</TE>
+ <ChWall>T-Company</ChWall>
+ </VM>
+
+
+ <VM>
+ <id> 4 </id>
+ <TE>U-Company-computing</TE>
+ <ChWall>U-Company</ChWall>
+ </VM>
+
+
+</Policy>
+</SecurityPolicySpec>
diff --git a/tools/policy/Makefile b/tools/policy/Makefile
new file mode 100644
index 0000000000..b8d67471ae
--- /dev/null
+++ b/tools/policy/Makefile
@@ -0,0 +1,36 @@
+XEN_ROOT = ../..
+include $(XEN_ROOT)/tools/Rules.mk
+
+SRCS = policy_tool.c
+CFLAGS += -static
+CFLAGS += -Wall
+CFLAGS += -Werror
+CFLAGS += -O3
+CFLAGS += -fno-strict-aliasing
+CFLAGS += -I.
+
+all: build
+build: mk-symlinks
+ $(MAKE) policy_tool
+
+default: all
+
+install: all
+
+policy_tool : policy_tool.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) -o $@ $<
+
+clean:
+ rm -rf policy_tool xen
+
+
+LINUX_ROOT := $(wildcard $(XEN_ROOT)/linux-2.6.*-xen-sparse)
+mk-symlinks:
+ [ -e xen/linux ] || mkdir -p xen/linux
+ [ -e xen/io ] || mkdir -p xen/io
+ ( cd xen >/dev/null ; \
+ ln -sf ../$(XEN_ROOT)/xen/include/public/*.h . )
+ ( cd xen/io >/dev/null ; \
+ ln -sf ../../$(XEN_ROOT)/xen/include/public/io/*.h . )
+ ( cd xen/linux >/dev/null ; \
+ ln -sf ../../$(LINUX_ROOT)/include/asm-xen/linux-public/*.h . )
diff --git a/tools/policy/policy_tool.c b/tools/policy/policy_tool.c
new file mode 100644
index 0000000000..696a70c282
--- /dev/null
+++ b/tools/policy/policy_tool.c
@@ -0,0 +1,557 @@
+/****************************************************************
+ * policy_tool.c
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Stefan Berger <stefanb@watson.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * sHype policy management tool. This code runs in a domain and
+ * manages the Xen security policy by interacting with the
+ * Xen access control module via a /proc/xen/policycmd proc-ioctl,
+ * which is translated into a policy_op hypercall into Xen.
+ *
+ * todo: implement setpolicy to dynamically set a policy cache.
+ */
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <string.h>
+#include <stdint.h>
+#include <netinet/in.h>
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+#include <xen/acm.h>
+
+#include <xen/policy_ops.h>
+
+#include <xen/linux/privcmd.h>
+
+#define ERROR(_m, _a...) \
+ fprintf(stderr, "ERROR: " _m "\n" , ## _a )
+
+#define PERROR(_m, _a...) \
+ fprintf(stderr, "ERROR: " _m " (%d = %s)\n" , ## _a , \
+ errno, strerror(errno))
+
+static inline int do_policycmd(int xc_handle,
+ unsigned int cmd,
+ unsigned long data)
+{
+ return ioctl(xc_handle, cmd, data);
+}
+
+static inline int do_xen_hypercall(int xc_handle,
+ privcmd_hypercall_t *hypercall)
+{
+ return do_policycmd(xc_handle,
+ IOCTL_PRIVCMD_HYPERCALL,
+ (unsigned long)hypercall);
+}
+
+static inline int do_policy_op(int xc_handle, policy_op_t *op)
+{
+ int ret = -1;
+ privcmd_hypercall_t hypercall;
+
+ op->interface_version = POLICY_INTERFACE_VERSION;
+
+ hypercall.op = __HYPERVISOR_policy_op;
+ hypercall.arg[0] = (unsigned long)op;
+
+ if ( mlock(op, sizeof(*op)) != 0 )
+ {
+ PERROR("Could not lock memory for Xen policy hypercall");
+ goto out1;
+ }
+
+ if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
+ {
+ if ( errno == EACCES )
+ fprintf(stderr, "POLICY operation failed -- need to"
+ " rebuild the user-space tool set?\n");
+ goto out2;
+ }
+
+ out2: (void)munlock(op, sizeof(*op));
+ out1: return ret;
+}
+
+/*************************** DUMPS *******************************/
+
+void acm_dump_chinesewall_buffer(void *buf, int buflen) {
+
+ struct acm_chwall_policy_buffer *cwbuf = (struct acm_chwall_policy_buffer *)buf;
+ domaintype_t *ssids, *conflicts, *running_types, *conflict_aggregate;
+ int i,j;
+
+
+ if (ntohs(cwbuf->policy_code) != ACM_CHINESE_WALL_POLICY) {
+ printf("CHINESE WALL POLICY CODE not found ERROR!!\n");
+ return;
+ }
+ printf("\n\nChinese Wall policy:\n");
+ printf("====================\n");
+ printf("Max Types = %x.\n", ntohs(cwbuf->chwall_max_types));
+ printf("Max Ssidrefs = %x.\n", ntohs(cwbuf->chwall_max_ssidrefs));
+ printf("Max ConfSets = %x.\n", ntohs(cwbuf->chwall_max_conflictsets));
+ printf("Ssidrefs Off = %x.\n", ntohs(cwbuf->chwall_ssid_offset));
+ printf("Conflicts Off = %x.\n", ntohs(cwbuf->chwall_conflict_sets_offset));
+ printf("Runing T. Off = %x.\n", ntohs(cwbuf->chwall_running_types_offset));
+ printf("C. Agg. Off = %x.\n", ntohs(cwbuf->chwall_conflict_aggregate_offset));
+ printf("\nSSID To CHWALL-Type matrix:\n");
+
+ ssids = (domaintype_t *)(buf + ntohs(cwbuf->chwall_ssid_offset));
+ for(i=0; i< ntohs(cwbuf->chwall_max_ssidrefs); i++) {
+ printf("\n ssidref%2x: ", i);
+ for(j=0; j< ntohs(cwbuf->chwall_max_types); j++)
+ printf("%02x ", ntohs(ssids[i*ntohs(cwbuf->chwall_max_types) + j]));
+ }
+ printf("\n\nConfict Sets:\n");
+ conflicts = (domaintype_t *)(buf + ntohs(cwbuf->chwall_conflict_sets_offset));
+ for(i=0; i< ntohs(cwbuf->chwall_max_conflictsets); i++) {
+ printf("\n c-set%2x: ", i);
+ for(j=0; j< ntohs(cwbuf->chwall_max_types); j++)
+ printf("%02x ", ntohs(conflicts[i*ntohs(cwbuf->chwall_max_types) +j]));
+ }
+ printf("\n");
+
+ printf("\nRunning\nTypes: ");
+ if (ntohs(cwbuf->chwall_running_types_offset)) {
+ running_types = (domaintype_t *)(buf + ntohs(cwbuf->chwall_running_types_offset));
+ for(i=0; i< ntohs(cwbuf->chwall_max_types); i++) {
+ printf("%02x ", ntohs(running_types[i]));
+ }
+ printf("\n");
+ } else {
+ printf("Not Reported!\n");
+ }
+ printf("\nConflict\nAggregate Set: ");
+ if (ntohs(cwbuf->chwall_conflict_aggregate_offset)) {
+ conflict_aggregate = (domaintype_t *)(buf + ntohs(cwbuf->chwall_conflict_aggregate_offset));
+ for(i=0; i< ntohs(cwbuf->chwall_max_types); i++) {
+ printf("%02x ", ntohs(conflict_aggregate[i]));
+ }
+ printf("\n\n");
+ } else {
+ printf("Not Reported!\n");
+ }
+}
+
+void acm_dump_ste_buffer(void *buf, int buflen) {
+
+ struct acm_ste_policy_buffer *stebuf = (struct acm_ste_policy_buffer *)buf;
+ domaintype_t *ssids;
+ int i,j;
+
+
+ if (ntohs(stebuf->policy_code) != ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) {
+ printf("SIMPLE TYPE ENFORCEMENT POLICY CODE not found ERROR!!\n");
+ return;
+ }
+ printf("\nSimple Type Enforcement policy:\n");
+ printf("===============================\n");
+ printf("Max Types = %x.\n", ntohs(stebuf->ste_max_types));
+ printf("Max Ssidrefs = %x.\n", ntohs(stebuf->ste_max_ssidrefs));
+ printf("Ssidrefs Off = %x.\n", ntohs(stebuf->ste_ssid_offset));
+ printf("\nSSID To STE-Type matrix:\n");
+
+ ssids = (domaintype_t *)(buf + ntohs(stebuf->ste_ssid_offset));
+ for(i=0; i< ntohs(stebuf->ste_max_ssidrefs); i++) {
+ printf("\n ssidref%2x: ", i);
+ for(j=0; j< ntohs(stebuf->ste_max_types); j++)
+ printf("%02x ", ntohs(ssids[i*ntohs(stebuf->ste_max_types) +j]));
+ }
+ printf("\n\n");
+}
+
+void acm_dump_policy_buffer(void *buf, int buflen) {
+ struct acm_policy_buffer *pol = (struct acm_policy_buffer *)buf;
+
+ printf("\nPolicy dump:\n");
+ printf("============\n");
+ printf("Magic = %x.\n", ntohl(pol->magic));
+ printf("PolVer = %x.\n", ntohl(pol->policyversion));
+ printf("Len = %x.\n", ntohl(pol->len));
+ printf("Primary = %s (c=%x, off=%x).\n",
+ ACM_POLICY_NAME(ntohs(pol->primary_policy_code)),
+ ntohs(pol->primary_policy_code), ntohs(pol->primary_buffer_offset));
+ printf("Secondary = %s (c=%x, off=%x).\n",
+ ACM_POLICY_NAME(ntohs(pol->secondary_policy_code)),
+ ntohs(pol->secondary_policy_code), ntohs(pol->secondary_buffer_offset));
+ switch (ntohs(pol->primary_policy_code)) {
+ case ACM_CHINESE_WALL_POLICY:
+ acm_dump_chinesewall_buffer(buf+ntohs(pol->primary_buffer_offset),
+ ntohl(pol->len) - ntohs(pol->primary_buffer_offset));
+ break;
+ case ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY:
+ acm_dump_ste_buffer(buf+ntohs(pol->primary_buffer_offset),
+ ntohl(pol->len) - ntohs(pol->primary_buffer_offset));
+ break;
+ case ACM_NULL_POLICY:
+ printf("Primary policy is NULL Policy (n/a).\n");
+ break;
+ default:
+ printf("UNKNOWN POLICY!\n");
+ }
+ switch (ntohs(pol->secondary_policy_code)) {
+ case ACM_CHINESE_WALL_POLICY:
+ acm_dump_chinesewall_buffer(buf+ntohs(pol->secondary_buffer_offset),
+ ntohl(pol->len) - ntohs(pol->secondary_buffer_offset));
+ break;
+ case ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY:
+ acm_dump_ste_buffer(buf+ntohs(pol->secondary_buffer_offset),
+ ntohl(pol->len) - ntohs(pol->secondary_buffer_offset));
+ break;
+ case ACM_NULL_POLICY:
+ printf("Secondary policy is NULL Policy (n/a).\n");
+ break;
+ default:
+ printf("UNKNOWN POLICY!\n");
+ }
+ printf("\nPolicy dump End.\n\n");
+}
+
+/*************************** set policy ****************************/
+
+int acm_domain_set_chwallpolicy(void *bufstart, int buflen) {
+#define CWALL_MAX_SSIDREFS 5
+#define CWALL_MAX_TYPES 10
+#define CWALL_MAX_CONFLICTSETS 2
+
+ struct acm_chwall_policy_buffer *chwall_bin_pol = (struct acm_chwall_policy_buffer *)bufstart;
+ domaintype_t *ssidrefs, *conflicts;
+ int ret = 0;
+ int i,j;
+
+ chwall_bin_pol->chwall_max_types = htons(CWALL_MAX_TYPES);
+ chwall_bin_pol->chwall_max_ssidrefs = htons(CWALL_MAX_SSIDREFS);
+ chwall_bin_pol->policy_code = htons(ACM_CHINESE_WALL_POLICY);
+ chwall_bin_pol->chwall_ssid_offset = htons(sizeof(struct acm_chwall_policy_buffer));
+ chwall_bin_pol->chwall_max_conflictsets = htons(CWALL_MAX_CONFLICTSETS);
+ chwall_bin_pol->chwall_conflict_sets_offset =
+ htons(
+ ntohs(chwall_bin_pol->chwall_ssid_offset) +
+ sizeof(domaintype_t)*CWALL_MAX_SSIDREFS*CWALL_MAX_TYPES);
+ chwall_bin_pol->chwall_running_types_offset = 0; /* not set */
+ chwall_bin_pol->chwall_conflict_aggregate_offset = 0; /* not set */
+ ret += sizeof(struct acm_chwall_policy_buffer);
+ /* now push example ssids into the buffer (max_ssidrefs x max_types entries) */
+ /* check buffer size */
+ if ((buflen - ret) < (CWALL_MAX_TYPES*CWALL_MAX_SSIDREFS*sizeof(domaintype_t)))
+ return -1; /* not enough space */
+
+ ssidrefs = (domaintype_t *)(bufstart+ntohs(chwall_bin_pol->chwall_ssid_offset));
+ for(i=0; i< CWALL_MAX_SSIDREFS; i++) {
+ for (j=0; j< CWALL_MAX_TYPES; j++)
+ ssidrefs[i*CWALL_MAX_TYPES + j] = htons(0);
+ /* here, set type i for ssidref i; generally, a ssidref can have multiple chwall types */
+ if (i < CWALL_MAX_SSIDREFS)
+ ssidrefs[i*CWALL_MAX_TYPES + i] = htons(1);
+ }
+ ret += CWALL_MAX_TYPES*CWALL_MAX_SSIDREFS*sizeof(domaintype_t);
+ if ((buflen - ret) < (CWALL_MAX_CONFLICTSETS*CWALL_MAX_TYPES*sizeof(domaintype_t)))
+ return -1; /* not enough space */
+
+ /* now the chinese wall policy conflict sets*/
+ conflicts = (domaintype_t *)(bufstart +
+ ntohs(chwall_bin_pol->chwall_conflict_sets_offset));
+ memset((void *)conflicts, 0, CWALL_MAX_CONFLICTSETS*CWALL_MAX_TYPES*sizeof(domaintype_t));
+ /* two example conflict sets: [0]={2,3}, [1]={0,5,6} */
+ if (CWALL_MAX_TYPES > 3) {
+ conflicts[2] = htons(1); conflicts[3] = htons(1); /* {2,3} */
+ conflicts[CWALL_MAX_TYPES] = htons(1); conflicts[CWALL_MAX_TYPES+5] = htons(1);
+ conflicts[CWALL_MAX_TYPES+6] = htons(1);/* {0,5,6} */
+ }
+ ret += sizeof(domaintype_t)*CWALL_MAX_CONFLICTSETS*CWALL_MAX_TYPES;
+ return ret;
+}
+
+int acm_domain_set_stepolicy(void *bufstart, int buflen) {
+#define STE_MAX_SSIDREFS 5
+#define STE_MAX_TYPES 5
+
+ struct acm_ste_policy_buffer *ste_bin_pol = (struct acm_ste_policy_buffer *)bufstart;
+ domaintype_t *ssidrefs;
+ int i,j, ret = 0;
+
+ ste_bin_pol->ste_max_types = htons(STE_MAX_TYPES);
+ ste_bin_pol->ste_max_ssidrefs = htons(STE_MAX_SSIDREFS);
+ ste_bin_pol->policy_code = htons(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
+ ste_bin_pol->ste_ssid_offset = htons(sizeof(struct acm_ste_policy_buffer));
+ ret += sizeof(struct acm_ste_policy_buffer);
+ /* check buffer size */
+ if ((buflen - ret) < (STE_MAX_TYPES*STE_MAX_SSIDREFS*sizeof(domaintype_t)))
+ return -1; /* not enough space */
+
+ ssidrefs = (domaintype_t *)(bufstart+ntohs(ste_bin_pol->ste_ssid_offset));
+ for(i=0; i< STE_MAX_SSIDREFS; i++) {
+ for (j=0; j< STE_MAX_TYPES; j++)
+ ssidrefs[i*STE_MAX_TYPES + j] = htons(0);
+ /* set type i in ssidref 0 and ssidref i */
+ ssidrefs[i] = htons(1); /* ssidref 0 has all types set */
+ if (i < STE_MAX_SSIDREFS)
+ ssidrefs[i*STE_MAX_TYPES + i] = htons(1);
+ }
+ ret += STE_MAX_TYPES*STE_MAX_SSIDREFS*sizeof(domaintype_t);
+ return ret;
+}
+
+#define MAX_PUSH_BUFFER 16384
+u8 push_buffer[MAX_PUSH_BUFFER];
+
+int acm_domain_setpolicy(int xc_handle)
+{
+ int ret;
+ struct acm_policy_buffer *bin_pol;
+ policy_op_t op;
+
+ /* future: read policy from file and set it */
+ bin_pol = (struct acm_policy_buffer *)push_buffer;
+ bin_pol->magic = htonl(ACM_MAGIC);
+ bin_pol->policyversion = htonl(POLICY_INTERFACE_VERSION);
+ bin_pol->primary_policy_code = htons(ACM_CHINESE_WALL_POLICY);
+ bin_pol->secondary_policy_code = htons(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
+
+ bin_pol->len = htonl(sizeof(struct acm_policy_buffer));
+ bin_pol->primary_buffer_offset = htons(ntohl(bin_pol->len));
+ ret = acm_domain_set_chwallpolicy(push_buffer + ntohs(bin_pol->primary_buffer_offset),
+ MAX_PUSH_BUFFER - ntohs(bin_pol->primary_buffer_offset));
+ if (ret < 0) {
+ printf("ERROR creating chwallpolicy buffer.\n");
+ return -1;
+ }
+ bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
+ bin_pol->secondary_buffer_offset = htons(ntohl(bin_pol->len));
+ ret = acm_domain_set_stepolicy(push_buffer + ntohs(bin_pol->secondary_buffer_offset),
+ MAX_PUSH_BUFFER - ntohs(bin_pol->secondary_buffer_offset));
+ if (ret < 0) {
+ printf("ERROR creating chwallpolicy buffer.\n");
+ return -1;
+ }
+ bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
+
+ /* dump it and then push it down into xen/acm */
+ acm_dump_policy_buffer(push_buffer, ntohl(bin_pol->len));
+ op.cmd = POLICY_SETPOLICY;
+ op.u.setpolicy.pushcache = (void *)push_buffer;
+ op.u.setpolicy.pushcache_size = ntohl(bin_pol->len);
+ op.u.setpolicy.policy_type = ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY;
+ ret = do_policy_op(xc_handle, &op);
+
+ if (ret)
+ printf("ERROR setting policy. Use 'xm dmesg' to see details.\n");
+ else
+ printf("Successfully changed policy.\n");
+ return ret;
+}
+
+/******************************* get policy ******************************/
+
+#define PULL_CACHE_SIZE 8192
+u8 pull_buffer[PULL_CACHE_SIZE];
+int acm_domain_getpolicy(int xc_handle)
+{
+ policy_op_t op;
+ int ret;
+
+ memset(pull_buffer, 0x00, sizeof(pull_buffer));
+ op.cmd = POLICY_GETPOLICY;
+ op.u.getpolicy.pullcache = (void *)pull_buffer;
+ op.u.getpolicy.pullcache_size = sizeof(pull_buffer);
+ ret = do_policy_op(xc_handle, &op);
+ /* dump policy */
+ acm_dump_policy_buffer(pull_buffer, sizeof(pull_buffer));
+ return ret;
+}
+
+/************************ load binary policy ******************************/
+
+int acm_domain_loadpolicy(int xc_handle,
+ const char *filename)
+{
+ struct stat mystat;
+ int ret, fd;
+ off_t len;
+ u8 *buffer;
+
+ if ((ret = stat(filename, &mystat))) {
+ printf("File %s not found.\n",filename);
+ goto out;
+ }
+
+ len = mystat.st_size;
+ if ((buffer = malloc(len)) == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if ((fd = open(filename, O_RDONLY)) <= 0) {
+ ret = -ENOENT;
+ printf("File %s not found.\n",filename);
+ goto free_out;
+ }
+ if (len == read(fd, buffer, len)) {
+ policy_op_t op;
+ /* dump it and then push it down into xen/acm */
+ acm_dump_policy_buffer(buffer, len);
+ op.cmd = POLICY_SETPOLICY;
+ op.u.setpolicy.pushcache = (void *)buffer;
+ op.u.setpolicy.pushcache_size = len;
+ op.u.setpolicy.policy_type = ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY;
+ ret = do_policy_op(xc_handle, &op);
+
+ if (ret)
+ printf("ERROR setting policy. Use 'xm dmesg' to see details.\n");
+ else
+ printf("Successfully changed policy.\n");
+
+ } else {
+ ret = -1;
+ }
+ close(fd);
+ free_out:
+ free(buffer);
+ out:
+ return ret;
+}
+
+/************************ dump hook statistics ******************************/
+void
+dump_ste_stats(struct acm_ste_stats_buffer *ste_stats)
+{
+ printf("STE-Policy Security Hook Statistics:\n");
+ printf("ste: event_channel eval_count = %d\n", ntohl(ste_stats->ec_eval_count));
+ printf("ste: event_channel denied_count = %d\n", ntohl(ste_stats->ec_denied_count));
+ printf("ste: event_channel cache_hit_count = %d\n", ntohl(ste_stats->ec_cachehit_count));
+ printf("ste:\n");
+ printf("ste: grant_table eval_count = %d\n", ntohl(ste_stats->gt_eval_count));
+ printf("ste: grant_table denied_count = %d\n", ntohl(ste_stats->gt_denied_count));
+ printf("ste: grant_table cache_hit_count = %d\n", ntohl(ste_stats->gt_cachehit_count));
+}
+
+#define PULL_STATS_SIZE 8192
+int acm_domain_dumpstats(int xc_handle)
+{
+ u8 stats_buffer[PULL_STATS_SIZE];
+ policy_op_t op;
+ int ret;
+ struct acm_stats_buffer *stats;
+
+ memset(stats_buffer, 0x00, sizeof(stats_buffer));
+ op.cmd = POLICY_DUMPSTATS;
+ op.u.dumpstats.pullcache = (void *)stats_buffer;
+ op.u.dumpstats.pullcache_size = sizeof(stats_buffer);
+ ret = do_policy_op(xc_handle, &op);
+
+ if (ret < 0) {
+ printf("ERROR dumping policy stats. Use 'xm dmesg' to see details.\n");
+ return ret;
+ }
+ stats = (struct acm_stats_buffer *)stats_buffer;
+
+ printf("\nPolicy dump:\n");
+ printf("============\n");
+ printf("Magic = %x.\n", ntohl(stats->magic));
+ printf("PolVer = %x.\n", ntohl(stats->policyversion));
+ printf("Len = %x.\n", ntohl(stats->len));
+
+ switch(ntohs(stats->primary_policy_code)) {
+ case ACM_NULL_POLICY:
+ printf("NULL Policy: No statistics apply.\n");
+ break;
+ case ACM_CHINESE_WALL_POLICY:
+ printf("Chinese Wall Policy: No statistics apply.\n");
+ break;
+ case ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY:
+ dump_ste_stats((struct acm_ste_stats_buffer *)(stats_buffer + ntohs(stats->primary_stats_offset)));
+ break;
+ default:
+ printf("UNKNOWN PRIMARY POLICY ERROR!\n");
+ }
+ switch(ntohs(stats->secondary_policy_code)) {
+ case ACM_NULL_POLICY:
+ printf("NULL Policy: No statistics apply.\n");
+ break;
+ case ACM_CHINESE_WALL_POLICY:
+ printf("Chinese Wall Policy: No statistics apply.\n");
+ break;
+ case ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY:
+ dump_ste_stats((struct acm_ste_stats_buffer *)(stats_buffer + ntohs(stats->secondary_stats_offset)));
+ break;
+ default:
+ printf("UNKNOWN SECONDARY POLICY ERROR!\n");
+ }
+ return ret;
+}
+
+/***************************** main **************************************/
+
+void
+usage(char *progname){
+ printf("Use: %s \n"
+ "\t setpolicy\n"
+ "\t getpolicy\n"
+ "\t dumpstats\n"
+ "\t loadpolicy <binary policy file>\n", progname);
+ exit(-1);
+}
+
+int
+main(int argc, char **argv) {
+
+ int policycmd_fd;
+
+ if (argc < 2)
+ usage(argv[0]);
+
+ if ((policycmd_fd = open("/proc/xen/privcmd", O_RDONLY)) <= 0) {
+ printf("ERROR: Could not open xen policycmd device!\n");
+ exit(-1);
+ }
+
+ if (!strcmp(argv[1], "setpolicy")) {
+ if (argc != 2)
+ usage(argv[0]);
+ acm_domain_setpolicy(policycmd_fd);
+
+ } else if (!strcmp(argv[1], "getpolicy")) {
+ if (argc != 2)
+ usage(argv[0]);
+ acm_domain_getpolicy(policycmd_fd);
+
+ } else if (!strcmp(argv[1], "loadpolicy")) {
+ if (argc != 3)
+ usage(argv[0]);
+ acm_domain_loadpolicy(policycmd_fd, argv[2]);
+
+ } else if (!strcmp(argv[1], "dumpstats")) {
+ if (argc != 2)
+ usage(argv[0]);
+ acm_domain_dumpstats(policycmd_fd);
+
+ } else
+ usage(argv[0]);
+
+ close(policycmd_fd);
+ return 0;
+}
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 13d60be08e..81721d961e 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -78,13 +78,14 @@ static PyObject *pyxc_domain_create(PyObject *self,
u32 dom = 0;
int ret;
+ u32 ssidref = 0xFFFFFFFF;
- static char *kwd_list[] = { "dom", NULL };
+ static char *kwd_list[] = { "dom", "ssidref", NULL };
- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|i", kwd_list, &dom))
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list, &dom, &ssidref))
return NULL;
- if ( (ret = xc_domain_create(xc->xc_handle, &dom)) < 0 )
+ if ( (ret = xc_domain_create(xc->xc_handle, ssidref, &dom)) < 0 )
return PyErr_SetFromErrno(xc_error);
return PyInt_FromLong(dom);
@@ -230,7 +231,7 @@ static PyObject *pyxc_domain_getinfo(PyObject *self,
}
info_dict = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
- ",s:l,s:L,s:l,s:i}",
+ ",s:l,s:L,s:l,s:i,s:i}",
"dom", info[i].domid,
"vcpus", info[i].vcpus,
"dying", info[i].dying,
@@ -242,6 +243,7 @@ static PyObject *pyxc_domain_getinfo(PyObject *self,
"mem_kb", info[i].nr_pages*4,
"cpu_time", info[i].cpu_time,
"maxmem_kb", info[i].max_memkb,
+ "ssidref", info[i].ssidref,
"shutdown_reason", info[i].shutdown_reason);
PyDict_SetItemString( info_dict, "vcpu_to_cpu", vcpu_list );
PyDict_SetItemString( info_dict, "cpumap", cpumap_list );
diff --git a/tools/python/xen/lowlevel/xs/xs.c b/tools/python/xen/lowlevel/xs/xs.c
index 1b86a03f65..726b83e8e1 100644
--- a/tools/python/xen/lowlevel/xs/xs.c
+++ b/tools/python/xen/lowlevel/xs/xs.c
@@ -1,7 +1,7 @@
/*
- Python interface to the Xen Store Daemon.
- Copyright (C) 2005 Mike Wray Hewlett-Packard
-*/
+ * Python interface to the Xen Store Daemon.
+ * Copyright (C) 2005 Mike Wray Hewlett-Packard
+ */
#include <Python.h>
@@ -196,6 +196,7 @@ static PyObject *xspy_mkdir(PyObject *self, PyObject *args, PyObject *kwds)
#define xspy_rm_doc "\n" \
"Remove a path.\n" \
" path [string] : path to remove\n" \
+ "\n" \
"Returns: [int] 0 on success.\n" \
"Raises RuntimeError on error.\n" \
"\n"
@@ -339,13 +340,14 @@ static PyObject *xspy_set_permissions(PyObject *self, PyObject *args,
return val;
}
-#define xspy_watch_doc "\n" \
- "Watch a path, get notifications when it changes.\n" \
- " path [string] : xenstore path.\n" \
- " token [string] : returned in watch notification\n" \
- "\n" \
- "Returns: [int] 0 on success.\n" \
- "Raises RuntimeError on error.\n" \
+#define xspy_watch_doc "\n" \
+ "Watch a path, get notifications when it changes.\n" \
+ " path [string] : xenstore path.\n" \
+ " priority [int] : watch priority (default 0).\n" \
+ " token [string] : returned in watch notification.\n" \
+ "\n" \
+ "Returns: [int] 0 on success.\n" \
+ "Raises RuntimeError on error.\n" \
"\n"
static PyObject *xspy_watch(PyObject *self, PyObject *args, PyObject *kwds)
@@ -371,12 +373,14 @@ static PyObject *xspy_watch(PyObject *self, PyObject *args, PyObject *kwds)
return val;
}
-#define xspy_read_watch_doc "\n" \
- "Read a watch notification.\n" \
- " path [string]: xenstore path.\n" \
- "\n" \
- "Returns: [tuple] (path, token).\n" \
- "Raises RuntimeError on error.\n" \
+#define xspy_read_watch_doc "\n" \
+ "Read a watch notification.\n" \
+ "The notification must be acknowledged by passing\n" \
+ "the token to acknowledge_watch().\n" \
+ " path [string]: xenstore path.\n" \
+ "\n" \
+ "Returns: [tuple] (path, token).\n" \
+ "Raises RuntimeError on error.\n" \
"\n"
static PyObject *xspy_read_watch(PyObject *self, PyObject *args,
@@ -408,7 +412,7 @@ static PyObject *xspy_read_watch(PyObject *self, PyObject *args,
#define xspy_acknowledge_watch_doc "\n" \
"Acknowledge a watch notification that has been read.\n" \
- " token [string] : returned in watch notification\n" \
+ " token [string] : from the watch notification\n" \
"\n" \
"Returns: [int] 0 on success.\n" \
"Raises RuntimeError on error.\n" \
@@ -499,7 +503,7 @@ static PyObject *xspy_transaction_start(PyObject *self, PyObject *args,
#define xspy_transaction_end_doc "\n" \
"End the current transaction.\n" \
"Attempts to commit the transaction unless abort is true.\n" \
- " abort [int]: Abort flag..\n" \
+ " abort [int]: abort flag (default 0).\n" \
"\n" \
"Returns: [int] 0 on success.\n" \
"Raises RuntimeError on error.\n" \
@@ -556,10 +560,7 @@ static PyObject *xspy_introduce_domain(PyObject *self, PyObject *args,
if (!PyArg_ParseTupleAndKeywords(args, kwds, arg_spec, kwd_spec,
&dom, &page, &port, &path))
goto exit;
- printf("%s> dom=%u page=0x%08lx port=%u path=%s\n", __FUNCTION__, dom,
- page, port, path);
xsval = xs_introduce_domain(xh, dom, page, port, path);
- printf("%s> xsval=%d\n", __FUNCTION__, xsval);
val = pyvalue_int(xsval);
exit:
return val;
@@ -590,9 +591,7 @@ static PyObject *xspy_release_domain(PyObject *self, PyObject *args,
if (!PyArg_ParseTupleAndKeywords(args, kwds, arg_spec, kwd_spec,
&dom))
goto exit;
- printf("%s> dom=%u\n", __FUNCTION__, dom);
xsval = xs_release_domain(xh, dom);
- printf("%s> xsval=%d\n", __FUNCTION__, xsval);
val = pyvalue_int(xsval);
exit:
return val;
@@ -651,6 +650,28 @@ static PyObject *xspy_shutdown(PyObject *self, PyObject *args, PyObject *kwds)
return val;
}
+#define xspy_fileno_doc "\n" \
+ "Get the file descriptor of the xenstore socket.\n" \
+ "Allows an xs object to be passed to select().\n" \
+ "\n" \
+ "Returns: [int] file descriptor.\n" \
+ "\n"
+
+static PyObject *xspy_fileno(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ static char *kwd_spec[] = { NULL };
+ static char *arg_spec = "";
+
+ struct xs_handle *xh = xshandle(self);
+ PyObject *val = NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, arg_spec, kwd_spec))
+ goto exit;
+ val = PyInt_FromLong((xh ? xs_fileno(xh) : -1));
+ exit:
+ return val;
+}
+
#define XSPY_METH(_name) { \
.ml_name = #_name, \
.ml_meth = (PyCFunction) xspy_ ## _name, \
@@ -675,17 +696,14 @@ static PyMethodDef xshandle_methods[] = {
XSPY_METH(release_domain),
XSPY_METH(close),
XSPY_METH(shutdown),
+ XSPY_METH(fileno),
{ /* Terminator. */ },
};
static PyObject *xshandle_getattr(PyObject *self, char *name)
{
PyObject *val = NULL;
- if (strcmp(name, "fileno") == 0) {
- struct xs_handle *xh = xshandle(self);
- val = PyInt_FromLong((xh ? xs_fileno(xh) : -1));
- } else
- val = Py_FindMethod(xshandle_methods, self, name);
+ val = Py_FindMethod(xshandle_methods, self, name);
return val;
}
@@ -754,7 +772,7 @@ static PyMethodDef xs_methods[] = {
"Raises RuntimeError on error.\n"
"\n"
},
- { NULL, NULL, 0, NULL }
+ { /* Terminator. */ }
};
PyMODINIT_FUNC initxs (void)
diff --git a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py
index 4e05ac0861..0383b9c981 100644
--- a/tools/python/xen/xend/XendDomainInfo.py
+++ b/tools/python/xen/xend/XendDomainInfo.py
@@ -202,7 +202,9 @@ class XendDomainInfo:
"""
db = parentdb.addChild(uuid)
vm = cls(db)
- id = xc.domain_create()
+ ssidref = int(sxp.child_value(config, 'ssidref'))
+ log.debug('restoring with ssidref='+str(ssidref))
+ id = xc.domain_create(ssidref = ssidref)
vm.setdom(id)
try:
vm.restore = True
@@ -241,6 +243,7 @@ class XendDomainInfo:
self.start_time = None
self.name = None
self.memory = None
+ self.ssidref = None
self.image = None
self.channel = None
@@ -316,6 +319,7 @@ class XendDomainInfo:
"""
self.info = info
self.memory = self.info['mem_kb'] / 1024
+ self.ssidref = self.info['ssidref']
def state_set(self, state):
self.state_updated.acquire()
@@ -336,6 +340,7 @@ class XendDomainInfo:
s += " id=" + str(self.id)
s += " name=" + self.name
s += " memory=" + str(self.memory)
+ s += " ssidref=" + str(self.ssidref)
console = self.getConsole()
if console:
s += " console=" + str(console.console_port)
@@ -398,7 +403,8 @@ class XendDomainInfo:
sxpr = ['domain',
['id', self.id],
['name', self.name],
- ['memory', self.memory] ]
+ ['memory', self.memory],
+ ['ssidref', self.ssidref] ]
if self.uuid:
sxpr.append(['uuid', self.uuid])
if self.info:
@@ -511,7 +517,7 @@ class XendDomainInfo:
self.configure_restart()
self.construct_image()
self.configure()
- self.exportToDB()
+ self.exportToDB(save=True)
except Exception, ex:
# Catch errors, cleanup and re-raise.
print 'Domain construction error:', ex
@@ -523,7 +529,7 @@ class XendDomainInfo:
def register_domain(self):
xd = get_component('xen.xend.XendDomain')
xd._add_domain(self)
- self.exportToDB()
+ self.exportToDB(save=True)
def configure_cpus(self, config):
try:
@@ -533,6 +539,7 @@ class XendDomainInfo:
self.memory = int(sxp.child_value(config, 'memory'))
if self.memory is None:
raise VmError('missing memory size')
+ self.ssidref = int(sxp.child_value(config, 'ssidref'))
cpu = sxp.child_value(config, 'cpu')
if self.recreate and self.id and cpu is not None and int(cpu) >= 0:
xc.domain_pincpu(self.id, 0, 1<<int(cpu))
@@ -644,7 +651,7 @@ class XendDomainInfo:
def show(self):
"""Print virtual machine info.
"""
- print "[VM dom=%d name=%s memory=%d" % (self.id, self.name, self.memory)
+ print "[VM dom=%d name=%s memory=%d ssidref=%d" % (self.id, self.name, self.memory, self.ssidref)
print "image:"
sxp.show(self.image)
print "]"
@@ -660,7 +667,7 @@ class XendDomainInfo:
cpu = int(sxp.child_value(self.config, 'cpu', '-1'))
except:
raise VmError('invalid cpu')
- id = self.image.initDomain(self.id, self.memory, cpu, self.cpu_weight)
+ id = self.image.initDomain(self.id, self.memory, self.ssidref, cpu, self.cpu_weight)
log.debug('init_domain> Created domain=%d name=%s memory=%d',
id, self.name, self.memory)
self.setdom(id)
@@ -1011,6 +1018,7 @@ addImageHandlerClass(VmxImageHandler)
# Ignore the fields we already handle.
add_config_handler('name', vm_field_ignore)
add_config_handler('memory', vm_field_ignore)
+add_config_handler('ssidref', vm_field_ignore)
add_config_handler('cpu', vm_field_ignore)
add_config_handler('cpu_weight', vm_field_ignore)
add_config_handler('console', vm_field_ignore)
diff --git a/tools/python/xen/xend/image.py b/tools/python/xen/xend/image.py
index f3b8642a5f..5abc121e86 100644
--- a/tools/python/xen/xend/image.py
+++ b/tools/python/xen/xend/image.py
@@ -111,7 +111,7 @@ class ImageHandler:
except OSError, ex:
log.warning("error removing bootloader file '%s': %s", f, ex)
- def initDomain(self, dom, memory, cpu, cpu_weight):
+ def initDomain(self, dom, memory, ssidref, cpu, cpu_weight):
"""Initial domain create.
@return domain id
@@ -119,14 +119,14 @@ class ImageHandler:
mem_kb = self.getDomainMemory(memory)
if not self.vm.restore:
- dom = xc.domain_create(dom = dom or 0)
+ dom = xc.domain_create(dom = dom or 0, ssidref = ssidref)
# if bootloader, unlink here. But should go after buildDomain() ?
if self.vm.bootloader:
self.unlink(self.kernel)
self.unlink(self.ramdisk)
if dom <= 0:
raise VmError('Creating domain failed: name=%s' % self.vm.name)
- log.debug("initDomain: cpu=%d mem_kb=%d dom=%d", cpu, mem_kb, dom)
+ log.debug("initDomain: cpu=%d mem_kb=%d ssidref=%d dom=%d", cpu, mem_kb, ssidref, dom)
# xc.domain_setuuid(dom, uuid)
xc.domain_setcpuweight(dom, cpu_weight)
xc.domain_setmaxmem(dom, mem_kb)
diff --git a/tools/python/xen/xend/server/SrvDomainDir.py b/tools/python/xen/xend/server/SrvDomainDir.py
index d6f6291716..7fcc7c5cf7 100644
--- a/tools/python/xen/xend/server/SrvDomainDir.py
+++ b/tools/python/xen/xend/server/SrvDomainDir.py
@@ -142,6 +142,7 @@ class SrvDomainDir(SrvDir):
% (url, d.name, d.name))
req.write('id=%s' % d.id)
req.write('memory=%d'% d.memory)
+ req.write('ssidref=%d'% d.ssidref)
req.write('</li>')
req.write('</ul>')
diff --git a/tools/python/xen/xend/server/blkif.py b/tools/python/xen/xend/server/blkif.py
index 59932a3d8b..dac65c426b 100755
--- a/tools/python/xen/xend/server/blkif.py
+++ b/tools/python/xen/xend/server/blkif.py
@@ -50,6 +50,9 @@ class BlkifBackend:
def getId(self):
return self.id
+ def getEvtchn(self):
+ return self.evtchn
+
def closeEvtchn(self):
if self.evtchn:
channel.eventChannelClose(self.evtchn)
@@ -198,7 +201,7 @@ class BlkDev(Dev):
backend = self.getBackend()
if backend and backend.evtchn:
db = self.db.addChild("evtchn")
- backend.evtchn.exportToDB(db, save=save)
+ backend.evtchn.saveToDB(db, save=save)
def init(self, recreate=False, reboot=False):
self.frontendDomain = self.getDomain()
diff --git a/tools/python/xen/xend/server/netif.py b/tools/python/xen/xend/server/netif.py
index 8c60904ec5..8d89f73605 100755
--- a/tools/python/xen/xend/server/netif.py
+++ b/tools/python/xen/xend/server/netif.py
@@ -95,7 +95,7 @@ class NetDev(Dev):
Dev.exportToDB(self, save=save)
if self.evtchn:
db = self.db.addChild("evtchn")
- self.evtchn.exportToDB(db, save=save)
+ self.evtchn.saveToDB(db, save=save)
def init(self, recreate=False, reboot=False):
self.destroyed = False
diff --git a/tools/python/xen/xend/xenstore/xsnode.py b/tools/python/xen/xend/xenstore/xsnode.py
index fee14c395c..94b264c3cf 100644
--- a/tools/python/xen/xend/xenstore/xsnode.py
+++ b/tools/python/xen/xend/xenstore/xsnode.py
@@ -64,7 +64,7 @@ class Watcher:
def fileno(self):
if self.xs:
- return self.xs.fileno
+ return self.xs.fileno()
else:
return -1
diff --git a/tools/python/xen/xm/create.py b/tools/python/xen/xm/create.py
index 23001cc458..d2219f9668 100644
--- a/tools/python/xen/xm/create.py
+++ b/tools/python/xen/xm/create.py
@@ -120,6 +120,10 @@ gopts.var('memory', val='MEMORY',
fn=set_int, default=128,
use="Domain memory in MB.")
+gopts.var('ssidref', val='SSIDREF',
+ fn=set_u32, default=0xffffffff,
+ use="Security Identifier.")
+
gopts.var('maxmem', val='MEMORY',
fn=set_int, default=None,
use="Maximum domain memory in MB.")
@@ -405,7 +409,8 @@ def make_config(opts, vals):
config = ['vm',
['name', vals.name ],
- ['memory', vals.memory ]]
+ ['memory', vals.memory ],
+ ['ssidref', vals.ssidref ]]
if vals.maxmem:
config.append(['maxmem', vals.maxmem])
if vals.cpu is not None:
diff --git a/tools/python/xen/xm/main.py b/tools/python/xen/xm/main.py
index d02a190ac4..6eda17e2a9 100644
--- a/tools/python/xen/xm/main.py
+++ b/tools/python/xen/xm/main.py
@@ -383,7 +383,7 @@ class ProgList(Prog):
self.brief_list(doms)
def brief_list(self, doms):
- print 'Name Id Mem(MB) CPU VCPU(s) State Time(s) Console'
+ print 'Name Id Mem(MB) CPU VCPU(s) State Time(s) Console SSID-REF'
for dom in doms:
info = server.xend_domain(dom)
d = {}
@@ -399,8 +399,12 @@ class ProgList(Prog):
d['port'] = sxp.child_value(console, 'console_port')
else:
d['port'] = ''
- print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(vcpus)5d %(state)5s %(cpu_time)7.1f %(port)4s"
- % d)
+ if ((int(sxp.child_value(info, 'ssidref', '-1'))) != -1):
+ d['ssidref1'] = int(sxp.child_value(info, 'ssidref', '-1')) & 0xffff
+ d['ssidref2'] = (int(sxp.child_value(info, 'ssidref', '-1')) >> 16) & 0xffff
+ print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(vcpus)5d %(state)5s %(cpu_time)7.1f %(port)4s s:%(ssidref2)02x/p:%(ssidref1)02x" % d)
+ else:
+ print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(vcpus)5d %(state)5s %(cpu_time)7.1f %(port)4s default" % d)
def show_vcpus(self, doms):
print 'Name Id VCPU CPU CPUMAP'
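For illustration, the packed 32-bit ssidref that the listing code above decodes splits into two 16-bit policy-local references: the low half is printed as p: (primary policy) and the high half as s: (secondary policy). The minimal standalone C sketch below shows that split; the function and values are illustrative only, and the authoritative accessor in the patch is the GET_SSIDREF macro, whose definition is not shown in this hunk.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t ssidref = 0x00020001;                  /* example packed value */
    uint16_t primary   = ssidref & 0xffff;          /* printed above as p:  */
    uint16_t secondary = (ssidref >> 16) & 0xffff;  /* printed above as s:  */

    printf("s:%02x/p:%02x\n", secondary, primary);  /* -> s:02/p:01         */
    return 0;
}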
diff --git a/tools/python/xen/xm/opts.py b/tools/python/xen/xm/opts.py
index f92c82dfe6..30900450dc 100644
--- a/tools/python/xen/xm/opts.py
+++ b/tools/python/xen/xm/opts.py
@@ -451,6 +451,13 @@ def set_bool(opt, k, v):
else:
opt.opts.err('Invalid value:' +v)
+def set_u32(opt, k, v):
+ """Set an option to an u32 value."""
+ try:
+ v = u32(v)
+ except:
+ opt.opts.err('Invalid value: ' + str(v))
+ opt.set(v)
def set_value(opt, k, v):
"""Set an option to a value."""
diff --git a/xen/Makefile b/xen/Makefile
index e71898cf4d..15b55fb24d 100644
--- a/xen/Makefile
+++ b/xen/Makefile
@@ -46,6 +46,7 @@ clean: delete-unfresh-files
$(MAKE) -C tools clean
$(MAKE) -C common clean
$(MAKE) -C drivers clean
+ $(MAKE) -C acm clean
$(MAKE) -C arch/$(TARGET_ARCH) clean
rm -f include/asm *.o $(TARGET)* *~ core
rm -f include/asm-*/asm-offsets.h
@@ -58,6 +59,7 @@ $(TARGET): delete-unfresh-files
$(MAKE) include/asm-$(TARGET_ARCH)/asm-offsets.h
$(MAKE) -C common
$(MAKE) -C drivers
+ $(MAKE) -C acm
$(MAKE) -C arch/$(TARGET_ARCH)
# drivers/char/console.o may contain static banner/compile info. Blow it away.
@@ -109,7 +111,7 @@ include/asm-$(TARGET_ARCH)/asm-offsets.h: arch/$(TARGET_ARCH)/asm-offsets.s
.PHONY: default debug install dist clean delete-unfresh-files TAGS tags
-SUBDIRS = arch/$(TARGET_ARCH) common drivers
+SUBDIRS = acm arch/$(TARGET_ARCH) common drivers
define all_sources
( find include/asm-$(TARGET_ARCH) -name SCCS -prune -o -name '*.h' -print; \
find include -type d -name SCCS -prune -o \( -name "asm-*" -o \
diff --git a/xen/Rules.mk b/xen/Rules.mk
index 221882814a..c0b13ae368 100644
--- a/xen/Rules.mk
+++ b/xen/Rules.mk
@@ -35,6 +35,7 @@ OBJS += $(patsubst %.c,%.o,$(C_SRCS))
ALL_OBJS := $(BASEDIR)/common/common.o
ALL_OBJS += $(BASEDIR)/drivers/char/driver.o
ALL_OBJS += $(BASEDIR)/drivers/acpi/driver.o
+ALL_OBJS += $(BASEDIR)/acm/acm.o
ALL_OBJS += $(BASEDIR)/arch/$(TARGET_ARCH)/arch.o
diff --git a/xen/acm/Makefile b/xen/acm/Makefile
new file mode 100644
index 0000000000..b212041afa
--- /dev/null
+++ b/xen/acm/Makefile
@@ -0,0 +1,15 @@
+
+include $(BASEDIR)/Rules.mk
+OBJS = acm_core.o
+OBJS += acm_policy.o
+OBJS += acm_simple_type_enforcement_hooks.o
+OBJS += acm_chinesewall_hooks.o
+OBJS += acm_null_hooks.o
+
+default: acm.o
+
+acm.o: $(OBJS)
+ $(LD) $(LDFLAGS) -r -o acm.o $(OBJS)
+
+clean:
+ rm -f *.o *~ core
diff --git a/xen/acm/acm_chinesewall_hooks.c b/xen/acm/acm_chinesewall_hooks.c
new file mode 100644
index 0000000000..938716d3f9
--- /dev/null
+++ b/xen/acm/acm_chinesewall_hooks.c
@@ -0,0 +1,503 @@
+/****************************************************************
+ * acm_chinesewall_hooks.c
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * Contributions:
+ * Stefan Berger <stefanb@watson.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * sHype Chinese Wall Policy for Xen
+ * This code implements the hooks that are called
+ * throughout Xen operations and decides authorization
+ * based on domain types and Chinese Wall conflict type
+ * sets. The CHWALL policy decides if a new domain can be started
+ * based on the types of running domains and the type of the
+ * new domain to be started. If the new domain's type is in
+ * conflict with types of running domains, then this new domain
+ * is not allowed to be created. A domain can have multiple types,
+ * in which case all types of a new domain must be conflict-free
+ * with all types of already running domains.
+ *
+ */
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <xen/types.h>
+#include <xen/lib.h>
+#include <xen/delay.h>
+#include <xen/sched.h>
+#include <public/acm.h>
+#include <asm/atomic.h>
+#include <acm/acm_core.h>
+#include <acm/acm_hooks.h>
+#include <acm/acm_endian.h>
+
+/* local cache structures for chinese wall policy */
+struct chwall_binary_policy chwall_bin_pol;
+
+/*
+ * Initialize the Chinese Wall policy (will be filled by the policy
+ * partition using the setpolicy command)
+ */
+int acm_init_chwall_policy(void)
+{
+ /* minimal startup policy; policy write-locked already */
+ chwall_bin_pol.max_types = 1;
+ chwall_bin_pol.max_ssidrefs = 1;
+ chwall_bin_pol.max_conflictsets = 1;
+ chwall_bin_pol.ssidrefs = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_ssidrefs*chwall_bin_pol.max_types);
+ chwall_bin_pol.conflict_sets = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_conflictsets*chwall_bin_pol.max_types);
+ chwall_bin_pol.running_types = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_types);
+ chwall_bin_pol.conflict_aggregate_set = (domaintype_t *)xmalloc_array(domaintype_t, chwall_bin_pol.max_types);
+
+ if ((chwall_bin_pol.conflict_sets == NULL) || (chwall_bin_pol.running_types == NULL) ||
+ (chwall_bin_pol.ssidrefs == NULL) || (chwall_bin_pol.conflict_aggregate_set == NULL))
+ return ACM_INIT_SSID_ERROR;
+
+ /* initialize state */
+ memset((void *)chwall_bin_pol.ssidrefs, 0, chwall_bin_pol.max_ssidrefs*chwall_bin_pol.max_types*sizeof(domaintype_t));
+ memset((void *)chwall_bin_pol.conflict_sets, 0, chwall_bin_pol.max_conflictsets*chwall_bin_pol.max_types*sizeof(domaintype_t));
+ memset((void *)chwall_bin_pol.running_types, 0, chwall_bin_pol.max_types*sizeof(domaintype_t));
+ memset((void *)chwall_bin_pol.conflict_aggregate_set, 0, chwall_bin_pol.max_types*sizeof(domaintype_t));
+ return ACM_OK;
+}
+
+static int
+chwall_init_domain_ssid(void **chwall_ssid, ssidref_t ssidref)
+{
+ struct chwall_ssid *chwall_ssidp = xmalloc(struct chwall_ssid);
+ traceprintk("%s.\n", __func__);
+ if (chwall_ssidp == NULL)
+ return ACM_INIT_SSID_ERROR;
+ /*
+ * depending on whether chwall is primary or secondary, get the respective
+ * part of the global ssidref (same way we'll get the partial ssid pointer)
+ */
+ chwall_ssidp->chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+ if (chwall_ssidp->chwall_ssidref >= chwall_bin_pol.max_ssidrefs) {
+ printkd("%s: ERROR chwall_ssidref(%x) > max(%x).\n",
+ __func__, chwall_ssidp->chwall_ssidref, chwall_bin_pol.max_ssidrefs-1);
+ xfree(chwall_ssidp);
+ return ACM_INIT_SSID_ERROR;
+ }
+ (*chwall_ssid) = chwall_ssidp;
+ printkd("%s: determined chwall_ssidref to %x.\n",
+ __func__, chwall_ssidp->chwall_ssidref);
+ return ACM_OK;
+}
+
+static void
+chwall_free_domain_ssid(void *chwall_ssid)
+{
+ traceprintk("%s.\n", __func__);
+ if (chwall_ssid != NULL)
+ xfree(chwall_ssid);
+ return;
+}
+
+
+/* dump chinese wall cache; policy read-locked already */
+static int
+chwall_dump_policy(u8 *buf, u16 buf_size) {
+ struct acm_chwall_policy_buffer *chwall_buf = (struct acm_chwall_policy_buffer *)buf;
+ int ret = 0;
+
+ chwall_buf->chwall_max_types = htons(chwall_bin_pol.max_types);
+ chwall_buf->chwall_max_ssidrefs = htons(chwall_bin_pol.max_ssidrefs);
+ chwall_buf->policy_code = htons(ACM_CHINESE_WALL_POLICY);
+ chwall_buf->chwall_ssid_offset = htons(sizeof(struct acm_chwall_policy_buffer));
+ chwall_buf->chwall_max_conflictsets = htons(chwall_bin_pol.max_conflictsets);
+ chwall_buf->chwall_conflict_sets_offset =
+ htons(
+ ntohs(chwall_buf->chwall_ssid_offset) +
+ sizeof(domaintype_t) * chwall_bin_pol.max_ssidrefs *
+ chwall_bin_pol.max_types);
+
+ chwall_buf->chwall_running_types_offset =
+ htons(
+ ntohs(chwall_buf->chwall_conflict_sets_offset) +
+ sizeof(domaintype_t) * chwall_bin_pol.max_conflictsets *
+ chwall_bin_pol.max_types);
+
+ chwall_buf->chwall_conflict_aggregate_offset =
+ htons(
+ ntohs(chwall_buf->chwall_running_types_offset) +
+ sizeof(domaintype_t) * chwall_bin_pol.max_types);
+
+ ret = ntohs(chwall_buf->chwall_conflict_aggregate_offset) +
+ sizeof(domaintype_t) * chwall_bin_pol.max_types;
+
+ /* now copy buffers over */
+ arrcpy16((u16 *)(buf + ntohs(chwall_buf->chwall_ssid_offset)),
+ chwall_bin_pol.ssidrefs,
+ chwall_bin_pol.max_ssidrefs * chwall_bin_pol.max_types);
+
+ arrcpy16((u16 *)(buf + ntohs(chwall_buf->chwall_conflict_sets_offset)),
+ chwall_bin_pol.conflict_sets,
+ chwall_bin_pol.max_conflictsets * chwall_bin_pol.max_types);
+
+ arrcpy16((u16 *)(buf + ntohs(chwall_buf->chwall_running_types_offset)),
+ chwall_bin_pol.running_types,
+ chwall_bin_pol.max_types);
+
+ arrcpy16((u16 *)(buf + ntohs(chwall_buf->chwall_conflict_aggregate_offset)),
+ chwall_bin_pol.conflict_aggregate_set,
+ chwall_bin_pol.max_types);
+ return ret;
+}
+
+/* adapt security state (running_types and conflict_aggregate_set) to all running
+ * domains; chwall_init_state is called when a policy is changed to bring the security
+ * information into a consistent state and to detect violations (return != 0).
+ * From a security point of view, we simulate that all running domains are re-started.
+ */
+static int
+chwall_init_state(struct acm_chwall_policy_buffer *chwall_buf, domaintype_t *ssidrefs, domaintype_t *conflict_sets,
+ domaintype_t *running_types, domaintype_t *conflict_aggregate_set)
+{
+ int violation = 0, i, j;
+ struct chwall_ssid *chwall_ssid;
+ ssidref_t chwall_ssidref;
+ struct domain **pd;
+
+ write_lock(&domlist_lock);
+ /* go through all domains and adjust policy as if this domain was started now */
+ pd = &domain_list;
+ for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
+ chwall_ssid = GET_SSIDP(ACM_CHINESE_WALL_POLICY, (struct acm_ssid_domain *)(*pd)->ssid);
+ chwall_ssidref = chwall_ssid->chwall_ssidref;
+ traceprintk("%s: validating policy for domain %x (chwall-REF=%x).\n",
+ __func__, (*pd)->domain_id, chwall_ssidref);
+ /* a) adjust types ref-count for running domains */
+ for (i=0; i< chwall_buf->chwall_max_types; i++)
+ running_types[i] +=
+ ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + i];
+
+ /* b) check for conflict */
+ for (i=0; i< chwall_buf->chwall_max_types; i++)
+ if (conflict_aggregate_set[i] &&
+ ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + i]) {
+ printk("%s: CHINESE WALL CONFLICT in type %02x.\n", __func__, i);
+ violation = 1;
+ goto out;
+ }
+ /* set violation and break out of the loop */
+ /* c) adapt conflict aggregate set for this domain (notice conflicts) */
+ for (i=0; i<chwall_buf->chwall_max_conflictsets; i++) {
+ int common = 0;
+ /* check if conflict_set_i and ssidref have common types */
+ for (j=0; j<chwall_buf->chwall_max_types; j++)
+ if (conflict_sets[i*chwall_buf->chwall_max_types + j] &&
+ ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + j]) {
+ common = 1;
+ break;
+ }
+ if (common == 0)
+ continue; /* try next conflict set */
+ /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+ for (j=0; j<chwall_buf->chwall_max_types; j++)
+ if (conflict_sets[i*chwall_buf->chwall_max_types + j] &&
+ !ssidrefs[chwall_ssidref*chwall_buf->chwall_max_types + j])
+ conflict_aggregate_set[j]++;
+ }
+ }
+ out:
+ write_unlock(&domlist_lock);
+ return violation;
+ /* returning "violation != 0" means that the currently running set of domains would
+ * not be possible if the new policy had been enforced before starting them; for chinese
+ * wall, this means that the new policy includes at least one conflict set of which
+ * more than one type is currently running */
+}
+
+static int
+chwall_set_policy(u8 *buf, u16 buf_size)
+{
+ /* policy write-locked already */
+ struct acm_chwall_policy_buffer *chwall_buf = (struct acm_chwall_policy_buffer *)buf;
+ void *ssids = NULL, *conflict_sets = NULL, *running_types = NULL, *conflict_aggregate_set = NULL;
+
+ /* convert the policy header fields from network byte order (endianness) */
+ chwall_buf->policy_code = ntohs(chwall_buf->policy_code);
+ chwall_buf->chwall_max_types = ntohs(chwall_buf->chwall_max_types);
+ chwall_buf->chwall_max_ssidrefs = ntohs(chwall_buf->chwall_max_ssidrefs);
+ chwall_buf->chwall_max_conflictsets = ntohs(chwall_buf->chwall_max_conflictsets);
+ chwall_buf->chwall_ssid_offset = ntohs(chwall_buf->chwall_ssid_offset);
+ chwall_buf->chwall_conflict_sets_offset = ntohs(chwall_buf->chwall_conflict_sets_offset);
+ chwall_buf->chwall_running_types_offset = ntohs(chwall_buf->chwall_running_types_offset);
+ chwall_buf->chwall_conflict_aggregate_offset = ntohs(chwall_buf->chwall_conflict_aggregate_offset);
+
+ /* 1. allocate new buffers */
+ ssids = xmalloc_array(domaintype_t, chwall_buf->chwall_max_types*chwall_buf->chwall_max_ssidrefs);
+ conflict_sets = xmalloc_array(domaintype_t, chwall_buf->chwall_max_conflictsets*chwall_buf->chwall_max_types);
+ running_types = xmalloc_array(domaintype_t,chwall_buf->chwall_max_types);
+ conflict_aggregate_set = xmalloc_array(domaintype_t, chwall_buf->chwall_max_types);
+
+ if ((ssids == NULL)||(conflict_sets == NULL)||(running_types == NULL)||(conflict_aggregate_set == NULL))
+ goto error_free;
+
+ /* 2. set new policy */
+ if (chwall_buf->chwall_ssid_offset + sizeof(domaintype_t) *
+ chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs > buf_size)
+ goto error_free;
+ arrcpy(ssids, buf + chwall_buf->chwall_ssid_offset,
+ sizeof(domaintype_t),
+ chwall_buf->chwall_max_types * chwall_buf->chwall_max_ssidrefs);
+
+ if (chwall_buf->chwall_conflict_sets_offset + sizeof(domaintype_t) *
+ chwall_buf->chwall_max_types * chwall_buf->chwall_max_conflictsets > buf_size)
+ goto error_free;
+
+ arrcpy(conflict_sets, buf + chwall_buf->chwall_conflict_sets_offset,
+ sizeof(domaintype_t),
+ chwall_buf->chwall_max_types * chwall_buf->chwall_max_conflictsets);
+
+ /* we also use new state buffers since max_types can change */
+ memset(running_types, 0, sizeof(domaintype_t)*chwall_buf->chwall_max_types);
+ memset(conflict_aggregate_set, 0, sizeof(domaintype_t)*chwall_buf->chwall_max_types);
+
+ /* 3. now re-calculate the state for the new policy based on running domains;
+ * this can fail if new policy is conflicting with running domains */
+ if (chwall_init_state(chwall_buf, ssids, conflict_sets, running_types, conflict_aggregate_set)) {
+ printk("%s: New policy conflicts with running domains. Policy load aborted.\n", __func__);
+ goto error_free; /* new policy conflicts with running domains */
+ }
+ /* 4. free old policy buffers, replace with new ones */
+ chwall_bin_pol.max_types = chwall_buf->chwall_max_types;
+ chwall_bin_pol.max_ssidrefs = chwall_buf->chwall_max_ssidrefs;
+ chwall_bin_pol.max_conflictsets = chwall_buf->chwall_max_conflictsets;
+ if (chwall_bin_pol.ssidrefs != NULL)
+ xfree(chwall_bin_pol.ssidrefs);
+ if (chwall_bin_pol.conflict_aggregate_set != NULL)
+ xfree(chwall_bin_pol.conflict_aggregate_set);
+ if (chwall_bin_pol.running_types != NULL)
+ xfree(chwall_bin_pol.running_types);
+ if (chwall_bin_pol.conflict_sets != NULL)
+ xfree(chwall_bin_pol.conflict_sets);
+ chwall_bin_pol.ssidrefs = ssids;
+ chwall_bin_pol.conflict_aggregate_set = conflict_aggregate_set;
+ chwall_bin_pol.running_types = running_types;
+ chwall_bin_pol.conflict_sets = conflict_sets;
+ return ACM_OK;
+
+error_free:
+ printk("%s: ERROR setting policy.\n", __func__);
+ if (ssids != NULL) xfree(ssids);
+ if (conflict_sets != NULL) xfree(conflict_sets);
+ if (running_types != NULL) xfree(running_types);
+ if (conflict_aggregate_set != NULL) xfree(conflict_aggregate_set);
+ return -EFAULT;
+}
+
+static int
+chwall_dump_stats(u8 *buf, u16 len)
+{
+ /* no stats for Chinese Wall Policy */
+ return 0;
+}
+
+/***************************
+ * Authorization functions
+ ***************************/
+
+
+/* -------- DOMAIN OPERATION HOOKS -----------*/
+
+static int
+chwall_pre_domain_create(void *subject_ssid, ssidref_t ssidref)
+{
+ ssidref_t chwall_ssidref;
+ int i,j;
+ traceprintk("%s.\n", __func__);
+
+ read_lock(&acm_bin_pol_rwlock);
+ chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+ if (chwall_ssidref == ACM_DEFAULT_LOCAL_SSID) {
+ printk("%s: ERROR CHWALL SSID is NOT SET but policy enforced.\n", __func__);
+ read_unlock(&acm_bin_pol_rwlock);
+ return ACM_ACCESS_DENIED; /* catching and indicating config error */
+ }
+ if (chwall_ssidref >= chwall_bin_pol.max_ssidrefs) {
+ printk("%s: ERROR chwall_ssidref > max(%x).\n",
+ __func__, chwall_bin_pol.max_ssidrefs-1);
+ read_unlock(&acm_bin_pol_rwlock);
+ return ACM_ACCESS_DENIED;
+ }
+ /* A: chinese wall check for conflicts */
+ for (i=0; i< chwall_bin_pol.max_types; i++)
+ if (chwall_bin_pol.conflict_aggregate_set[i] &&
+ chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + i]) {
+ printk("%s: CHINESE WALL CONFLICT in type %02x.\n", __func__, i);
+ read_unlock(&acm_bin_pol_rwlock);
+ return ACM_ACCESS_DENIED;
+ }
+
+ /* B: chinese wall conflict set adjustment (so that other domains
+ * created simultaneously are evaluated against this new set) */
+ for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
+ int common = 0;
+ /* check if conflict_set_i and ssidref have common types */
+ for (j=0; j<chwall_bin_pol.max_types; j++)
+ if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
+ chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
+ common = 1;
+ break;
+ }
+ if (common == 0)
+ continue; /* try next conflict set */
+ /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+ for (j=0; j<chwall_bin_pol.max_types; j++)
+ if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
+ !chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
+ chwall_bin_pol.conflict_aggregate_set[j]++;
+ }
+ read_unlock(&acm_bin_pol_rwlock);
+ return ACM_ACCESS_PERMITTED;
+}
+
+static void
+chwall_post_domain_create(domid_t domid, ssidref_t ssidref)
+{
+ int i,j;
+ ssidref_t chwall_ssidref;
+ traceprintk("%s.\n", __func__);
+
+ read_lock(&acm_bin_pol_rwlock);
+ chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+ /* adjust types ref-count for running domains */
+ for (i=0; i< chwall_bin_pol.max_types; i++)
+ chwall_bin_pol.running_types[i] +=
+ chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + i];
+ if (domid) {
+ read_unlock(&acm_bin_pol_rwlock);
+ return;
+ }
+ /* Xen does not call pre-create hook for DOM0;
+ * to consider type conflicts of any domain with DOM0, we need
+ * to adjust the conflict_aggregate for DOM0 here the same way it
+ * is done for non-DOM0 domains in the pre-hook */
+ printkd("%s: adjusting security state for DOM0 (ssidref=%x, chwall_ssidref=%x).\n",
+ __func__, ssidref, chwall_ssidref);
+
+ /* chinese wall conflict set adjustment (so that other domains
+ * created simultaneously are evaluated against this new set) */
+ for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
+ int common = 0;
+ /* check if conflict_set_i and ssidref have common types */
+ for (j=0; j<chwall_bin_pol.max_types; j++)
+ if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
+ chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
+ common = 1;
+ break;
+ }
+ if (common == 0)
+ continue; /* try next conflict set */
+ /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+ for (j=0; j<chwall_bin_pol.max_types; j++)
+ if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
+ !chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
+ chwall_bin_pol.conflict_aggregate_set[j]++;
+ }
+ read_unlock(&acm_bin_pol_rwlock);
+ return;
+}
+
+static void
+chwall_fail_domain_create(void *subject_ssid, ssidref_t ssidref)
+{
+ int i, j;
+ ssidref_t chwall_ssidref;
+ traceprintk("%s.\n", __func__);
+
+ read_lock(&acm_bin_pol_rwlock);
+ chwall_ssidref = GET_SSIDREF(ACM_CHINESE_WALL_POLICY, ssidref);
+ /* roll-back: re-adjust conflicting types aggregate */
+ for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
+ int common = 0;
+ /* check if conflict_set_i and ssidref have common types */
+ for (j=0; j<chwall_bin_pol.max_types; j++)
+ if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
+ chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
+ common = 1;
+ break;
+ }
+ if (common == 0)
+ continue; /* try next conflict set, this one does not include any type of chwall_ssidref */
+ /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+ for (j=0; j<chwall_bin_pol.max_types; j++)
+ if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
+ !chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
+ chwall_bin_pol.conflict_aggregate_set[j]--;
+ }
+ read_unlock(&acm_bin_pol_rwlock);
+}
+
+
+static void
+chwall_post_domain_destroy(void *object_ssid, domid_t id)
+{
+ int i,j;
+ struct chwall_ssid *chwall_ssidp =
+ GET_SSIDP(ACM_CHINESE_WALL_POLICY, (struct acm_ssid_domain *)object_ssid);
+ ssidref_t chwall_ssidref = chwall_ssidp->chwall_ssidref;
+
+ traceprintk("%s.\n", __func__);
+
+ read_lock(&acm_bin_pol_rwlock);
+ /* adjust running types set */
+ for (i=0; i< chwall_bin_pol.max_types; i++)
+ chwall_bin_pol.running_types[i] -=
+ chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + i];
+
+ /* roll-back: re-adjust conflicting types aggregate */
+ for (i=0; i<chwall_bin_pol.max_conflictsets; i++) {
+ int common = 0;
+ /* check if conflict_set_i and ssidref have common types */
+ for (j=0; j<chwall_bin_pol.max_types; j++)
+ if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
+ chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j]) {
+ common = 1;
+ break;
+ }
+ if (common == 0)
+ continue; /* try next conflict set, this one does not include any type of chwall_ssidref */
+ /* now add types of the conflict set to conflict_aggregate_set (except types in chwall_ssidref) */
+ for (j=0; j<chwall_bin_pol.max_types; j++)
+ if (chwall_bin_pol.conflict_sets[i*chwall_bin_pol.max_types + j] &&
+ !chwall_bin_pol.ssidrefs[chwall_ssidref*chwall_bin_pol.max_types + j])
+ chwall_bin_pol.conflict_aggregate_set[j]--;
+ }
+ read_unlock(&acm_bin_pol_rwlock);
+ return;
+}
+
+struct acm_operations acm_chinesewall_ops = {
+ /* policy management services */
+ .init_domain_ssid = chwall_init_domain_ssid,
+ .free_domain_ssid = chwall_free_domain_ssid,
+ .dump_binary_policy = chwall_dump_policy,
+ .set_binary_policy = chwall_set_policy,
+ .dump_statistics = chwall_dump_stats,
+ /* domain management control hooks */
+ .pre_domain_create = chwall_pre_domain_create,
+ .post_domain_create = chwall_post_domain_create,
+ .fail_domain_create = chwall_fail_domain_create,
+ .post_domain_destroy = chwall_post_domain_destroy,
+ /* event channel control hooks */
+ .pre_eventchannel_unbound = NULL,
+ .fail_eventchannel_unbound = NULL,
+ .pre_eventchannel_interdomain = NULL,
+ .fail_eventchannel_interdomain = NULL,
+ /* grant table control hooks */
+ .pre_grant_map_ref = NULL,
+ .fail_grant_map_ref = NULL,
+ .pre_grant_setup = NULL,
+ .fail_grant_setup = NULL,
+};
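For illustration, here is a minimal standalone sketch (not part of the patch) of the core Chinese Wall check that chwall_pre_domain_create above implements: a new domain is denied if any type in its ssidref row is already marked in the conflict aggregate set built from running domains. All names, array sizes, and values below are made up for the example.

#include <stdio.h>

#define MAX_TYPES 3

/* one row per ssidref: which types the domain carries */
static const unsigned short ssidrefs[][MAX_TYPES] = {
    {1, 0, 0},   /* ssidref 0: type A */
    {0, 1, 0},   /* ssidref 1: type B */
    {0, 0, 1},   /* ssidref 2: type C */
};

/* types that currently conflict with running domains */
static unsigned short conflict_aggregate_set[MAX_TYPES] = {0, 1, 0};

static int chwall_would_deny(unsigned int ssidref)
{
    int i;
    for (i = 0; i < MAX_TYPES; i++)
        if (conflict_aggregate_set[i] && ssidrefs[ssidref][i])
            return 1;          /* conflict: deny domain creation */
    return 0;                  /* no conflict: permit */
}

int main(void)
{
    printf("ssidref 0 -> %s\n", chwall_would_deny(0) ? "denied" : "permitted");
    printf("ssidref 1 -> %s\n", chwall_would_deny(1) ? "denied" : "permitted");
    return 0;
}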
diff --git a/xen/acm/acm_core.c b/xen/acm/acm_core.c
new file mode 100644
index 0000000000..fe5bacdb6d
--- /dev/null
+++ b/xen/acm/acm_core.c
@@ -0,0 +1,205 @@
+/****************************************************************
+ * acm_core.c
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * sHype access control module (ACM)
+ * This file handles initialization of the ACM
+ * as well as initializing/freeing security
+ * identifiers for domains (it calls on active
+ * policy hook functions).
+ *
+ */
+
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <xen/types.h>
+#include <xen/lib.h>
+#include <xen/delay.h>
+#include <xen/sched.h>
+#include <acm/acm_hooks.h>
+#include <acm/acm_endian.h>
+
+/* debug:
+ * include/acm/acm_hooks.h defines a constant ACM_TRACE_MODE;
+ * define/undefine this constant to receive / suppress any
+ * security hook debug output of sHype
+ *
+ * include/public/acm.h defines a constant ACM_DEBUG
+ * define/undefine this constant to receive non-hook-related
+ * debug output.
+ */
+
+/* function prototypes */
+int acm_init_chwall_policy(void);
+int acm_init_ste_policy(void);
+
+extern struct acm_operations acm_chinesewall_ops,
+ acm_simple_type_enforcement_ops, acm_null_ops;
+
+/* global ops structs called by the hooks */
+struct acm_operations *acm_primary_ops = NULL;
+/* called in hook if-and-only-if primary succeeds */
+struct acm_operations *acm_secondary_ops = NULL;
+
+/* acm global binary policy (points to 'local' primary and secondary policies) */
+struct acm_binary_policy acm_bin_pol;
+/* acm binary policy lock */
+rwlock_t acm_bin_pol_rwlock = RW_LOCK_UNLOCKED;
+
+/* until we have endian support in Xen, we discover it at runtime */
+u8 little_endian = 1;
+void acm_set_endian(void)
+{
+ u32 test = 1;
+ if (*((u8 *)&test) == 1) {
+ printk("ACM module running in LITTLE ENDIAN.\n");
+ little_endian = 1;
+ } else {
+ printk("ACM module running in BIG ENDIAN.\n");
+ little_endian = 0;
+ }
+}
+
+/* initialize global security policy for Xen; policy write-locked already */
+static void
+acm_init_binary_policy(void *primary, void *secondary)
+{
+ acm_bin_pol.primary_policy_code = 0;
+ acm_bin_pol.secondary_policy_code = 0;
+ acm_bin_pol.primary_binary_policy = primary;
+ acm_bin_pol.secondary_binary_policy = secondary;
+}
+
+int
+acm_init(void)
+{
+ int ret = -EINVAL;
+
+ acm_set_endian();
+ write_lock(&acm_bin_pol_rwlock);
+
+ if (ACM_USE_SECURITY_POLICY == ACM_CHINESE_WALL_POLICY) {
+ acm_init_binary_policy(NULL, NULL);
+ acm_init_chwall_policy();
+ acm_bin_pol.primary_policy_code = ACM_CHINESE_WALL_POLICY;
+ acm_primary_ops = &acm_chinesewall_ops;
+ acm_bin_pol.secondary_policy_code = ACM_NULL_POLICY;
+ acm_secondary_ops = &acm_null_ops;
+ ret = ACM_OK;
+ } else if (ACM_USE_SECURITY_POLICY == ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) {
+ acm_init_binary_policy(NULL, NULL);
+ acm_init_ste_policy();
+ acm_bin_pol.primary_policy_code = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY;
+ acm_primary_ops = &acm_simple_type_enforcement_ops;
+ acm_bin_pol.secondary_policy_code = ACM_NULL_POLICY;
+ acm_secondary_ops = &acm_null_ops;
+ ret = ACM_OK;
+ } else if (ACM_USE_SECURITY_POLICY == ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY) {
+ acm_init_binary_policy(NULL, NULL);
+ acm_init_chwall_policy();
+ acm_init_ste_policy();
+ acm_bin_pol.primary_policy_code = ACM_CHINESE_WALL_POLICY;
+ acm_primary_ops = &acm_chinesewall_ops;
+ acm_bin_pol.secondary_policy_code = ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY;
+ acm_secondary_ops = &acm_simple_type_enforcement_ops;
+ ret = ACM_OK;
+ } else if (ACM_USE_SECURITY_POLICY == ACM_NULL_POLICY) {
+ acm_init_binary_policy(NULL, NULL);
+ acm_bin_pol.primary_policy_code = ACM_NULL_POLICY;
+ acm_primary_ops = &acm_null_ops;
+ acm_bin_pol.secondary_policy_code = ACM_NULL_POLICY;
+ acm_secondary_ops = &acm_null_ops;
+ ret = ACM_OK;
+ }
+ write_unlock(&acm_bin_pol_rwlock);
+
+ if (ret != ACM_OK)
+ return -EINVAL;
+ printk("%s: Enforcing Primary %s, Secondary %s.\n", __func__,
+ ACM_POLICY_NAME(acm_bin_pol.primary_policy_code), ACM_POLICY_NAME(acm_bin_pol.secondary_policy_code));
+ return ACM_OK;
+}
+
+
+int
+acm_init_domain_ssid(domid_t id, ssidref_t ssidref)
+{
+ struct acm_ssid_domain *ssid;
+ struct domain *subj = find_domain_by_id(id);
+ int ret1, ret2;
+
+ if (subj == NULL) {
+ printk("%s: ACM_NULL_POINTER ERROR (id=%x).\n", __func__, id);
+ return ACM_NULL_POINTER_ERROR;
+ }
+ if ((ssid = xmalloc(struct acm_ssid_domain)) == NULL)
+ return ACM_INIT_SSID_ERROR;
+
+ ssid->datatype = DOMAIN;
+ ssid->subject = subj;
+ ssid->domainid = subj->domain_id;
+ ssid->primary_ssid = NULL;
+ ssid->secondary_ssid = NULL;
+
+ if (ACM_USE_SECURITY_POLICY != ACM_NULL_POLICY)
+ ssid->ssidref = ssidref;
+ else
+ ssid->ssidref = ACM_DEFAULT_SSID;
+
+ subj->ssid = ssid;
+ /* now fill in primary and secondary parts; we only get here through hooks */
+ if (acm_primary_ops->init_domain_ssid != NULL)
+ ret1 = acm_primary_ops->init_domain_ssid(&(ssid->primary_ssid), ssidref);
+ else
+ ret1 = ACM_OK;
+
+ if (acm_secondary_ops->init_domain_ssid != NULL)
+ ret2 = acm_secondary_ops->init_domain_ssid(&(ssid->secondary_ssid), ssidref);
+ else
+ ret2 = ACM_OK;
+
+ if ((ret1 != ACM_OK) || (ret2 != ACM_OK)) {
+ printk("%s: ERROR instantiating individual ssids for domain 0x%02x.\n",
+ __func__, subj->domain_id);
+ acm_free_domain_ssid(ssid);
+ put_domain(subj);
+ return ACM_INIT_SSID_ERROR;
+ }
+ printk("%s: assigned domain %x the ssidref=%x.\n", __func__, id, ssid->ssidref);
+ put_domain(subj);
+ return ACM_OK;
+}
+
+
+int
+acm_free_domain_ssid(struct acm_ssid_domain *ssid)
+{
+ domid_t id;
+
+ /* domain is already gone, just ssid is left */
+ if (ssid == NULL) {
+ printk("%s: ACM_NULL_POINTER ERROR.\n", __func__);
+ return ACM_NULL_POINTER_ERROR;
+ }
+ id = ssid->domainid;
+ ssid->subject = NULL;
+
+ if (acm_primary_ops->free_domain_ssid != NULL) /* may be NULL, e.g. for the null policy */
+ acm_primary_ops->free_domain_ssid(ssid->primary_ssid);
+ ssid->primary_ssid = NULL;
+ if (acm_secondary_ops->free_domain_ssid != NULL)
+ acm_secondary_ops->free_domain_ssid(ssid->secondary_ssid);
+ ssid->secondary_ssid = NULL;
+ xfree(ssid);
+ printkd("%s: Freed individual domain ssid (domain=%02x).\n",__func__, id);
+ return ACM_OK;
+}
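For quick reference, acm_init above selects the following primary/secondary policy combinations from the compile-time ACM_USE_SECURITY_POLICY setting; any other value leaves ret at -EINVAL and initialization fails:

  ACM_CHINESE_WALL_POLICY                              -> primary: Chinese Wall,  secondary: null
  ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY                   -> primary: STE,           secondary: null
  ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY  -> primary: Chinese Wall,  secondary: STE
  ACM_NULL_POLICY                                      -> primary: null,          secondary: null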
diff --git a/xen/acm/acm_null_hooks.c b/xen/acm/acm_null_hooks.c
new file mode 100644
index 0000000000..6433cbfed6
--- /dev/null
+++ b/xen/acm/acm_null_hooks.c
@@ -0,0 +1,76 @@
+/****************************************************************
+ * acm_null_hooks.c
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+#include <acm/acm_hooks.h>
+
+static int
+null_init_domain_ssid(void **chwall_ssid, ssidref_t ssidref)
+{
+ return ACM_OK;
+}
+
+
+static void
+null_free_domain_ssid(void *chwall_ssid)
+{
+ return;
+}
+
+
+static int
+null_dump_binary_policy(u8 *buf, u16 buf_size)
+{
+ return 0;
+}
+
+
+
+static int
+null_set_binary_policy(u8 *buf, u16 buf_size)
+{
+ return -1;
+}
+
+
+static int
+null_dump_stats(u8 *buf, u16 buf_size)
+{
+ /* no stats for NULL policy */
+ return 0;
+}
+
+
+/* now define the hook structure similarly to LSM */
+struct acm_operations acm_null_ops = {
+ .init_domain_ssid = null_init_domain_ssid,
+ .free_domain_ssid = null_free_domain_ssid,
+ .dump_binary_policy = null_dump_binary_policy,
+ .set_binary_policy = null_set_binary_policy,
+ .dump_statistics = null_dump_stats,
+ /* domain management control hooks */
+ .pre_domain_create = NULL,
+ .post_domain_create = NULL,
+ .fail_domain_create = NULL,
+ .post_domain_destroy = NULL,
+ /* event channel control hooks */
+ .pre_eventchannel_unbound = NULL,
+ .fail_eventchannel_unbound = NULL,
+ .pre_eventchannel_interdomain = NULL,
+ .fail_eventchannel_interdomain = NULL,
+ /* grant table control hooks */
+ .pre_grant_map_ref = NULL,
+ .fail_grant_map_ref = NULL,
+ .pre_grant_setup = NULL,
+ .fail_grant_setup = NULL
+
+};
diff --git a/xen/acm/acm_policy.c b/xen/acm/acm_policy.c
new file mode 100644
index 0000000000..3e08130d43
--- /dev/null
+++ b/xen/acm/acm_policy.c
@@ -0,0 +1,197 @@
+/****************************************************************
+ * acm_policy.c
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * Contributions:
+ * Stefan Berger <stefanb@watson.ibm.com>
+ * support for network-byte-order binary policies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * sHype access control policy management for Xen.
+ * This interface allows policy tools in authorized
+ * domains to interact with the Xen access control module
+ *
+ */
+
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <xen/types.h>
+#include <xen/lib.h>
+#include <xen/delay.h>
+#include <xen/sched.h>
+#include <public/policy_ops.h>
+#include <acm/acm_core.h>
+#include <acm/acm_hooks.h>
+#include <acm/acm_endian.h>
+
+int
+acm_set_policy(void *buf, u16 buf_size, u16 policy)
+{
+ u8 *policy_buffer = NULL;
+ struct acm_policy_buffer *pol;
+
+ if (policy != ACM_USE_SECURITY_POLICY) {
+ printk("%s: Loading incompatible policy (running: %s).\n", __func__,
+ ACM_POLICY_NAME(ACM_USE_SECURITY_POLICY));
+ return -EFAULT;
+ }
+ /* now check correct buffer sizes for policy combinations */
+ if (policy == ACM_NULL_POLICY) {
+ printkd("%s: NULL Policy, no policy needed.\n", __func__);
+ goto out;
+ }
+ if (buf_size < sizeof(struct acm_policy_buffer))
+ return -EFAULT;
+ /* 1. copy buffer from domain */
+ if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
+ goto error_free;
+ if (copy_from_user(policy_buffer, buf, buf_size)) {
+ printk("%s: Error copying!\n",__func__);
+ goto error_free;
+ }
+ /* 2. some sanity checking */
+ pol = (struct acm_policy_buffer *)policy_buffer;
+
+ if ((ntohl(pol->magic) != ACM_MAGIC) ||
+ (ntohs(pol->primary_policy_code) != acm_bin_pol.primary_policy_code) ||
+ (ntohs(pol->secondary_policy_code) != acm_bin_pol.secondary_policy_code)) {
+ printkd("%s: Wrong policy magics!\n", __func__);
+ goto error_free;
+ }
+ if (buf_size != ntohl(pol->len)) {
+ printk("%s: ERROR in buf size.\n", __func__);
+ goto error_free;
+ }
+
+ /* get bin_policy lock and rewrite policy (release old one) */
+ write_lock(&acm_bin_pol_rwlock);
+
+ /* 3. now get/set primary policy data */
+ if (acm_primary_ops->set_binary_policy(policy_buffer + ntohs(pol->primary_buffer_offset),
+ ntohs(pol->secondary_buffer_offset) -
+ ntohs(pol->primary_buffer_offset))) {
+ goto error_lock_free;
+ }
+ /* 4. now get/set secondary policy data */
+ if (acm_secondary_ops->set_binary_policy(policy_buffer + ntohs(pol->secondary_buffer_offset),
+ ntohl(pol->len) -
+ ntohs(pol->secondary_buffer_offset))) {
+ goto error_lock_free;
+ }
+ write_unlock(&acm_bin_pol_rwlock);
+ out:
+ printk("%s: Done .\n", __func__);
+ if (policy_buffer != NULL)
+ xfree(policy_buffer);
+ return ACM_OK;
+
+ error_lock_free:
+ write_unlock(&acm_bin_pol_rwlock);
+ error_free:
+ printk("%s: Error setting policy.\n", __func__);
+ if (policy_buffer != NULL)
+ xfree(policy_buffer);
+ return -ENOMEM;
+}
+
+int
+acm_get_policy(void *buf, u16 buf_size)
+{
+ u8 *policy_buffer;
+ int ret;
+ struct acm_policy_buffer *bin_pol;
+
+ if ((policy_buffer = xmalloc_array(u8, buf_size)) == NULL)
+ return -ENOMEM;
+
+ read_lock(&acm_bin_pol_rwlock);
+ /* future: read policy from file and set it */
+ bin_pol = (struct acm_policy_buffer *)policy_buffer;
+ bin_pol->magic = htonl(ACM_MAGIC);
+ bin_pol->policyversion = htonl(POLICY_INTERFACE_VERSION);
+ bin_pol->primary_policy_code = htons(acm_bin_pol.primary_policy_code);
+ bin_pol->secondary_policy_code = htons(acm_bin_pol.secondary_policy_code);
+
+ bin_pol->len = htonl(sizeof(struct acm_policy_buffer));
+ bin_pol->primary_buffer_offset = htons(ntohl(bin_pol->len));
+ bin_pol->secondary_buffer_offset = htons(ntohl(bin_pol->len));
+
+ ret = acm_primary_ops->dump_binary_policy (policy_buffer + ntohs(bin_pol->primary_buffer_offset),
+ buf_size - ntohs(bin_pol->primary_buffer_offset));
+ if (ret < 0) {
+ printk("%s: ERROR creating primary policy buffer.\n", __func__);
+ read_unlock(&acm_bin_pol_rwlock);
+ xfree(policy_buffer);
+ return -1;
+ }
+ bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
+ bin_pol->secondary_buffer_offset = htons(ntohl(bin_pol->len));
+
+ ret = acm_secondary_ops->dump_binary_policy(policy_buffer + ntohs(bin_pol->secondary_buffer_offset),
+ buf_size - ntohs(bin_pol->secondary_buffer_offset));
+ if (ret < 0) {
+ printk("%s: ERROR creating secondary policy buffer.\n", __func__);
+ read_unlock(&acm_bin_pol_rwlock);
+ xfree(policy_buffer);
+ return -1;
+ }
+ bin_pol->len = htonl(ntohl(bin_pol->len) + ret);
+ read_unlock(&acm_bin_pol_rwlock);
+ if (copy_to_user(buf, policy_buffer, ntohl(bin_pol->len))) {
+ xfree(policy_buffer);
+ return -EFAULT;
+ }
+ xfree(policy_buffer);
+ return ACM_OK;
+}
+
+int
+acm_dump_statistics(void *buf, u16 buf_size)
+{
+ /* send stats to user space */
+ u8 *stats_buffer;
+ int len1, len2;
+ struct acm_stats_buffer acm_stats;
+
+ if ((stats_buffer = xmalloc_array(u8, buf_size)) == NULL)
+ return -ENOMEM;
+
+ read_lock(&acm_bin_pol_rwlock);
+
+ len1 = acm_primary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer),
+ buf_size - sizeof(struct acm_stats_buffer));
+ if (len1 < 0)
+ goto error_lock_free;
+
+ len2 = acm_secondary_ops->dump_statistics(stats_buffer + sizeof(struct acm_stats_buffer) + len1,
+ buf_size - sizeof(struct acm_stats_buffer) - len1);
+ if (len2 < 0)
+ goto error_lock_free;
+
+ acm_stats.magic = htonl(ACM_MAGIC);
+ acm_stats.policyversion = htonl(POLICY_INTERFACE_VERSION);
+ acm_stats.primary_policy_code = htons(acm_bin_pol.primary_policy_code);
+ acm_stats.secondary_policy_code = htons(acm_bin_pol.secondary_policy_code);
+ acm_stats.primary_stats_offset = htons(sizeof(struct acm_stats_buffer));
+ acm_stats.secondary_stats_offset = htons(sizeof(struct acm_stats_buffer) + len1);
+ acm_stats.len = htonl(sizeof(struct acm_stats_buffer) + len1 + len2);
+ memcpy(stats_buffer, &acm_stats, sizeof(struct acm_stats_buffer));
+
+ if (copy_to_user(buf, stats_buffer, sizeof(struct acm_stats_buffer) + len1 + len2))
+ goto error_lock_free;
+
+ read_unlock(&acm_bin_pol_rwlock);
+ xfree(stats_buffer);
+ return ACM_OK;
+
+ error_lock_free:
+ read_unlock(&acm_bin_pol_rwlock);
+ xfree(stats_buffer);
+ return -EFAULT;
+}
+
+/*eof*/
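The getpolicy path above returns a buffer whose header fields are stored in network byte order (32-bit fields written with htonl, 16-bit policy codes and offsets with htons), with the primary and secondary policy blobs located at the recorded offsets. Below is a hedged sketch of how a user-space consumer could read such a header; the struct policy_hdr layout is a stand-in introduced only for illustration (the authoritative definition is struct acm_policy_buffer in the patch's public headers, which this hunk does not show), and the magic value is a placeholder.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct policy_hdr {                   /* illustrative mirror, not the real type */
    uint32_t magic;
    uint32_t policyversion;
    uint32_t len;
    uint16_t primary_policy_code;
    uint16_t primary_buffer_offset;
    uint16_t secondary_policy_code;
    uint16_t secondary_buffer_offset;
};

static void show_policy_header(const uint8_t *buf)
{
    struct policy_hdr h;
    memcpy(&h, buf, sizeof(h));       /* avoid alignment assumptions */
    printf("magic=%x version=%u total_len=%u\n",
           ntohl(h.magic), ntohl(h.policyversion), ntohl(h.len));
    printf("primary code=%u at offset %u, secondary code=%u at offset %u\n",
           ntohs(h.primary_policy_code), ntohs(h.primary_buffer_offset),
           ntohs(h.secondary_policy_code), ntohs(h.secondary_buffer_offset));
}

int main(void)
{
    uint8_t buf[sizeof(struct policy_hdr)];
    struct policy_hdr h = {
        .magic = htonl(0x12345678),   /* placeholder; the real value is ACM_MAGIC */
        .policyversion = htonl(1),
        .len = htonl(sizeof(h)),
        .primary_buffer_offset = htons(sizeof(h)),
        .secondary_buffer_offset = htons(sizeof(h)),
    };
    memcpy(buf, &h, sizeof(h));
    show_policy_header(buf);
    return 0;
}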
diff --git a/xen/acm/acm_simple_type_enforcement_hooks.c b/xen/acm/acm_simple_type_enforcement_hooks.c
new file mode 100644
index 0000000000..17e75deca8
--- /dev/null
+++ b/xen/acm/acm_simple_type_enforcement_hooks.c
@@ -0,0 +1,638 @@
+/****************************************************************
+ * acm_simple_type_enforcement_hooks.c
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * Contributors:
+ * Stefan Berger <stefanb@watson.ibm.com>
+ * support for network order binary policies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * sHype Simple Type Enforcement for Xen
+ * STE controls which domains can set up sharing
+ * (event channels right now) with which other domains. Hooks
+ * are defined and called throughout Xen when domains bind to
+ * shared resources (set up event channels); a domain is allowed
+ * to set up sharing with another domain if and only if both
+ * domains share at least one common type.
+ *
+ */
+#include <xen/lib.h>
+#include <asm/types.h>
+#include <asm/current.h>
+#include <acm/acm_hooks.h>
+#include <asm/atomic.h>
+#include <acm/acm_endian.h>
+
+/* local cache structures for the simple type enforcement policy */
+struct ste_binary_policy ste_bin_pol;
+
+static inline int have_common_type (ssidref_t ref1, ssidref_t ref2) {
+ int i;
+ for(i=0; i< ste_bin_pol.max_types; i++)
+ if ( ste_bin_pol.ssidrefs[ref1*ste_bin_pol.max_types + i] &&
+ ste_bin_pol.ssidrefs[ref2*ste_bin_pol.max_types + i]) {
+ printkd("%s: common type #%02x.\n", __func__, i);
+ return 1;
+ }
+ return 0;
+}
+
+/* Helper function: return = (subj and obj share a common type) */
+static int share_common_type(struct domain *subj, struct domain *obj)
+{
+ ssidref_t ref_s, ref_o;
+ int ret;
+
+ if ((subj == NULL) || (obj == NULL) || (subj->ssid == NULL) || (obj->ssid == NULL))
+ return 0;
+ read_lock(&acm_bin_pol_rwlock);
+ /* lookup the policy-local ssids */
+ ref_s = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)subj->ssid)))->ste_ssidref;
+ ref_o = ((struct ste_ssid *)(GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)obj->ssid)))->ste_ssidref;
+ /* check whether subj and obj share a common ste type */
+ ret = have_common_type(ref_s, ref_o);
+ read_unlock(&acm_bin_pol_rwlock);
+ return ret;
+}
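To make the sharing rule that have_common_type implements above concrete, here is a reduced standalone sketch (not part of the patch): two domains may establish sharing, e.g. bind an event channel, if and only if their ssidref rows overlap in at least one type. The array contents and sizes are illustrative only.

#include <stdio.h>

#define MAX_TYPES 3

static const unsigned short ste_ssidrefs[][MAX_TYPES] = {
    {1, 1, 0},   /* ssidref 0: types {0,1} */
    {0, 1, 1},   /* ssidref 1: types {1,2} */
    {1, 0, 0},   /* ssidref 2: type  {0}   */
};

static int common_type(unsigned int r1, unsigned int r2)
{
    int i;
    for (i = 0; i < MAX_TYPES; i++)
        if (ste_ssidrefs[r1][i] && ste_ssidrefs[r2][i])
            return 1;                       /* sharing permitted */
    return 0;                               /* sharing denied */
}

int main(void)
{
    printf("0 <-> 1: %s\n", common_type(0, 1) ? "permitted" : "denied");
    printf("1 <-> 2: %s\n", common_type(1, 2) ? "permitted" : "denied");
    return 0;
}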
+
+/*
+ * Initialize the simple type enforcement policy (will be filled by the
+ * policy partition using the setpolicy command)
+ */
+int acm_init_ste_policy(void)
+{
+ /* minimal startup policy; policy write-locked already */
+ ste_bin_pol.max_types = 1;
+ ste_bin_pol.max_ssidrefs = 1;
+ ste_bin_pol.ssidrefs = (domaintype_t *)xmalloc_array(domaintype_t, 1);
+
+ if (ste_bin_pol.ssidrefs == NULL)
+ return ACM_INIT_SSID_ERROR;
+
+ /* initialize state */
+ ste_bin_pol.ssidrefs[0] = 1;
+
+ /* init stats */
+ atomic_set(&(ste_bin_pol.ec_eval_count), 0);
+ atomic_set(&(ste_bin_pol.ec_denied_count), 0);
+ atomic_set(&(ste_bin_pol.ec_cachehit_count), 0);
+ atomic_set(&(ste_bin_pol.gt_eval_count), 0);
+ atomic_set(&(ste_bin_pol.gt_denied_count), 0);
+ atomic_set(&(ste_bin_pol.gt_cachehit_count), 0);
+ return ACM_OK;
+}
+
+
+/* ste initialization function hooks */
+static int
+ste_init_domain_ssid(void **ste_ssid, ssidref_t ssidref)
+{
+ int i;
+ struct ste_ssid *ste_ssidp = xmalloc(struct ste_ssid);
+ traceprintk("%s.\n", __func__);
+
+ if (ste_ssidp == NULL)
+ return ACM_INIT_SSID_ERROR;
+
+ /* get policy-local ssid reference */
+ ste_ssidp->ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
+ if (ste_ssidp->ste_ssidref >= ste_bin_pol.max_ssidrefs) {
+ printkd("%s: ERROR ste_ssidref (%x) > max(%x).\n",
+ __func__, ste_ssidp->ste_ssidref, ste_bin_pol.max_ssidrefs-1);
+ xfree(ste_ssidp);
+ return ACM_INIT_SSID_ERROR;
+ }
+ /* clean ste cache */
+ for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+ ste_ssidp->ste_cache[i].valid = FREE;
+
+ (*ste_ssid) = ste_ssidp;
+ printkd("%s: determined ste_ssidref to %x.\n",
+ __func__, ste_ssidp->ste_ssidref);
+ return ACM_OK;
+}
+
+
+static void
+ste_free_domain_ssid(void *ste_ssid)
+{
+ traceprintk("%s.\n", __func__);
+ if (ste_ssid != NULL)
+ xfree(ste_ssid);
+ return;
+}
+
+/* dump the binary type enforcement policy; policy read-locked already */
+static int
+ste_dump_policy(u8 *buf, u16 buf_size) {
+ struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
+ int ret = 0;
+
+ ste_buf->ste_max_types = htons(ste_bin_pol.max_types);
+ ste_buf->ste_max_ssidrefs = htons(ste_bin_pol.max_ssidrefs);
+ ste_buf->policy_code = htons(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY);
+ ste_buf->ste_ssid_offset = htons(sizeof(struct acm_ste_policy_buffer));
+ ret = ntohs(ste_buf->ste_ssid_offset) +
+ sizeof(domaintype_t)*ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types;
+
+ /* now copy buffer over */
+ arrcpy(buf + ntohs(ste_buf->ste_ssid_offset),
+ ste_bin_pol.ssidrefs,
+ sizeof(domaintype_t),
+ ste_bin_pol.max_ssidrefs*ste_bin_pol.max_types);
+
+ return ret;
+}
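+
+/*
+ * Buffer layout produced by ste_dump_policy() above (derived from the code;
+ * the header field order is whatever struct acm_ste_policy_buffer defines,
+ * and all multi-byte header values are in network byte order):
+ *
+ *   offset 0 ................... struct acm_ste_policy_buffer header
+ *   offset ste_ssid_offset ..... domaintype_t ssidrefs[max_ssidrefs * max_types]
+ *
+ * The return value is the total number of bytes occupied by header plus matrix.
+ */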
+
+/* ste_init_state is called when a policy is changed to detect violations (return != 0).
+ * From a security point of view, we simulate that all running domains are re-started and
+ * all sharing decisions are replayed to detect violations of the new policy by the
+ * current sharing behavior (right now: event channels; in the future: also grant tables)
+ */
+static int
+ste_init_state(struct acm_ste_policy_buffer *ste_buf, domaintype_t *ssidrefs)
+{
+ int violation = 1;
+ struct ste_ssid *ste_ssid, *ste_rssid;
+ ssidref_t ste_ssidref, ste_rssidref;
+ struct domain **pd, *rdom;
+ domid_t rdomid;
+ grant_entry_t sha_copy;
+ int port, i;
+
+ read_lock(&domlist_lock); /* go by domain? or directly by global? event/grant list */
+ /* go through all domains and adjust policy as if this domain was started now */
+ pd = &domain_list;
+ for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
+ ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)(*pd)->ssid);
+ ste_ssidref = ste_ssid->ste_ssidref;
+ traceprintk("%s: validating policy for eventch domain %x (ste-Ref=%x).\n",
+ __func__, (*pd)->domain_id, ste_ssidref);
+ /* a) check for event channel conflicts */
+ for (port=0; port < NR_EVTCHN_BUCKETS; port++) {
+ spin_lock(&(*pd)->evtchn_lock);
+ if ((*pd)->evtchn[port] == NULL) {
+ spin_unlock(&(*pd)->evtchn_lock);
+ continue;
+ }
+ if ((*pd)->evtchn[port]->state == ECS_INTERDOMAIN) {
+ rdom = (*pd)->evtchn[port]->u.interdomain.remote_dom;
+ rdomid = rdom->domain_id;
+ /* rdom now has remote domain */
+ ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)(rdom->ssid));
+ ste_rssidref = ste_rssid->ste_ssidref;
+ } else if ((*pd)->evtchn[port]->state == ECS_UNBOUND) {
+ rdomid = (*pd)->evtchn[port]->u.unbound.remote_domid;
+ if ((rdom = find_domain_by_id(rdomid)) == NULL) {
+ printk("%s: Error finding domain to id %x!\n", __func__, rdomid);
+ goto out;
+ }
+ /* rdom now has remote domain */
+ ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)(rdom->ssid));
+ ste_rssidref = ste_rssid->ste_ssidref;
+ put_domain(rdom);
+ } else {
+ spin_unlock(&(*pd)->evtchn_lock);
+ continue; /* port unused */
+ }
+ spin_unlock(&(*pd)->evtchn_lock);
+
+ /* rdom now has remote domain */
+ ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)(rdom->ssid));
+ ste_rssidref = ste_rssid->ste_ssidref;
+ traceprintk("%s: eventch: domain %x (ssidref %x) --> domain %x (rssidref %x) used (port %x).\n",
+ __func__, (*pd)->domain_id, ste_ssidref, rdom->domain_id, ste_rssidref, port);
+ /* check whether subj->ssid and obj->ssid share a common type */
+ if (!have_common_type(ste_ssidref, ste_rssidref)) {
+ printkd("%s: Policy violation in event channel domain %x -> domain %x.\n",
+ __func__, (*pd)->domain_id, rdomid);
+ goto out;
+ }
+ }
+ /* b) check for grant table conflicts on shared pages */
+ if ((*pd)->grant_table->shared == NULL) {
+ printkd("%s: Grant ... sharing for domain %x not setup!\n", __func__, (*pd)->domain_id);
+ continue;
+ }
+ for ( i = 0; i < NR_GRANT_ENTRIES; i++ ) {
+ sha_copy = (*pd)->grant_table->shared[i];
+ if ( sha_copy.flags ) {
+ printkd("%s: grant dom (%hu) SHARED (%d) flags:(%hx) dom:(%hu) frame:(%lx)\n",
+ __func__, (*pd)->domain_id, i, sha_copy.flags, sha_copy.domid,
+ (unsigned long)sha_copy.frame);
+ rdomid = sha_copy.domid;
+ if ((rdom = find_domain_by_id(rdomid)) == NULL) {
+ printkd("%s: domain not found ERROR!\n", __func__);
+ goto out;
+ };
+ /* rdom now has remote domain */
+ ste_rssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)(rdom->ssid));
+ ste_rssidref = ste_rssid->ste_ssidref;
+ put_domain(rdom);
+ if (!have_common_type(ste_ssidref, ste_rssidref)) {
+ printkd("%s: Policy violation in grant table sharing domain %x -> domain %x.\n",
+ __func__, (*pd)->domain_id, rdomid);
+ goto out;
+ }
+ }
+ }
+ }
+ violation = 0;
+ out:
+ read_unlock(&domlist_lock);
+ return violation;
+ /* returning "violation != 0" means that existing sharing between domains would not
+ * have been allowed if the new policy had been enforced before the sharing; for ste,
+ * this means that there are at least 2 domains that have established sharing through
+ * event-channels or grant-tables but these two domains no longer share a common
+ * type in the typesets referenced by their ssidrefs */
+}
+
+/* set new policy; policy write-locked already */
+static int
+ste_set_policy(u8 *buf, u16 buf_size)
+{
+ struct acm_ste_policy_buffer *ste_buf = (struct acm_ste_policy_buffer *)buf;
+ void *ssidrefsbuf;
+ struct ste_ssid *ste_ssid;
+ struct domain **pd;
+ int i;
+
+ /* Convert endianness of policy */
+ ste_buf->policy_code = ntohs(ste_buf->policy_code);
+ ste_buf->ste_max_types = ntohs(ste_buf->ste_max_types);
+ ste_buf->ste_max_ssidrefs = ntohs(ste_buf->ste_max_ssidrefs);
+ ste_buf->ste_ssid_offset = ntohs(ste_buf->ste_ssid_offset);
+
+ /* 1. create and copy-in new ssidrefs buffer */
+ ssidrefsbuf = xmalloc_array(u8, sizeof(domaintype_t)*ste_buf->ste_max_types*ste_buf->ste_max_ssidrefs);
+ if (ssidrefsbuf == NULL) {
+ return -ENOMEM;
+ }
+ if (ste_buf->ste_ssid_offset + sizeof(domaintype_t) * ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types > buf_size)
+ goto error_free;
+
+ arrcpy(ssidrefsbuf,
+ buf + ste_buf->ste_ssid_offset,
+ sizeof(domaintype_t),
+ ste_buf->ste_max_ssidrefs*ste_buf->ste_max_types);
+
+ /* 2. now re-calculate sharing decisions based on running domains;
+ * this can fail if the new policy conflicts with the sharing of running domains;
+ * for now: reject the violating new policy; future: adjust sharing by revoking it */
+ if (ste_init_state(ste_buf, (domaintype_t *)ssidrefsbuf)) {
+ printk("%s: New policy conflicts with running domains. Policy load aborted.\n", __func__);
+ goto error_free; /* new policy conflicts with sharing of running domains */
+ }
+ /* 3. replace old policy (activate new policy) */
+ ste_bin_pol.max_types = ste_buf->ste_max_types;
+ ste_bin_pol.max_ssidrefs = ste_buf->ste_max_ssidrefs;
+ if (ste_bin_pol.ssidrefs)
+ xfree(ste_bin_pol.ssidrefs);
+ ste_bin_pol.ssidrefs = (domaintype_t *)ssidrefsbuf;
+
+ /* clear all ste caches */
+ read_lock(&domlist_lock);
+ pd = &domain_list;
+ for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
+ ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)(*pd)->ssid);
+ for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+ ste_ssid->ste_cache[i].valid = FREE;
+ }
+ read_unlock(&domlist_lock);
+ return ACM_OK;
+
+error_free:
+ printk("%s: ERROR setting policy.\n", __func__);
+ if (ssidrefsbuf != NULL) xfree(ssidrefsbuf);
+ return -EFAULT;
+}
+
+static int
+ste_dump_stats(u8 *buf, u16 buf_len)
+{
+ struct acm_ste_stats_buffer stats;
+
+#ifdef ACM_DEBUG
+ int i;
+ struct ste_ssid *ste_ssid;
+ struct domain **pd;
+
+ printk("ste: Decision caches:\n");
+ /* go through all domains and dump their decision-cache contents */
+ read_lock(&domlist_lock); /* go by domain? or directly by global? event/grant list */
+ pd = &domain_list;
+ for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
+ printk("ste: Cache Domain %02x.\n", (*pd)->domain_id);
+ ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)(*pd)->ssid);
+ for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+ printk("\t\tcache[%02x] = %s, domid=%x.\n", i,
+ (ste_ssid->ste_cache[i].valid == VALID) ?
+ "VALID" : "FREE",
+ (ste_ssid->ste_cache[i].valid == VALID) ?
+ ste_ssid->ste_cache[i].id : 0xffffffff);
+ }
+ read_unlock(&domlist_lock);
+ /* dump stats */
+ printk("STE-Policy Security Hook Statistics:\n");
+ printk("ste: event_channel eval_count = %x\n", atomic_read(&(ste_bin_pol.ec_eval_count)));
+ printk("ste: event_channel denied_count = %x\n", atomic_read(&(ste_bin_pol.ec_denied_count)));
+ printk("ste: event_channel cache_hit_count = %x\n", atomic_read(&(ste_bin_pol.ec_cachehit_count)));
+ printk("ste:\n");
+ printk("ste: grant_table eval_count = %x\n", atomic_read(&(ste_bin_pol.gt_eval_count)));
+ printk("ste: grant_table denied_count = %x\n", atomic_read(&(ste_bin_pol.gt_denied_count)));
+ printk("ste: grant_table cache_hit_count = %x\n", atomic_read(&(ste_bin_pol.gt_cachehit_count)));
+#endif
+
+ if (buf_len < sizeof(struct acm_ste_stats_buffer))
+ return -ENOMEM;
+
+ /* now send the hook counts to user space */
+ stats.ec_eval_count = htonl(atomic_read(&ste_bin_pol.ec_eval_count));
+ stats.gt_eval_count = htonl(atomic_read(&ste_bin_pol.gt_eval_count));
+ stats.ec_denied_count = htonl(atomic_read(&ste_bin_pol.ec_denied_count));
+ stats.gt_denied_count = htonl(atomic_read(&ste_bin_pol.gt_denied_count));
+ stats.ec_cachehit_count = htonl(atomic_read(&ste_bin_pol.ec_cachehit_count));
+ stats.gt_cachehit_count = htonl(atomic_read(&ste_bin_pol.gt_cachehit_count));
+ memcpy(buf, &stats, sizeof(struct acm_ste_stats_buffer));
+ return sizeof(struct acm_ste_stats_buffer);
+}
+
+
+/* we need to go through this before calling the hooks,
+ * returns 1 == cache hit */
+static int inline
+check_cache(struct domain *dom, domid_t rdom) {
+ struct ste_ssid *ste_ssid;
+ int i;
+
+ printkd("checking cache: %x --> %x.\n", dom->domain_id, rdom);
+ ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)(dom)->ssid);
+
+ for(i=0; i< ACM_TE_CACHE_SIZE; i++) {
+ if ((ste_ssid->ste_cache[i].valid == VALID) &&
+ (ste_ssid->ste_cache[i].id == rdom)) {
+ printkd("cache hit (entry %x, id= %x!\n", i, ste_ssid->ste_cache[i].id);
+ return 1;
+ }
+ }
+ return 0;
+}
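+
+/*
+ * Note (derived from the callers below): entries are only added by
+ * cache_result() after a permitted decision, so a cache hit here always
+ * means that sharing with that domain was already allowed under the
+ * current policy; denials are never cached.
+ */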
+
+
+/* we only get here if there is NO entry yet; no duplication check! */
+static void inline
+cache_result(struct domain *subj, struct domain *obj) {
+ struct ste_ssid *ste_ssid;
+ int i;
+ printkd("caching from doms: %x --> %x.\n", subj->domain_id, obj->domain_id);
+ ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)(subj)->ssid);
+ for(i=0; i< ACM_TE_CACHE_SIZE; i++)
+ if (ste_ssid->ste_cache[i].valid == FREE)
+ break;
+ if (i< ACM_TE_CACHE_SIZE) {
+ ste_ssid->ste_cache[i].valid = VALID;
+ ste_ssid->ste_cache[i].id = obj->domain_id;
+ } else
+ printk ("Cache of dom %x is full!\n", subj->domain_id);
+}
+
+/* deletes entries for domain 'id' from all caches (re-use) */
+static void inline
+clean_id_from_cache(domid_t id)
+{
+ struct ste_ssid *ste_ssid;
+ int i;
+ struct domain **pd;
+
+ printkd("deleting cache for dom %x.\n", id);
+
+ read_lock(&domlist_lock); /* look through caches of all domains */
+ pd = &domain_list;
+ for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) {
+ ste_ssid = GET_SSIDP(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY,
+ (struct acm_ssid_domain *)(*pd)->ssid);
+ for (i=0; i<ACM_TE_CACHE_SIZE; i++)
+ if ((ste_ssid->ste_cache[i].valid == VALID) &&
+ (ste_ssid->ste_cache[i].id == id))
+ ste_ssid->ste_cache[i].valid = FREE;
+ }
+ read_unlock(&domlist_lock);
+}
+
+/***************************
+ * Authorization functions
+ **************************/
+
+static int
+ste_pre_domain_create(void *subject_ssid, ssidref_t ssidref)
+{
+ /* check for ssidref in range for policy */
+ ssidref_t ste_ssidref;
+ traceprintk("%s.\n", __func__);
+
+ read_lock(&acm_bin_pol_rwlock);
+ ste_ssidref = GET_SSIDREF(ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY, ssidref);
+ if (ste_ssidref == ACM_DEFAULT_LOCAL_SSID) {
+ printk("%s: ERROR STE SSID is NOT SET but policy enforced.\n", __func__);
+ read_unlock(&acm_bin_pol_rwlock);
+ return ACM_ACCESS_DENIED; /* catching and indicating config error */
+ }
+ if (ste_ssidref >= ste_bin_pol.max_ssidrefs) {
+ printk("%s: ERROR ste_ssidref > max(%x).\n",
+ __func__, ste_bin_pol.max_ssidrefs-1);
+ read_unlock(&acm_bin_pol_rwlock);
+ return ACM_ACCESS_DENIED;
+ }
+ read_unlock(&acm_bin_pol_rwlock);
+ return ACM_ACCESS_PERMITTED;
+}
+
+static void
+ste_post_domain_destroy(void *subject_ssid, domid_t id)
+{
+ /* clean all cache entries for destroyed domain (might be re-used) */
+ clean_id_from_cache(id);
+}
+
+/* -------- EVENTCHANNEL OPERATIONS -----------*/
+static int
+ste_pre_eventchannel_unbound(domid_t id) {
+ struct domain *subj, *obj;
+ int ret;
+ traceprintk("%s: dom%x-->dom%x.\n",
+ __func__, current->domain->domain_id, id);
+
+ if (check_cache(current->domain, id)) {
+ atomic_inc(&ste_bin_pol.ec_cachehit_count);
+ return ACM_ACCESS_PERMITTED;
+ }
+ atomic_inc(&ste_bin_pol.ec_eval_count);
+ subj = current->domain;
+ obj = find_domain_by_id(id);
+
+ if (share_common_type(subj, obj)) {
+ cache_result(subj, obj);
+ ret = ACM_ACCESS_PERMITTED;
+ } else {
+ atomic_inc(&ste_bin_pol.ec_denied_count);
+ ret = ACM_ACCESS_DENIED;
+ }
+ if (obj != NULL)
+ put_domain(obj);
+ return ret;
+}
+
+static int
+ste_pre_eventchannel_interdomain(domid_t id1, domid_t id2)
+{
+ struct domain *subj, *obj;
+ int ret;
+ traceprintk("%s: dom%x-->dom%x.\n", __func__,
+ (id1 == DOMID_SELF) ? current->domain->domain_id : id1,
+ (id2 == DOMID_SELF) ? current->domain->domain_id : id2);
+
+ /* following is a bit longer but ensures that we
+ * "put" only domains that we where "find"-ing
+ */
+ if (id1 == DOMID_SELF) id1 = current->domain->domain_id;
+ if (id2 == DOMID_SELF) id2 = current->domain->domain_id;
+
+ subj = find_domain_by_id(id1);
+ obj = find_domain_by_id(id2);
+ if ((subj == NULL) || (obj == NULL)) {
+ ret = ACM_ACCESS_DENIED;
+ goto out;
+ }
+ /* cache check late, but evtchn is not on performance critical path */
+ if (check_cache(subj, obj->domain_id)) {
+ atomic_inc(&ste_bin_pol.ec_cachehit_count);
+ ret = ACM_ACCESS_PERMITTED;
+ goto out;
+ }
+ atomic_inc(&ste_bin_pol.ec_eval_count);
+
+ if (share_common_type(subj, obj)) {
+ cache_result(subj, obj);
+ ret = ACM_ACCESS_PERMITTED;
+ } else {
+ atomic_inc(&ste_bin_pol.ec_denied_count);
+ ret = ACM_ACCESS_DENIED;
+ }
+ out:
+ if (obj != NULL)
+ put_domain(obj);
+ if (subj != NULL)
+ put_domain(subj);
+ return ret;
+}
+
+/* -------- SHARED MEMORY OPERATIONS -----------*/
+
+static int
+ste_pre_grant_map_ref (domid_t id) {
+ struct domain *obj, *subj;
+ int ret;
+ traceprintk("%s: dom%x-->dom%x.\n", __func__,
+ current->domain->domain_id, id);
+
+ if (check_cache(current->domain, id)) {
+ atomic_inc(&ste_bin_pol.gt_cachehit_count);
+ return ACM_ACCESS_PERMITTED;
+ }
+ atomic_inc(&ste_bin_pol.gt_eval_count);
+ subj = current->domain;
+ obj = find_domain_by_id(id);
+
+ if (share_common_type(subj, obj)) {
+ cache_result(subj, obj);
+ ret = ACM_ACCESS_PERMITTED;
+ } else {
+ atomic_inc(&ste_bin_pol.gt_denied_count);
+ printkd("%s: ACCESS DENIED!\n", __func__);
+ ret = ACM_ACCESS_DENIED;
+ }
+ if (obj != NULL)
+ put_domain(obj);
+ return ret;
+}
+
+/* since setting up grant tables involves some implicit information
+ flow from the creating domain to the domain that is set up, we
+ check types in addition to the general authorization */
+static int
+ste_pre_grant_setup (domid_t id) {
+ struct domain *obj, *subj;
+ int ret;
+ traceprintk("%s: dom%x-->dom%x.\n", __func__,
+ current->domain->domain_id, id);
+
+ if (check_cache(current->domain, id)) {
+ atomic_inc(&ste_bin_pol.gt_cachehit_count);
+ return ACM_ACCESS_PERMITTED;
+ }
+ atomic_inc(&ste_bin_pol.gt_eval_count);
+ /* a) check authorization (eventually use specific capabilities) */
+ if (!IS_PRIV(current->domain)) {
+ printk("%s: Grant table management authorization denied ERROR!\n", __func__);
+ return ACM_ACCESS_DENIED;
+ }
+ /* b) check types */
+ subj = current->domain;
+ obj = find_domain_by_id(id);
+
+ if (share_common_type(subj, obj)) {
+ cache_result(subj, obj);
+ ret = ACM_ACCESS_PERMITTED;
+ } else {
+ atomic_inc(&ste_bin_pol.gt_denied_count);
+ ret = ACM_ACCESS_DENIED;
+ }
+ if (obj != NULL)
+ put_domain(obj);
+ return ret;
+}
+
+/* now define the hook structure similarly to LSM */
+struct acm_operations acm_simple_type_enforcement_ops = {
+ /* policy management services */
+ .init_domain_ssid = ste_init_domain_ssid,
+ .free_domain_ssid = ste_free_domain_ssid,
+ .dump_binary_policy = ste_dump_policy,
+ .set_binary_policy = ste_set_policy,
+ .dump_statistics = ste_dump_stats,
+ /* domain management control hooks */
+ .pre_domain_create = ste_pre_domain_create,
+ .post_domain_create = NULL,
+ .fail_domain_create = NULL,
+ .post_domain_destroy = ste_post_domain_destroy,
+ /* event channel control hooks */
+ .pre_eventchannel_unbound = ste_pre_eventchannel_unbound,
+ .fail_eventchannel_unbound = NULL,
+ .pre_eventchannel_interdomain = ste_pre_eventchannel_interdomain,
+ .fail_eventchannel_interdomain = NULL,
+ /* grant table control hooks */
+ .pre_grant_map_ref = ste_pre_grant_map_ref,
+ .fail_grant_map_ref = NULL,
+ .pre_grant_setup = ste_pre_grant_setup,
+ .fail_grant_setup = NULL,
+};
diff --git a/xen/arch/ia64/Makefile b/xen/arch/ia64/Makefile
index 2e59a7d19d..03f56326fb 100644
--- a/xen/arch/ia64/Makefile
+++ b/xen/arch/ia64/Makefile
@@ -15,7 +15,7 @@ OBJS = xensetup.o setup.o time.o irq.o ia64_ksyms.o process.o smp.o \
ifeq ($(CONFIG_VTI),y)
OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
- vtlb.o mmio.o vlsapic.o
+ vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o
endif
# perfmon.o
# unwind.o needed for kernel unwinding (rare)
diff --git a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
index 41bbbc7d5b..4b019209d5 100644
--- a/xen/arch/ia64/asm-offsets.c
+++ b/xen/arch/ia64/asm-offsets.c
@@ -75,6 +75,9 @@ void foo(void)
DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
+ DEFINE(IA64_VCPU_IRR0_OFFSET, offsetof (struct vcpu, arch.irr[0]));
+ DEFINE(IA64_VCPU_IRR3_OFFSET, offsetof (struct vcpu, arch.irr[3]));
+ DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
BLANK();
diff --git a/xen/arch/ia64/dom0_ops.c b/xen/arch/ia64/dom0_ops.c
index e0b48080bc..c1b1d5c241 100644
--- a/xen/arch/ia64/dom0_ops.c
+++ b/xen/arch/ia64/dom0_ops.c
@@ -18,14 +18,6 @@
#include <xen/console.h>
#include <public/sched_ctl.h>
-#define TRC_DOM0OP_ENTER_BASE 0x00020000
-#define TRC_DOM0OP_LEAVE_BASE 0x00030000
-
-static int msr_cpu_mask;
-static unsigned long msr_addr;
-static unsigned long msr_lo;
-static unsigned long msr_hi;
-
long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
{
long ret = 0;
@@ -35,6 +27,49 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
switch ( op->cmd )
{
+ /*
+ * NOTE: DOM0_GETMEMLIST has somewhat different semantics on IA64 -
+ * it actually allocates and maps pages.
+ */
+ case DOM0_GETMEMLIST:
+ {
+ unsigned long i;
+ struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
+ unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
+ unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
+ unsigned long pfn;
+ unsigned long *buffer = op->u.getmemlist.buffer;
+ struct page *page;
+
+ ret = -EINVAL;
+ if ( d != NULL )
+ {
+ ret = 0;
+
+ for ( i = start_page; i < (start_page + nr_pages); i++ )
+ {
+ page = map_new_domain_page(d, i << PAGE_SHIFT);
+ if ( page == NULL )
+ {
+ ret = -ENOMEM;
+ break;
+ }
+ pfn = page_to_pfn(page);
+ if ( put_user(pfn, buffer) )
+ {
+ ret = -EFAULT;
+ break;
+ }
+ buffer++;
+ }
+
+ op->u.getmemlist.num_pfns = i - start_page;
+ copy_to_user(u_dom0_op, op, sizeof(*op));
+
+ put_domain(d);
+ }
+ }
+ break;
default:
ret = -ENOSYS;
@@ -43,10 +78,3 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
return ret;
}
-
-void arch_getdomaininfo_ctxt(struct domain *d, struct vcpu_guest_context *c)
-{
- int i;
-
- dummy();
-}
diff --git a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
index 869396ed06..40a38b2e07 100644
--- a/xen/arch/ia64/domain.c
+++ b/xen/arch/ia64/domain.c
@@ -76,7 +76,7 @@ extern unsigned long dom_fw_setup(struct domain *, char *, int);
/* this belongs in include/asm, but there doesn't seem to be a suitable place */
void free_perdomain_pt(struct domain *d)
{
- dummy();
+ printf("free_perdomain_pt: not implemented\n");
//free_page((unsigned long)d->mm.perdomain_pt);
}
@@ -166,27 +166,49 @@ void arch_free_vcpu_struct(struct vcpu *v)
free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
}
+static void init_switch_stack(struct vcpu *v)
+{
+ struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+ struct switch_stack *sw = (struct switch_stack *) regs - 1;
+ extern void ia64_ret_from_clone;
+
+ memset(sw, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs));
+ sw->ar_bspstore = (unsigned long)v + IA64_RBS_OFFSET;
+ sw->b0 = (unsigned long) &ia64_ret_from_clone;
+ sw->ar_fpsr = FPSR_DEFAULT;
+ v->arch._thread.ksp = (unsigned long) sw - 16;
+ // stay on kernel stack because may get interrupts!
+ // ia64_ret_from_clone (which b0 gets in new_thread) switches
+ // to user stack
+ v->arch._thread.on_ustack = 0;
+ memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
+}
+
#ifdef CONFIG_VTI
void arch_do_createdomain(struct vcpu *v)
{
struct domain *d = v->domain;
struct thread_info *ti = alloc_thread_info(v);
- /* If domain is VMX domain, shared info area is created
- * by domain and then domain notifies HV by specific hypercall.
- * If domain is xenolinux, shared info area is created by
- * HV.
- * Since we have no idea about whether domain is VMX now,
- * (dom0 when parse and domN when build), postpone possible
- * allocation.
- */
+ /* Clear thread_info to clear some important fields, like preempt_count */
+ memset(ti, 0, sizeof(struct thread_info));
+ init_switch_stack(v);
+
+ /* Shared info area is required to be allocated at domain
+ * creation, since the control panel will write some I/O info
+ * between front end and back end to that area. However, for a
+ * vmx domain, our design is to let the domain itself allocate the
+ * shared info area, to keep the machine page contiguous. So this
+ * page will be released later when domainN issues the request
+ * after it is up.
+ */
+ d->shared_info = (void *)alloc_xenheap_page();
/* FIXME: Because full virtual cpu info is placed in this area,
* it's unlikely to put it into one shareinfo page. Later
* need split vcpu context from vcpu_info and conforms to
* normal xen convention.
*/
- d->shared_info = NULL;
v->vcpu_info = (void *)alloc_xenheap_page();
if (!v->vcpu_info) {
printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
@@ -194,9 +216,6 @@ void arch_do_createdomain(struct vcpu *v)
}
memset(v->vcpu_info, 0, PAGE_SIZE);
- /* Clear thread_info to clear some important fields, like preempt_count */
- memset(ti, 0, sizeof(struct thread_info));
-
/* Allocate per-domain vTLB and vhpt */
v->arch.vtlb = init_domain_tlb(v);
@@ -211,38 +230,25 @@ void arch_do_createdomain(struct vcpu *v)
d->xen_vastart = 0xf000000000000000;
d->xen_vaend = 0xf300000000000000;
d->arch.breakimm = 0x1000;
-
- // stay on kernel stack because may get interrupts!
- // ia64_ret_from_clone (which b0 gets in new_thread) switches
- // to user stack
- v->arch._thread.on_ustack = 0;
}
#else // CONFIG_VTI
void arch_do_createdomain(struct vcpu *v)
{
struct domain *d = v->domain;
+ struct thread_info *ti = alloc_thread_info(v);
+
+ /* Clear thread_info to clear some important fields, like preempt_count */
+ memset(ti, 0, sizeof(struct thread_info));
+ init_switch_stack(v);
d->shared_info = (void *)alloc_xenheap_page();
- v->vcpu_info = (void *)alloc_xenheap_page();
- if (!v->vcpu_info) {
+ if (!d->shared_info) {
printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
while (1);
}
- memset(v->vcpu_info, 0, PAGE_SIZE);
- /* pin mapping */
- // FIXME: Does this belong here? Or do only at domain switch time?
-#if 0
- // this is now done in ia64_new_rr7
- {
- /* WARNING: following must be inlined to avoid nested fault */
- unsigned long psr = ia64_clear_ic();
- ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
- pte_val(pfn_pte(ia64_tpa(d->shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
- PAGE_SHIFT);
- ia64_set_psr(psr);
- ia64_srlz_i();
- }
-#endif
+ memset(d->shared_info, 0, PAGE_SIZE);
+ v->vcpu_info = &(d->shared_info->vcpu_data[0]);
+
d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
BUG();
@@ -258,33 +264,63 @@ void arch_do_createdomain(struct vcpu *v)
d->shared_info_va = 0xf100000000000000;
d->arch.breakimm = 0x1000;
v->arch.breakimm = d->arch.breakimm;
- // stay on kernel stack because may get interrupts!
- // ia64_ret_from_clone (which b0 gets in new_thread) switches
- // to user stack
- v->arch._thread.on_ustack = 0;
+
+ d->arch.mm = xmalloc(struct mm_struct);
+ if (unlikely(!d->arch.mm)) {
+ printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
+ return -ENOMEM;
+ }
+ memset(d->arch.mm, 0, sizeof(*d->arch.mm));
+ d->arch.mm->pgd = pgd_alloc(d->arch.mm);
+ if (unlikely(!d->arch.mm->pgd)) {
+ printk("Can't allocate pgd for domain %d\n",d->domain_id);
+ return -ENOMEM;
+ }
}
#endif // CONFIG_VTI
-void arch_do_boot_vcpu(struct vcpu *v)
+void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
{
- return;
+ struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+
+ printf("arch_getdomaininfo_ctxt\n");
+ c->regs = *regs;
+ c->vcpu = v->vcpu_info->arch;
+ c->shared = v->domain->shared_info->arch;
}
int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
{
- dummy();
- return 1;
+ struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+
+ printf("arch_set_info_guest\n");
+ *regs = c->regs;
+ regs->cr_ipsr = IA64_PSR_IT|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IC|IA64_PSR_I|IA64_PSR_DFH|IA64_PSR_BN|IA64_PSR_SP|IA64_PSR_DI;
+ regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
+ regs->ar_rsc |= (2 << 2); /* force PL2/3 */
+
+ v->vcpu_info->arch = c->vcpu;
+ init_all_rr(v);
+
+ // this should be in userspace
+ regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=ttyS console=ttyS0",256L); //FIXME
+ v->vcpu_info->arch.banknum = 1;
+ v->vcpu_info->arch.metaphysical_mode = 1;
+
+ v->domain->shared_info->arch = c->shared;
+ return 0;
}
-int arch_final_setup_guest(struct vcpu *v, struct vcpu_guest_context *c)
+void arch_do_boot_vcpu(struct vcpu *v)
{
- dummy();
- return 1;
+ printf("arch_do_boot_vcpu: not implemented\n");
+ return;
}
void domain_relinquish_resources(struct domain *d)
{
- dummy();
+ /* FIXME */
+ printf("domain_relinquish_resources: not implemented\n");
}
#ifdef CONFIG_VTI
@@ -294,10 +330,8 @@ void new_thread(struct vcpu *v,
unsigned long start_info)
{
struct domain *d = v->domain;
- struct switch_stack *sw;
struct xen_regs *regs;
struct ia64_boot_param *bp;
- extern char ia64_ret_from_clone;
extern char saved_command_line[];
//char *dom0_cmdline = "BOOT_IMAGE=scsi0:\EFI\redhat\xenlinux nomca root=/dev/sdb1 ro";
@@ -305,11 +339,8 @@ void new_thread(struct vcpu *v,
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
if (d == dom0) start_pc += dom0_start;
#endif
- regs = (struct xen_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
- sw = (struct switch_stack *) regs - 1;
- /* Sanity Clear */
- memset(sw, 0, sizeof(struct xen_regs) + sizeof(struct switch_stack));
+ regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
if (VMX_DOMAIN(v)) {
/* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
regs->cr_ipsr = 0x501008826008; /* Need to be expanded as macro */
@@ -320,33 +351,23 @@ void new_thread(struct vcpu *v,
regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
}
regs->cr_iip = start_pc;
- regs->ar_rsc = 0x0;
- regs->cr_ifs = 0x0;
- regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
- sw->ar_bspstore = (unsigned long)v + IA64_RBS_OFFSET;
- printf("new_thread: v=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
- v,regs,sw,sw->ar_bspstore,IA64_STK_OFFSET,&regs->r8);
- printf("iip:0x%lx,ipsr:0x%lx\n", regs->cr_iip, regs->cr_ipsr);
-
- sw->b0 = (unsigned long) &ia64_ret_from_clone;
- v->arch._thread.ksp = (unsigned long) sw - 16;
- printk("new_thread, about to call init_all_rr\n");
+ regs->cr_ifs = 0; /* why? - matthewc */
+ regs->ar_fpsr = FPSR_DEFAULT;
if (VMX_DOMAIN(v)) {
vmx_init_all_rr(v);
} else
init_all_rr(v);
- // set up boot parameters (and fake firmware)
- printk("new_thread, about to call dom_fw_setup\n");
- VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L); //FIXME
- printk("new_thread, done with dom_fw_setup\n");
if (VMX_DOMAIN(v)) {
+ VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
/* Virtual processor context setup */
VMX_VPD(v, vpsr) = IA64_PSR_BN;
VPD_CR(v, dcr) = 0;
} else {
- // don't forget to set this!
+ regs->r28 = dom_fw_setup(d,saved_command_line,256L);
v->vcpu_info->arch.banknum = 1;
+ v->vcpu_info->arch.metaphysical_mode = 1;
+ d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
}
}
#else // CONFIG_VTI
@@ -359,54 +380,27 @@ void new_thread(struct vcpu *v,
unsigned long start_info)
{
struct domain *d = v->domain;
- struct switch_stack *sw;
struct pt_regs *regs;
- unsigned long new_rbs;
struct ia64_boot_param *bp;
- extern char ia64_ret_from_clone;
extern char saved_command_line[];
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
if (d == dom0) start_pc += dom0_start;
#endif
+
regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
- sw = (struct switch_stack *) regs - 1;
- memset(sw,0,sizeof(struct switch_stack)+sizeof(struct pt_regs));
- new_rbs = (unsigned long) v + IA64_RBS_OFFSET;
regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
| IA64_PSR_BITS_TO_SET | IA64_PSR_BN
& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
regs->cr_iip = start_pc;
- regs->ar_rsc = 0; /* lazy mode */
- regs->ar_rnat = 0;
- regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
- regs->loadrs = 0;
- //regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */
- //regs->r8 = 0x01234567890abcdef; // FIXME: temp marker
- //regs->r12 = ((unsigned long) regs - 16); /* 16 byte scratch */
regs->cr_ifs = 1UL << 63;
- regs->pr = 0;
- sw->pr = 0;
- regs->ar_pfs = 0;
- sw->caller_unat = 0;
- sw->ar_pfs = 0;
- sw->ar_bspstore = new_rbs;
- //regs->r13 = (unsigned long) v;
-printf("new_thread: v=%p, start_pc=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
-v,start_pc,regs,sw,new_rbs,IA64_STK_OFFSET,&regs->r8);
- sw->b0 = (unsigned long) &ia64_ret_from_clone;
- v->arch._thread.ksp = (unsigned long) sw - 16;
- //v->thread_info->flags = 0;
-printk("new_thread, about to call init_all_rr\n");
+ regs->ar_fpsr = FPSR_DEFAULT;
init_all_rr(v);
- // set up boot parameters (and fake firmware)
-printk("new_thread, about to call dom_fw_setup\n");
regs->r28 = dom_fw_setup(d,saved_command_line,256L); //FIXME
-printk("new_thread, done with dom_fw_setup\n");
- // don't forget to set this!
v->vcpu_info->arch.banknum = 1;
- memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
+ v->vcpu_info->arch.metaphysical_mode = 1;
+ d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
}
#endif // CONFIG_VTI
@@ -1037,21 +1031,6 @@ int construct_dom0(struct domain *d,
strcpy(d->name,"Domain0");
#endif
- // prepare domain0 pagetable (maps METAphysical to physical)
- // following is roughly mm_init() in linux/kernel/fork.c
- d->arch.mm = xmalloc(struct mm_struct);
- if (unlikely(!d->arch.mm)) {
- printk("Can't allocate mm_struct for domain0\n");
- return -ENOMEM;
- }
- memset(d->arch.mm, 0, sizeof(*d->arch.mm));
- d->arch.mm->pgd = pgd_alloc(d->arch.mm);
- if (unlikely(!d->arch.mm->pgd)) {
- printk("Can't allocate pgd for domain0\n");
- return -ENOMEM;
- }
-
-
/* Mask all upcalls... */
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
@@ -1146,19 +1125,6 @@ int construct_domU(struct domain *d,
printk("parsedomainelfimage returns %d\n",rc);
if ( rc != 0 ) return rc;
- d->arch.mm = xmalloc(struct mm_struct);
- if (unlikely(!d->arch.mm)) {
- printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
- return -ENOMEM;
- }
- memset(d->arch.mm, 0, sizeof(*d->arch.mm));
- d->arch.mm->pgd = pgd_alloc(d->arch.mm);
- if (unlikely(!d->arch.mm->pgd)) {
- printk("Can't allocate pgd for domain %d\n",d->domain_id);
- return -ENOMEM;
- }
-
-
/* Mask all upcalls... */
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
@@ -1231,10 +1197,10 @@ void machine_halt(void)
while(1);
}
-void dummy(void)
+void dummy_called(char *function)
{
if (platform_is_hp_ski()) asm("break 0;;");
- printf("dummy called: spinning....\n");
+ printf("dummy called in %s: spinning....\n", function);
while(1);
}
diff --git a/xen/arch/ia64/hypercall.c b/xen/arch/ia64/hypercall.c
index 0fcc6f7cf8..648bbfbbe8 100644
--- a/xen/arch/ia64/hypercall.c
+++ b/xen/arch/ia64/hypercall.c
@@ -19,8 +19,6 @@ extern unsigned long translate_domain_mpaddr(unsigned long);
extern struct ia64_sal_retval pal_emulator_static(UINT64);
extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
-void fooefi(void) {}
-
int
ia64_hypercall (struct pt_regs *regs)
{
@@ -122,6 +120,31 @@ ia64_hypercall (struct pt_regs *regs)
case 0xfffb: // test dummy hypercall
regs->r8 = domU_staging_read_8(vcpu_get_gr(v,32));
break;
+
+ case __HYPERVISOR_dom0_op:
+ regs->r8 = do_dom0_op(regs->r14);
+ break;
+
+ case __HYPERVISOR_dom_mem_op:
+#ifdef CONFIG_VTI
+ regs->r8 = do_dom_mem_op(regs->r14, regs->r15, regs->r16, regs->r17, regs->r18);
+#else
+ /* we don't handle reservations; just return success */
+ regs->r8 = regs->r16;
+#endif
+ break;
+
+ case __HYPERVISOR_event_channel_op:
+ regs->r8 = do_event_channel_op(regs->r14);
+ break;
+
+ case __HYPERVISOR_console_io:
+ regs->r8 = do_console_io(regs->r14, regs->r15, regs->r16);
+ break;
+
+ default:
+ printf("unknown hypercall %x\n", regs->r2);
+ regs->r8 = (unsigned long)-1;
}
return 1;
}
diff --git a/xen/arch/ia64/hyperprivop.S b/xen/arch/ia64/hyperprivop.S
index 6903c66782..235c8322eb 100644
--- a/xen/arch/ia64/hyperprivop.S
+++ b/xen/arch/ia64/hyperprivop.S
@@ -41,40 +41,46 @@
// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
// r31 == pr
GLOBAL_ENTRY(fast_hyperprivop)
-#if 1
// HYPERPRIVOP_SSM_I?
// assumes domain interrupts pending, so just do it
cmp.eq p7,p6=XEN_HYPER_SSM_I,r17
(p7) br.sptk.many hyper_ssm_i;;
-#endif
-#if 1
- // if domain interrupts pending, give up for now and do it the slow way
+
+ // FIXME. This algorithm gives up (goes to the slow path) if there
+ // are ANY interrupts pending, even if they are currently
+ // undeliverable. This should be improved later...
adds r20=XSI_PEND_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r20=[r20] ;;
- cmp.ne p7,p0=r0,r20
-(p7) br.sptk.many dispatch_break_fault ;;
+ ld4 r20=[r20] ;;
+ cmp.eq p7,p0=r0,r20
+(p7) br.cond.sptk.many 1f
+ mov r20=IA64_KR(CURRENT);;
+ adds r21=IA64_VCPU_IRR0_OFFSET,r20;
+ adds r22=IA64_VCPU_IRR0_OFFSET+8,r20;;
+ ld8 r23=[r21],16; ld8 r24=[r22],16;;
+ ld8 r21=[r21]; ld8 r22=[r22];;
+ or r23=r23,r24; or r21=r21,r22;;
+ or r20=r23,r21;;
+1: // when we get to here r20=~=interrupts pending
// HYPERPRIVOP_RFI?
cmp.eq p7,p6=XEN_HYPER_RFI,r17
(p7) br.sptk.many hyper_rfi;;
+ cmp.ne p7,p0=r20,r0
+(p7) br.spnt.many dispatch_break_fault ;;
+
// hard to test, because only called from rbs_switch
// HYPERPRIVOP_COVER?
cmp.eq p7,p6=XEN_HYPER_COVER,r17
(p7) br.sptk.many hyper_cover;;
-#endif
-#if 1
// HYPERPRIVOP_SSM_DT?
cmp.eq p7,p6=XEN_HYPER_SSM_DT,r17
(p7) br.sptk.many hyper_ssm_dt;;
-#endif
-#if 1
// HYPERPRIVOP_RSM_DT?
cmp.eq p7,p6=XEN_HYPER_RSM_DT,r17
(p7) br.sptk.many hyper_rsm_dt;;
-#endif
// if not one of the above, give up for now and do it the slow way
br.sptk.many dispatch_break_fault ;;
@@ -336,12 +342,16 @@ GLOBAL_ENTRY(fast_break_reflect)
// ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
ENTRY(hyper_rfi)
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_RFI);;
- ld8 r21=[r20];;
- adds r21=1,r21;;
- st8 [r20]=r21;;
-#endif
+ // if no interrupts pending, proceed
+ cmp.eq p7,p0=r20,r0
+(p7) br.sptk.many 1f
+ // interrupts pending, if rfi'ing to interrupts on, go slow way
+ adds r20=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld8 r21=[r20];; // r21 = vcr.ipsr
+ extr.u r22=r21,IA64_PSR_I_BIT,1 ;;
+ cmp.ne p7,p0=r22,r0 ;;
+(p7) br.spnt.many dispatch_break_fault ;;
+1:
adds r20=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r21=[r20];; // r21 = vcr.ipsr
extr.u r22=r21,IA64_PSR_BE_BIT,1 ;;
@@ -375,7 +385,13 @@ ENTRY(hyper_rfi)
(p7) br.sptk.many dispatch_break_fault ;;
// OK now, let's do an rfi.
- // r18=&vpsr.i|vpsr.ic, r21==vpsr, r20==&vcr.iip, r22=vcr.iip
+#ifdef FAST_HYPERPRIVOP_CNT
+ movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_RFI);;
+ ld8 r23=[r20];;
+ adds r23=1,r23;;
+ st8 [r20]=r23;;
+#endif
+ // r18=&vpsr.i|vpsr.ic, r21==vpsr, r22=vcr.iip
mov cr.iip=r22;;
adds r20=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
st4 [r20]=r0 ;;
diff --git a/xen/arch/ia64/ivt.S b/xen/arch/ia64/ivt.S
index 4d6785c310..b1def7004f 100644
--- a/xen/arch/ia64/ivt.S
+++ b/xen/arch/ia64/ivt.S
@@ -348,12 +348,23 @@ ENTRY(alt_itlb_miss)
// ;;
//#endif
#endif
+#ifdef XEN
+ mov r31=pr
+ mov r16=cr.ifa // get address that caused the TLB miss
+ ;;
+late_alt_itlb_miss:
+ movl r17=PAGE_KERNEL
+ mov r21=cr.ipsr
+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+ ;;
+#else
mov r16=cr.ifa // get address that caused the TLB miss
movl r17=PAGE_KERNEL
mov r21=cr.ipsr
movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
mov r31=pr
;;
+#endif
#ifdef CONFIG_DISABLE_VHPT
shr.u r22=r16,61 // get the region number into r21
;;
@@ -367,9 +378,15 @@ ENTRY(alt_itlb_miss)
#endif
extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
+#ifdef XEN
+ shr.u r18=r16,55 // move address bit 59 to bit 4
+ ;;
+ and r18=0x10,r18 // bit 4=address-bit(59)
+#else
shr.u r18=r16,57 // move address bit 61 to bit 4
;;
andcm r18=0x10,r18 // bit 4=~address-bit(61)
+#endif
cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
or r19=r17,r19 // insert PTE control bits into r19
;;
@@ -393,13 +410,18 @@ ENTRY(alt_dtlb_miss)
// ;;
//#endif
#endif
+#ifdef XEN
+ mov r31=pr
mov r16=cr.ifa // get address that caused the TLB miss
+ ;;
+late_alt_dtlb_miss:
movl r17=PAGE_KERNEL
mov r20=cr.isr
movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
mov r21=cr.ipsr
- mov r31=pr
;;
+#else
+#endif
#ifdef CONFIG_DISABLE_VHPT
shr.u r22=r16,61 // get the region number into r21
;;
@@ -414,24 +436,33 @@ ENTRY(alt_dtlb_miss)
extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
+#ifdef XEN
+ shr.u r18=r16,55 // move address bit 59 to bit 4
+ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
+ tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
+ ;;
+ and r18=0x10,r18 // bit 4=address-bit(59)
+#else
shr.u r18=r16,57 // move address bit 61 to bit 4
and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
;;
andcm r18=0x10,r18 // bit 4=~address-bit(61)
+#endif
cmp.ne p8,p0=r0,r23
(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
(p8) br.cond.spnt page_fault
#ifdef XEN
;;
- // FIXME: inadequate test, this is where we test for Xen address
- // note that 0xf000 (cached) and 0xd000 (uncached) addresses
- // should be OK. (Though no I/O is done in Xen, EFI needs uncached
- // addresses and some domain EFI calls are passed through)
- tbit.nz p0,p8=r16,60
-(p8) br.cond.spnt page_fault
-//(p8) br.cond.spnt 0
- ;;
+ // Test for a Xen address; if it is not one, handle via page_fault.
+ // note that 0xf000 (cached) and 0xe800 (uncached) addresses
+ // should be OK.
+ extr.u r22=r16,59,5;;
+ cmp.eq p8,p0=0x1e,r22
+(p8) br.cond.spnt 1f;;
+ cmp.ne p8,p0=0x1d,r22
+(p8) br.cond.sptk page_fault ;;
+1:
#endif
dep r21=-1,r21,IA64_PSR_ED_BIT,1
diff --git a/xen/arch/ia64/patch/linux-2.6.11/io.h b/xen/arch/ia64/patch/linux-2.6.11/io.h
index c935f35cf3..b42ae6b549 100644
--- a/xen/arch/ia64/patch/linux-2.6.11/io.h
+++ b/xen/arch/ia64/patch/linux-2.6.11/io.h
@@ -5,7 +5,7 @@
#define SLOW_DOWN_IO do { } while (0)
+#ifdef XEN
-+#define __IA64_UNCACHED_OFFSET 0xd000000000000000UL /* region 6 */
++#define __IA64_UNCACHED_OFFSET 0xe800000000000000UL
+#else
#define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */
+#endif
diff --git a/xen/arch/ia64/patch/linux-2.6.11/ptrace.h b/xen/arch/ia64/patch/linux-2.6.11/ptrace.h
index dd79914f59..f96ceb1ba0 100644
--- a/xen/arch/ia64/patch/linux-2.6.11/ptrace.h
+++ b/xen/arch/ia64/patch/linux-2.6.11/ptrace.h
@@ -4,9 +4,9 @@
* (because the memory stack pointer MUST ALWAYS be aligned this way)
*
*/
-+#ifdef CONFIG_VTI
-+#include "vmx_ptrace.h"
-+#else //CONFIG_VTI
++#ifdef XEN
++#include <public/arch-ia64.h>
++#else
struct pt_regs {
/* The following registers are saved by SAVE_MIN: */
unsigned long b6; /* scratch */
@@ -14,7 +14,7 @@
struct ia64_fpreg f10; /* scratch */
struct ia64_fpreg f11; /* scratch */
};
-+#endif // CONFIG_VTI
++#endif
/*
* This structure contains the addition registers that need to
diff --git a/xen/arch/ia64/patch/linux-2.6.11/uaccess.h b/xen/arch/ia64/patch/linux-2.6.11/uaccess.h
new file mode 100644
index 0000000000..def5aaac47
--- /dev/null
+++ b/xen/arch/ia64/patch/linux-2.6.11/uaccess.h
@@ -0,0 +1,22 @@
+--- ../../linux-2.6.11/include/asm-ia64/uaccess.h 2005-06-06 10:36:23.000000000 -0600
++++ include/asm-ia64/uaccess.h 2005-06-10 18:08:06.000000000 -0600
+@@ -60,6 +60,11 @@
+ * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
+ * point inside the virtually mapped linear page table.
+ */
++#ifdef XEN
++/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
++#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
++#define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr)))
++#else
+ #define __access_ok(addr, size, segment) \
+ ({ \
+ __chk_user_ptr(addr); \
+@@ -67,6 +72,7 @@
+ && ((segment).seg == KERNEL_DS.seg \
+ || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
+ })
++#endif
+ #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
+
+ static inline int
diff --git a/xen/arch/ia64/privop.c b/xen/arch/ia64/privop.c
index 1f50ea2448..c4fbcca45c 100644
--- a/xen/arch/ia64/privop.c
+++ b/xen/arch/ia64/privop.c
@@ -748,10 +748,22 @@ priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
#define HYPERPRIVOP_ITC_D 0x5
#define HYPERPRIVOP_ITC_I 0x6
#define HYPERPRIVOP_SSM_I 0x7
-#define HYPERPRIVOP_MAX 0x7
+#define HYPERPRIVOP_GET_IVR 0x8
+#define HYPERPRIVOP_GET_TPR 0x9
+#define HYPERPRIVOP_SET_TPR 0xa
+#define HYPERPRIVOP_EOI 0xb
+#define HYPERPRIVOP_SET_ITM 0xc
+#define HYPERPRIVOP_THASH 0xd
+#define HYPERPRIVOP_PTC_GA 0xe
+#define HYPERPRIVOP_ITR_D 0xf
+#define HYPERPRIVOP_GET_RR 0x10
+#define HYPERPRIVOP_SET_RR 0x11
+#define HYPERPRIVOP_MAX 0x11
char *hyperpriv_str[HYPERPRIVOP_MAX+1] = {
0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
+ "=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
+ "=rr", "rr=",
0
};
@@ -766,6 +778,7 @@ ia64_hyperprivop(unsigned long iim, REGS *regs)
struct vcpu *v = (struct domain *) current;
INST64 inst;
UINT64 val;
+ UINT64 itir, ifa;
// FIXME: Handle faults appropriately for these
if (!iim || iim > HYPERPRIVOP_MAX) {
@@ -797,6 +810,44 @@ ia64_hyperprivop(unsigned long iim, REGS *regs)
case HYPERPRIVOP_SSM_I:
(void)vcpu_set_psr_i(v);
return 1;
+ case HYPERPRIVOP_GET_IVR:
+ (void)vcpu_get_ivr(v,&val);
+ regs->r8 = val;
+ return 1;
+ case HYPERPRIVOP_GET_TPR:
+ (void)vcpu_get_tpr(v,&val);
+ regs->r8 = val;
+ return 1;
+ case HYPERPRIVOP_SET_TPR:
+ (void)vcpu_set_tpr(v,regs->r8);
+ return 1;
+ case HYPERPRIVOP_EOI:
+ (void)vcpu_set_eoi(v,0L);
+ return 1;
+ case HYPERPRIVOP_SET_ITM:
+ (void)vcpu_set_itm(v,regs->r8);
+ return 1;
+ case HYPERPRIVOP_THASH:
+ (void)vcpu_thash(v,regs->r8,&val);
+ regs->r8 = val;
+ return 1;
+ case HYPERPRIVOP_PTC_GA:
+ // FIXME: this doesn't seem to work yet, turned off
+ //(void)vcpu_ptc_ga(v,regs->r8,regs->r9);
+ //return 1;
+ break;
+ case HYPERPRIVOP_ITR_D:
+ (void)vcpu_get_itir(v,&itir);
+ (void)vcpu_get_ifa(v,&ifa);
+ (void)vcpu_itr_d(v,regs->r8,regs->r9,itir,ifa);
+ return 1;
+ case HYPERPRIVOP_GET_RR:
+ (void)vcpu_get_rr(v,regs->r8,&val);
+ regs->r8 = val;
+ return 1;
+ case HYPERPRIVOP_SET_RR:
+ (void)vcpu_set_rr(v,regs->r8,regs->r9);
+ return 1;
}
return 0;
}
diff --git a/xen/arch/ia64/process.c b/xen/arch/ia64/process.c
index f664b74a42..a26194d8f3 100644
--- a/xen/arch/ia64/process.c
+++ b/xen/arch/ia64/process.c
@@ -313,45 +313,31 @@ void xen_handle_domain_access(unsigned long address, unsigned long isr, struct p
}
if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip);
+ if (trp = match_tr(current,address)) {
+ // FIXME address had better be pre-validated on insert
+ pteval = translate_domain_pte(trp->page_flags,address,trp->itir);
+ vcpu_itc_no_srlz(current,6,address,pteval,-1UL,(trp->itir>>2)&0x3f);
+ return;
+ }
// if we are fortunate enough to have it in the 1-entry TLB...
if (pteval = match_dtlb(ed,address,&ps,NULL)) {
vcpu_itc_no_srlz(ed,6,address,pteval,-1UL,ps);
return;
}
- // look in the TRs
- fault = vcpu_tpa(ed,address,&mpaddr);
- if (fault != IA64_NO_FAULT) {
- static int uacnt = 0;
- // can't translate it, just fail (poor man's exception)
- // which results in retrying execution
-//printk("*** xen_handle_domain_access: poor man's exception cnt=%i iip=%p, addr=%p...\n",uacnt++,iip,address);
- if (ia64_done_with_exception(regs)) {
+ if (ia64_done_with_exception(regs)) {
//if (!(uacnt++ & 0x3ff)) printk("*** xen_handle_domain_access: successfully handled cnt=%d iip=%p, addr=%p...\n",uacnt,iip,address);
return;
- }
- else {
- // should never happen. If it does, region 0 addr may
- // indicate a bad xen pointer
- printk("*** xen_handle_domain_access: exception table"
- " lookup failed, iip=%p, addr=%p, spinning...\n",
- iip,address);
- panic_domain(regs,"*** xen_handle_domain_access: exception table"
- " lookup failed, iip=%p, addr=%p, spinning...\n",
- iip,address);
- }
}
- if (d == dom0) {
- if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
- printk("xen_handle_domain_access: vcpu_tpa returned out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
- tdpfoo();
- }
+ else {
+ // should never happen. If it does, region 0 addr may
+ // indicate a bad xen pointer
+ printk("*** xen_handle_domain_access: exception table"
+ " lookup failed, iip=%p, addr=%p, spinning...\n",
+ iip,address);
+ panic_domain(regs,"*** xen_handle_domain_access: exception table"
+ " lookup failed, iip=%p, addr=%p, spinning...\n",
+ iip,address);
}
-//printk("*** xen_handle_domain_access: tpa resolved miss @%p...\n",address);
- pteval = lookup_domain_mpa(d,mpaddr);
- // would be nice to have a counter here
- //printf("Handling privop data TLB miss\n");
- // FIXME, must be inlined or potential for nested fault here!
- vcpu_itc_no_srlz(ed,2,address,pteval,-1UL,PAGE_SHIFT);
}
void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
@@ -441,7 +427,7 @@ panic_domain(0,"ia64_do_page_fault: @%p???, iip=%p, b0=%p, itc=%p (spinning...)\
if (pteval & _PAGE_P)
{
pteval = translate_domain_pte(pteval,address,itir);
- vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(itir>>2)&0x3f);
+ vcpu_itc_no_srlz(current,is_data?6:1,address,pteval,-1UL,(itir>>2)&0x3f);
return;
}
else vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
@@ -768,7 +754,7 @@ if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware. (ignoring
vcpu_set_gr(current,8,-1L);
break;
default:
- printf("ia64_handle_break: bad ssc code %lx, iip=%p\n",ssc,regs->cr_iip);
+ printf("ia64_handle_break: bad ssc code %lx, iip=%p, b0=%p\n",ssc,regs->cr_iip,regs->b0);
break;
}
vcpu_increment_iip(current);
diff --git a/xen/arch/ia64/regionreg.c b/xen/arch/ia64/regionreg.c
index 6653d4b6a8..a40d0098e3 100644
--- a/xen/arch/ia64/regionreg.c
+++ b/xen/arch/ia64/regionreg.c
@@ -274,6 +274,7 @@ int set_one_rr(unsigned long rr, unsigned long val)
return 0;
}
+#ifdef CONFIG_VTI
memrrv.rrval = rrv.rrval;
if (rreg == 7) {
newrrv.rid = newrid;
@@ -290,6 +291,15 @@ int set_one_rr(unsigned long rr, unsigned long val)
if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
set_rr(rr,newrrv.rrval);
}
+#else
+ memrrv.rrval = rrv.rrval;
+ newrrv.rid = newrid;
+ newrrv.ve = 1; // VHPT now enabled for region 7!!
+ newrrv.ps = PAGE_SHIFT;
+ if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
+ if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
+ else set_rr(rr,newrrv.rrval);
+#endif
return 1;
}
diff --git a/xen/arch/ia64/tools/mkbuildtree b/xen/arch/ia64/tools/mkbuildtree
index 5964c836c8..18d0c72c67 100644
--- a/xen/arch/ia64/tools/mkbuildtree
+++ b/xen/arch/ia64/tools/mkbuildtree
@@ -259,7 +259,7 @@ softlink include/asm-ia64/string.h include/asm-ia64/string.h
softlink include/asm-ia64/thread_info.h include/asm-ia64/thread_info.h
softlink include/asm-ia64/timex.h include/asm-ia64/timex.h
softlink include/asm-ia64/topology.h include/asm-ia64/topology.h
-softlink include/asm-ia64/uaccess.h include/asm-ia64/uaccess.h
+cp_patch include/asm-ia64/uaccess.h include/asm-ia64/uaccess.h uaccess.h
softlink include/asm-ia64/unaligned.h include/asm-ia64/unaligned.h
softlink include/asm-ia64/unistd.h include/asm-ia64/unistd.h
softlink include/asm-ia64/unwind.h include/asm-ia64/unwind.h
diff --git a/xen/arch/ia64/vcpu.c b/xen/arch/ia64/vcpu.c
index b55e5b6bd7..45ae1bc656 100644
--- a/xen/arch/ia64/vcpu.c
+++ b/xen/arch/ia64/vcpu.c
@@ -43,8 +43,9 @@ typedef union {
#ifdef PRIVOP_ADDR_COUNT
struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = {
- { "rsm", { 0 }, { 0 }, 0 },
- { "ssm", { 0 }, { 0 }, 0 }
+ { "=ifa", { 0 }, { 0 }, 0 },
+ { "thash", { 0 }, { 0 }, 0 },
+ 0
};
extern void privop_count_addr(unsigned long addr, int inst);
#define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
@@ -135,7 +136,7 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
struct ia64_psr psr, imm, *ipsr;
REGS *regs = vcpu_regs(vcpu);
- PRIVOP_COUNT_ADDR(regs,_RSM);
+ //PRIVOP_COUNT_ADDR(regs,_RSM);
// TODO: All of these bits need to be virtualized
// TODO: Only allowed for current vcpu
__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
@@ -183,7 +184,7 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
REGS *regs = vcpu_regs(vcpu);
UINT64 mask, enabling_interrupts = 0;
- PRIVOP_COUNT_ADDR(regs,_SSM);
+ //PRIVOP_COUNT_ADDR(regs,_SSM);
// TODO: All of these bits need to be virtualized
__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
imm = *(struct ia64_psr *)&imm24;
@@ -369,6 +370,8 @@ IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
{
UINT64 val = PSCB(vcpu,ifa);
+ REGS *regs = vcpu_regs(vcpu);
+ PRIVOP_COUNT_ADDR(regs,_GET_IFA);
*pval = val;
return (IA64_NO_FAULT);
}
@@ -422,6 +425,8 @@ IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
{
//return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
UINT64 val = PSCB(vcpu,iha);
+ REGS *regs = vcpu_regs(vcpu);
+ PRIVOP_COUNT_ADDR(regs,_THASH);
*pval = val;
return (IA64_NO_FAULT);
}
@@ -539,7 +544,7 @@ void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
} else
#endif // CONFIG_VTI
{
- if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return;
+ /* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
if (test_bit(vector,PSCBX(vcpu,irr))) {
//printf("vcpu_pend_interrupt: overrun\n");
}
@@ -569,10 +574,10 @@ UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;
p = &PSCBX(vcpu,irr[3]);
- q = &PSCB(vcpu,delivery_mask[3]);
+ /* q = &PSCB(vcpu,delivery_mask[3]); */
r = &PSCBX(vcpu,insvc[3]);
for (i = 3; ; p--, q--, r--, i--) {
- bits = *p & *q;
+ bits = *p /* & *q */;
if (bits) break; // got a potential interrupt
if (*r) {
// nothing in this word which is pending+inservice
@@ -1589,7 +1594,8 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64
// addresses never get flushed. More work needed if this
// ever happens.
//printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
- vhpt_insert(vaddr,pte,logps<<2);
+ if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
+ else vhpt_insert(vaddr,pte,logps<<2);
}
// even if domain pagesize is larger than PAGE_SIZE, just put
// PAGE_SIZE mapping in the vhpt for now, else purging is complicated
diff --git a/xen/arch/ia64/vhpt.c b/xen/arch/ia64/vhpt.c
index b535f9fc56..86495a8fe8 100644
--- a/xen/arch/ia64/vhpt.c
+++ b/xen/arch/ia64/vhpt.c
@@ -87,6 +87,37 @@ void vhpt_map(void)
ia64_srlz_i();
}
+void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
+{
+ unsigned long mask = (1L << logps) - 1;
+ int i;
+
+ if (logps-PAGE_SHIFT > 10) {
+ // if this happens, we may want to revisit this algorithm
+ printf("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
+ while(1);
+ }
+ if (logps-PAGE_SHIFT > 2) {
+ // FIXME: Should add counter here to see how often this
+ // happens (e.g. for 16MB pages!) and determine if it
+ // is a performance problem. On a quick look, it takes
+ // about 39000 instrs for a 16MB page and it seems to occur
+ // only a few times/second, so OK for now.
+ // An alternate solution would be to just insert the one
+ // 16KB in the vhpt (but with the full mapping)?
+ //printf("vhpt_multiple_insert: logps-PAGE_SHIFT==%d,"
+ //"va=%p, pa=%p, pa-masked=%p\n",
+ //logps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
+ //(pte&_PFN_MASK)&~mask);
+ }
+ vaddr &= ~mask;
+ pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
+ for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
+ vhpt_insert(vaddr,pte,logps<<2);
+ vaddr += PAGE_SIZE;
+ }
+}
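+
+/*
+ * Illustrative example (assuming 16KB Xen pages, i.e. PAGE_SHIFT == 14, as
+ * the comment above suggests): a 16MB guest mapping (logps == 24) is broken
+ * into 1 << (24 - 14) == 1024 individual PAGE_SIZE VHPT entries by the loop
+ * above.
+ */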
+
void vhpt_init(void)
{
unsigned long vhpt_total_size, vhpt_alignment, vhpt_imva;
diff --git a/xen/arch/ia64/vmmu.c b/xen/arch/ia64/vmmu.c
index c39d6f2851..60126b23b2 100644
--- a/xen/arch/ia64/vmmu.c
+++ b/xen/arch/ia64/vmmu.c
@@ -454,12 +454,13 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TC;
+ data.tc = 1;
data.cl=ISIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
while (ovl) {
@@ -467,9 +468,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
panic("Tlb conflict!!");
return;
}
- sections.v = THASH_SECTION_TC;
- thash_purge_entries(hcb, &data, sections);
- thash_insert(hcb, &data, ifa);
+ thash_purge_and_insert(hcb, &data);
return IA64_NO_FAULT;
}
@@ -488,11 +487,12 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TC;
+ data.tc = 1;
data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
if (ovl) {
@@ -500,42 +500,27 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
panic("Tlb conflict!!");
return;
}
- sections.v = THASH_SECTION_TC;
- thash_purge_entries(hcb, &data, sections);
- thash_insert(hcb, &data, ifa);
+ thash_purge_and_insert(hcb, &data);
return IA64_NO_FAULT;
}
-IA64FAULT insert_foreignmap(VCPU *vcpu, UINT64 pte, UINT64 ps, UINT64 va)
+/*
+ * Return 0 on success of the lock/unlock operation, non-zero on failure
+ */
+int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
{
- thash_data_t data, *ovl;
thash_cb_t *hcb;
- search_section_t sections;
- rr_t vrr;
+ rr_t vrr;
+ u64 preferred_size;
- hcb = vmx_vcpu_get_vtlb(vcpu);
- data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
- data.itir=0;
- data.ps = ps;
- data.vadr=PAGEALIGN(va,ps);
- data.section=THASH_TLB_FM;
- data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, va, &vrr);
- data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR|THASH_SECTION_TC|THASH_SECTION_FM;
-
- ovl = thash_find_overlap(hcb, &data, sections);
- if (ovl) {
- // generate MCA.
- panic("Foreignmap Tlb conflict!!");
- return;
- }
- thash_insert(hcb, &data, va);
- return IA64_NO_FAULT;
+ hcb = vmx_vcpu_get_vtlb(vcpu);
+ va = PAGEALIGN(va,vrr.ps);
+ preferred_size = PSIZE(vrr.ps);
+ return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
}
-
IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
{
@@ -548,11 +533,12 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TR;
+ data.tc = 0;
data.cl=ISIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
if (ovl) {
@@ -560,7 +546,8 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
panic("Tlb conflict!!");
return;
}
- sections.v=THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
thash_purge_entries(hcb, &data, sections);
thash_tr_insert(hcb, &data, ifa, idx);
return IA64_NO_FAULT;
@@ -579,11 +566,12 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TR;
+ data.tc = 0;
data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
while (ovl) {
@@ -591,7 +579,8 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
panic("Tlb conflict!!");
return;
}
- sections.v=THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
thash_purge_entries(hcb, &data, sections);
thash_tr_insert(hcb, &data, ifa, idx);
return IA64_NO_FAULT;
@@ -607,7 +596,8 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps)
hcb = vmx_vcpu_get_vtlb(vcpu);
rr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
+ sections.tr = 1;
+ sections.tc = 1;
thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB);
return IA64_NO_FAULT;
}
@@ -619,7 +609,8 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps)
search_section_t sections;
hcb = vmx_vcpu_get_vtlb(vcpu);
rr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
+ sections.tr = 1;
+ sections.tc = 1;
thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB);
return IA64_NO_FAULT;
}
@@ -632,7 +623,8 @@ IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps)
thash_data_t data, *ovl;
hcb = vmx_vcpu_get_vtlb(vcpu);
vrr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
vadr = PAGEALIGN(vadr, ps);
thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB);
diff --git a/xen/arch/ia64/vmx_ivt.S b/xen/arch/ia64/vmx_ivt.S
index 9647386a8c..407dc4cd86 100644
--- a/xen/arch/ia64/vmx_ivt.S
+++ b/xen/arch/ia64/vmx_ivt.S
@@ -180,7 +180,7 @@ ENTRY(vmx_dtlb_miss)
mov r29=cr.ipsr;
;;
tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6)br.sptk vmx_fault_1
+(p6)br.sptk vmx_fault_2
mov r16 = cr.ifa
;;
thash r17 = r16
@@ -249,9 +249,9 @@ ENTRY(vmx_alt_itlb_miss)
movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
;;
and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
- shr.u r18=r16,57 // move address bit 61 to bit 4
+ shr.u r18=r16,55 // move address bit 59 to bit 4
;;
- andcm r18=0x10,r18 // bit 4=~address-bit(61)
+ and r18=0x10,r18 // bit 4=address-bit(61)
or r19=r17,r19 // insert PTE control bits into r19
;;
or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
@@ -280,11 +280,11 @@ ENTRY(vmx_alt_dtlb_miss)
;;
and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
- shr.u r18=r16,57 // move address bit 61 to bit 4
+ shr.u r18=r16,55 // move address bit 59 to bit 4
and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
;;
- andcm r18=0x10,r18 // bit 4=~address-bit(61)
+ and r18=0x10,r18 // bit 4=address-bit(61)
(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
dep r24=-1,r24,IA64_PSR_ED_BIT,1
or r19=r19,r17 // insert PTE control bits into r19
@@ -346,7 +346,12 @@ END(vmx_daccess_bit)
ENTRY(vmx_break_fault)
mov r31=pr
mov r19=11
- br.sptk.many vmx_dispatch_break_fault
+ mov r30=cr.iim
+ mov r29=0x1100
+ ;;
+ cmp4.eq p6,p7=r29,r30
+ (p6) br.dptk.few vmx_hypercall_dispatch
+ (p7) br.sptk.many vmx_dispatch_break_fault
END(vmx_break_fault)
.org vmx_ia64_ivt+0x3000
@@ -929,10 +934,9 @@ END(vmx_dispatch_tlb_miss)
ENTRY(vmx_dispatch_break_fault)
- cmp.ne pEml,pNonEml=r0,r0 /* force pNonEml =1, don't save r4 ~ r7 */
- ;;
VMX_SAVE_MIN_WITH_COVER_R19
;;
+ ;;
alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
mov out0=cr.ifa
adds out1=16,sp
@@ -951,9 +955,37 @@ ENTRY(vmx_dispatch_break_fault)
;;
mov rp=r14
br.call.sptk.many b6=vmx_ia64_handle_break
+ ;;
END(vmx_dispatch_break_fault)
+ENTRY(vmx_hypercall_dispatch)
+ VMX_SAVE_MIN_WITH_COVER
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ ssm psr.i // restore psr.i
+ adds r3=16,r2 // set up second base pointer
+ ;;
+ VMX_SAVE_REST
+ ;;
+ movl r14=ia64_leave_hypervisor
+ movl r2=hyper_call_table
+ ;;
+ mov rp=r14
+ shladd r2=r15,3,r2
+ ;;
+ ld8 r2=[r2]
+ ;;
+ mov b6=r2
+ ;;
+ br.call.sptk.many b6=b6
+ ;;
+END(vmx_hypercall_dispatch)
+
+
+
ENTRY(vmx_dispatch_interrupt)
cmp.ne pEml,pNonEml=r0,r0 /* force pNonEml =1, don't save r4 ~ r7 */
;;
@@ -976,3 +1008,39 @@ ENTRY(vmx_dispatch_interrupt)
mov rp=r14
br.call.sptk.many b6=vmx_ia64_handle_irq
END(vmx_dispatch_interrupt)
+
+
+
+ .rodata
+ .align 8
+ .globl hyper_call_table
+hyper_call_table:
+ data8 hyper_not_support //hyper_set_trap_table /* 0 */
+ data8 hyper_mmu_update
+ data8 hyper_not_support //hyper_set_gdt
+ data8 hyper_not_support //hyper_stack_switch
+ data8 hyper_not_support //hyper_set_callbacks
+ data8 hyper_not_support //hyper_fpu_taskswitch /* 5 */
+ data8 hyper_sched_op
+ data8 hyper_dom0_op
+ data8 hyper_not_support //hyper_set_debugreg
+ data8 hyper_not_support //hyper_get_debugreg
+ data8 hyper_not_support //hyper_update_descriptor /* 10 */
+ data8 hyper_not_support //hyper_set_fast_trap
+ data8 hyper_dom_mem_op
+ data8 hyper_not_support //hyper_multicall
+ data8 hyper_not_support //hyper_update_va_mapping
+ data8 hyper_not_support //hyper_set_timer_op /* 15 */
+ data8 hyper_event_channel_op
+ data8 hyper_xen_version
+ data8 hyper_not_support //hyper_console_io
+ data8 hyper_not_support //hyper_physdev_op
+ data8 hyper_not_support //hyper_grant_table_op /* 20 */
+ data8 hyper_not_support //hyper_vm_assist
+ data8 hyper_not_support //hyper_update_va_mapping_otherdomain
+ data8 hyper_not_support //hyper_switch_vm86
+ data8 hyper_not_support //hyper_boot_vcpu
+ data8 hyper_not_support //hyper_ni_hypercall /* 25 */
+ data8 hyper_not_support //hyper_mmuext_op
+ data8 hyper_lock_page
+ data8 hyper_set_shared_page
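
The vmx_break_fault/vmx_hypercall_dispatch changes above route a break whose iim is 0x1100 through hyper_call_table, indexed by the hypercall number held in r15. A compact C model of that dispatch, with made-up handler bodies and the two table slots that are actually wired up above:

#include <stdio.h>

#define HYPERCALL_IIM 0x1100
#define NR_HYPERCALLS 28

typedef long (*hypercall_fn_t)(void);

static long hyper_not_support(void) { return -38; /* -ENOSYS */ }
static long hyper_dom0_op(void)     { return 0; }
static long hyper_xen_version(void) { return (3 << 16) | 0; }

static hypercall_fn_t hyper_call_table[NR_HYPERCALLS];

static void init_table(void)
{
    int i;
    for (i = 0; i < NR_HYPERCALLS; i++)
        hyper_call_table[i] = hyper_not_support;
    hyper_call_table[7]  = hyper_dom0_op;      /* slots filled in the table above */
    hyper_call_table[17] = hyper_xen_version;
}

static long handle_break(unsigned long iim, unsigned long nr)
{
    if (iim != HYPERCALL_IIM)
        return -1;                     /* not a hypercall: take the normal break path */
    if (nr >= NR_HYPERCALLS)
        return hyper_not_support();
    return hyper_call_table[nr]();     /* shladd r2=r15,3,r2 ; ld8 r2=[r2] ; br.call b6 */
}

int main(void)
{
    init_table();
    printf("xen_version hypercall -> %ld\n", handle_break(HYPERCALL_IIM, 17));
    return 0;
}
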
diff --git a/xen/arch/ia64/vmx_minstate.h b/xen/arch/ia64/vmx_minstate.h
index afee6516d9..76f8e7f065 100644
--- a/xen/arch/ia64/vmx_minstate.h
+++ b/xen/arch/ia64/vmx_minstate.h
@@ -282,11 +282,9 @@
;; \
.mem.offset 0,0; st8.spill [r4]=r20,16; \
.mem.offset 8,0; st8.spill [r5]=r21,16; \
- mov r18=b6; \
;; \
.mem.offset 0,0; st8.spill [r4]=r22,16; \
.mem.offset 8,0; st8.spill [r5]=r23,16; \
- mov r19=b7; \
;; \
.mem.offset 0,0; st8.spill [r4]=r24,16; \
.mem.offset 8,0; st8.spill [r5]=r25,16; \
@@ -296,9 +294,11 @@
;; \
.mem.offset 0,0; st8.spill [r4]=r28,16; \
.mem.offset 8,0; st8.spill [r5]=r29,16; \
+ mov r26=b6; \
;; \
.mem.offset 0,0; st8.spill [r4]=r30,16; \
.mem.offset 8,0; st8.spill [r5]=r31,16; \
+ mov r27=b7; \
;; \
mov r30=ar.unat; \
;; \
@@ -317,8 +317,8 @@
adds r2=PT(B6)-PT(F10),r2; \
adds r3=PT(B7)-PT(F11),r3; \
;; \
- st8 [r2]=r18,16; /* b6 */ \
- st8 [r3]=r19,16; /* b7 */ \
+ st8 [r2]=r26,16; /* b6 */ \
+ st8 [r3]=r27,16; /* b7 */ \
;; \
st8 [r2]=r9; /* ar.csd */ \
st8 [r3]=r10; /* ar.ssd */ \
diff --git a/xen/arch/ia64/vmx_process.c b/xen/arch/ia64/vmx_process.c
index 2c541af113..8ab671cdb8 100644
--- a/xen/arch/ia64/vmx_process.c
+++ b/xen/arch/ia64/vmx_process.c
@@ -116,7 +116,6 @@ vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long is
case FW_HYPERCALL_EFI_GET_TIME:
{
unsigned long *tv, *tc;
- fooefi();
vmx_vcpu_get_gr(v, 32, &tv);
vmx_vcpu_get_gr(v, 33, &tc);
printf("efi_get_time(%p,%p) called...",tv,tc);
diff --git a/xen/arch/ia64/vtlb.c b/xen/arch/ia64/vtlb.c
index 6cbb4478b7..86565531bf 100644
--- a/xen/arch/ia64/vtlb.c
+++ b/xen/arch/ia64/vtlb.c
@@ -252,7 +252,7 @@ static thash_data_t *_vtlb_next_overlap_in_chain(thash_cb_t *hcb)
/* Find overlap TLB entry */
for (cch=priv->cur_cch; cch; cch = cch->next) {
- if ( ((1UL<<cch->section) & priv->s_sect.v) &&
+ if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr ) &&
__is_tlb_overlap(hcb, cch, priv->rid, priv->cl,
priv->_curva, priv->_eva) ) {
return cch;
@@ -322,7 +322,7 @@ int __tlb_to_vhpt(thash_cb_t *hcb,
void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx)
{
- if ( hcb->ht != THASH_TLB || entry->section != THASH_TLB_TR ) {
+ if ( hcb->ht != THASH_TLB || entry->tc ) {
panic("wrong parameter\n");
}
entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
@@ -356,7 +356,7 @@ thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
* 3: The caller need to make sure the new entry will not overlap
* with any existed entry.
*/
-static void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
+void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
{
thash_data_t *hash_table, *cch;
rr_t vrr;
@@ -411,7 +411,7 @@ void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
rr_t vrr;
vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
- if ( entry->ps != vrr.ps && entry->section==THASH_TLB_TC) {
+ if ( entry->ps != vrr.ps && entry->tc ) {
panic("Not support for multiple page size now\n");
}
entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
@@ -450,7 +450,7 @@ static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
thash_internal_t *priv = &hcb->priv;
int idx;
- if ( entry->section == THASH_TLB_TR ) {
+ if ( !entry->tc ) {
return rem_tr(hcb, entry->cl, entry->tr_idx);
}
rem_thash(hcb, entry);
@@ -525,19 +525,19 @@ thash_data_t *thash_find_overlap(thash_cb_t *hcb,
thash_data_t *in, search_section_t s_sect)
{
return (hcb->find_overlap)(hcb, in->vadr,
- in->ps, in->rid, in->cl, s_sect);
+ PSIZE(in->ps), in->rid, in->cl, s_sect);
}
static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
- u64 va, u64 ps, int rid, char cl, search_section_t s_sect)
+ u64 va, u64 size, int rid, char cl, search_section_t s_sect)
{
thash_data_t *hash_table;
thash_internal_t *priv = &hcb->priv;
u64 tag;
rr_t vrr;
- priv->_curva = PAGEALIGN(va,ps);
- priv->_eva = priv->_curva + PSIZE(ps);
+ priv->_curva = va & ~(size-1);
+ priv->_eva = priv->_curva + size;
priv->rid = rid;
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
priv->ps = vrr.ps;
@@ -553,15 +553,15 @@ static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
}
static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb,
- u64 va, u64 ps, int rid, char cl, search_section_t s_sect)
+ u64 va, u64 size, int rid, char cl, search_section_t s_sect)
{
thash_data_t *hash_table;
thash_internal_t *priv = &hcb->priv;
u64 tag;
rr_t vrr;
- priv->_curva = PAGEALIGN(va,ps);
- priv->_eva = priv->_curva + PSIZE(ps);
+ priv->_curva = va & ~(size-1);
+ priv->_eva = priv->_curva + size;
priv->rid = rid;
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
priv->ps = vrr.ps;
@@ -691,13 +691,46 @@ void thash_purge_entries_ex(thash_cb_t *hcb,
{
thash_data_t *ovl;
- ovl = (hcb->find_overlap)(hcb, va, ps, rid, cl, p_sect);
+ ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
while ( ovl != NULL ) {
(hcb->rem_hash)(hcb, ovl);
ovl = (hcb->next_overlap)(hcb);
};
}
+/*
+ * Purge overlapping TCs and then insert the new entry to emulate itc ops.
+ * Notes: only TC entries can be purged and inserted this way.
+ */
+void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in)
+{
+ thash_data_t *ovl;
+ search_section_t sections;
+
+#ifdef XEN_DEBUGGER
+ vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr);
+ if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
+ panic ("Oops, wrong call for purge_and_insert\n");
+ return;
+ }
+#endif
+ in->vadr = PAGEALIGN(in->vadr,in->ps);
+ in->ppn = PAGEALIGN(in->ppn, in->ps-12);
+ sections.tr = 0;
+ sections.tc = 1;
+ ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
+ in->rid, in->cl, sections);
+ if(ovl)
+ (hcb->rem_hash)(hcb, ovl);
+#ifdef XEN_DEBUGGER
+ ovl = (hcb->next_overlap)(hcb);
+ if ( ovl ) {
+ panic ("Oops, 2+ overlaps for purge_and_insert\n");
+ return;
+ }
+#endif
+ (hcb->ins_hash)(hcb, in, in->vadr);
+}
/*
* Purge all TCs or VHPT entries including those in Hash table.
@@ -766,6 +799,42 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb,
return NULL;
}
+/*
+ * Lock/Unlock TC if found.
+ * NOTES: only pages of the preferred size can be handled.
+ * return:
+ * 1: failure
+ * 0: success
+ */
+int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock)
+{
+ thash_data_t *ovl;
+ search_section_t sections;
+
+ sections.tr = 1;
+ sections.tc = 1;
+ ovl = (hcb->find_overlap)(hcb, va, size, rid, cl, sections);
+ if ( ovl ) {
+ if ( !ovl->tc ) {
+// panic("Oops, TR for lock\n");
+ return 0;
+ }
+ else if ( lock ) {
+ if ( ovl->locked ) {
+ DPRINTK("Oops, already locked entry\n");
+ }
+ ovl->locked = 1;
+ }
+ else if ( !lock ) {
+ if ( !ovl->locked ) {
+ DPRINTK("Oops, already unlocked entry\n");
+ }
+ ovl->locked = 0;
+ }
+ return 0;
+ }
+ return 1;
+}
/*
* Notifier when TLB is deleted from hash table and its collision chain.
@@ -824,7 +893,6 @@ void thash_init(thash_cb_t *hcb, u64 sz)
}
}
-
#ifdef VTLB_DEBUG
static u64 cch_length_statistics[MAX_CCH_LENGTH+1];
u64 sanity_check=0;
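
The vtlb.c changes above replace the old section enum with a per-entry tc bit and independent tr/tc selectors in search_section_t. A small sketch of that matching rule, with simplified types and an illustrative overlap test:

#include <stdio.h>

typedef struct {
    unsigned tc : 1;          /* 1 = translation cache entry, 0 = translation register */
    unsigned long va, len;
} entry_t;

typedef struct {
    unsigned tr : 1;
    unsigned tc : 1;
} search_section_t;

static int section_match(const entry_t *e, search_section_t s)
{
    return e->tc ? s.tc : s.tr;   /* mirrors _vtlb_next_overlap_in_chain() above */
}

static int overlaps(const entry_t *e, unsigned long va, unsigned long len)
{
    return e->va < va + len && va < e->va + e->len;
}

int main(void)
{
    entry_t tr = { .tc = 0, .va = 0x4000, .len = 0x4000 };
    entry_t tc = { .tc = 1, .va = 0x4000, .len = 0x4000 };
    search_section_t only_tc = { .tr = 0, .tc = 1 };

    printf("TR entry matches TC-only search: %d\n",
           section_match(&tr, only_tc) && overlaps(&tr, 0x4000, 0x1000));
    printf("TC entry matches TC-only search: %d\n",
           section_match(&tc, only_tc) && overlaps(&tc, 0x4000, 0x1000));
    return 0;
}
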
diff --git a/xen/arch/ia64/xenmem.c b/xen/arch/ia64/xenmem.c
index 3a749840a0..088611b22a 100644
--- a/xen/arch/ia64/xenmem.c
+++ b/xen/arch/ia64/xenmem.c
@@ -52,7 +52,7 @@ paging_init (void)
panic("Not enough memory to bootstrap Xen.\n");
printk("machine to physical table: 0x%lx\n", (u64)mpt_table);
- memset(mpt_table, 0x55, mpt_table_size);
+ memset(mpt_table, INVALID_M2P_ENTRY, mpt_table_size);
/* Any more setup here? On VMX enabled platform,
* there's no need to keep guest linear pg table,
diff --git a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
index bb9f83019a..6703b397ab 100644
--- a/xen/arch/ia64/xenmisc.c
+++ b/xen/arch/ia64/xenmisc.c
@@ -63,13 +63,7 @@ void sync_lazy_execstate_mask(cpumask_t mask) {}
void sync_lazy_execstate_all(void) {}
int grant_table_create(struct domain *d) { return 0; }
-void grant_table_destroy(struct domain *d)
-{
- printf("grant_table_destroy: domain_destruct not tested!!!\n");
- printf("grant_table_destroy: ensure atomic_* calls work in domain_destruct!!\n");
- dummy();
- return;
-}
+void grant_table_destroy(struct domain *d) { return; }
struct pt_regs *guest_cpu_user_regs(void) { return ia64_task_regs(current); }
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 1241e50921..4b49e17f37 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -193,23 +193,30 @@ static void __init init_amd(struct cpuinfo_x86 *c)
}
display_cacheinfo(c);
- detect_ht(c);
-
-#ifdef CONFIG_X86_HT
- /* AMD dual core looks like HT but isn't really. Hide it from the
- scheduler. This works around problems with the domain scheduler.
- Also probably gives slightly better scheduling and disables
- SMT nice which is harmful on dual core.
- TBD tune the domain scheduler for dual core. */
- if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
- smp_num_siblings = 1;
-#endif
if (cpuid_eax(0x80000000) >= 0x80000008) {
c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
if (c->x86_num_cores & (c->x86_num_cores - 1))
c->x86_num_cores = 1;
}
+
+#ifdef CONFIG_X86_HT
+ /*
+ * On an AMD dual-core setup the lower bits of the APIC id
+ * distinguish the cores. Assumes the number of cores is a
+ * power of two.
+ */
+ if (c->x86_num_cores > 1) {
+ int cpu = smp_processor_id();
+ unsigned bits = 0;
+ while ((1 << bits) < c->x86_num_cores)
+ bits++;
+ cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
+ phys_proc_id[cpu] >>= bits;
+ printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
+ cpu, c->x86_num_cores, cpu_core_id[cpu]);
+ }
+#endif
}
static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
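
The init_amd() hunk above derives the core ID from the low bits of the APIC ID and the package ID from the remaining bits. A user-space sketch of that split, with made-up input values:

#include <stdio.h>

static void split_apicid(unsigned apicid, unsigned num_cores,
                         unsigned *core_id, unsigned *pkg_id)
{
    unsigned bits = 0;
    while ((1u << bits) < num_cores)    /* same loop as in the patch */
        bits++;
    *core_id = apicid & ((1u << bits) - 1);
    *pkg_id  = apicid >> bits;
}

int main(void)
{
    unsigned core, pkg;
    split_apicid(/*apicid*/ 5, /*num_cores*/ 2, &core, &pkg);
    printf("APIC ID 5, 2 cores -> core %u, package %u\n", core, pkg);
    return 0;
}
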
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index fcb5c16ecb..49661af7d8 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -186,7 +186,7 @@ static inline int flag_is_changeable_p(unsigned long flag)
/* Probe for the CPUID instruction */
-int __init have_cpuid_p(void)
+static int __init have_cpuid_p(void)
{
return flag_is_changeable_p(X86_EFLAGS_ID);
}
@@ -194,7 +194,7 @@ int __init have_cpuid_p(void)
/* Do minimum CPU detection early.
Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
The others are not touched to avoid unwanted side effects. */
-void __init early_cpu_detect(void)
+static void __init early_cpu_detect(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -228,6 +228,10 @@ void __init early_cpu_detect(void)
}
early_intel_workaround(c);
+
+#ifdef CONFIG_X86_HT
+ phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+#endif
}
void __init generic_identify(struct cpuinfo_x86 * c)
@@ -416,25 +420,15 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
mcheck_init(c);
#endif
}
-/*
- * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
- */
-
-void __init dodgy_tsc(void)
-{
- if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
- ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC ))
- cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
-}
#ifdef CONFIG_X86_HT
void __init detect_ht(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
- int index_lsb, index_msb, tmp;
+ int index_msb, tmp;
int cpu = smp_processor_id();
- if (!cpu_has(c, X86_FEATURE_HT))
+ if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
return;
cpuid(1, &eax, &ebx, &ecx, &edx);
@@ -443,7 +437,6 @@ void __init detect_ht(struct cpuinfo_x86 *c)
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
} else if (smp_num_siblings > 1 ) {
- index_lsb = 0;
index_msb = 31;
if (smp_num_siblings > NR_CPUS) {
@@ -452,21 +445,34 @@ void __init detect_ht(struct cpuinfo_x86 *c)
return;
}
tmp = smp_num_siblings;
- while ((tmp & 1) == 0) {
- tmp >>=1 ;
- index_lsb++;
- }
- tmp = smp_num_siblings;
while ((tmp & 0x80000000 ) == 0) {
tmp <<=1 ;
index_msb--;
}
- if (index_lsb != index_msb )
+ if (smp_num_siblings & (smp_num_siblings - 1))
index_msb++;
phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
phys_proc_id[cpu]);
+
+ smp_num_siblings = smp_num_siblings / c->x86_num_cores;
+
+ tmp = smp_num_siblings;
+ index_msb = 31;
+ while ((tmp & 0x80000000) == 0) {
+ tmp <<=1 ;
+ index_msb--;
+ }
+
+ if (smp_num_siblings & (smp_num_siblings - 1))
+ index_msb++;
+
+ cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+
+ if (c->x86_num_cores > 1)
+ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+ cpu_core_id[cpu]);
}
}
#endif
@@ -511,7 +517,6 @@ extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
-void early_cpu_detect(void);
void __init early_cpu_init(void)
{
diff --git a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h
index 9df38d993c..5a1d4f163e 100644
--- a/xen/arch/x86/cpu/cpu.h
+++ b/xen/arch/x86/cpu/cpu.h
@@ -25,7 +25,6 @@ extern int get_model_name(struct cpuinfo_x86 *c);
extern void display_cacheinfo(struct cpuinfo_x86 *c);
extern void generic_identify(struct cpuinfo_x86 * c);
-extern int have_cpuid_p(void);
extern void early_intel_workaround(struct cpuinfo_x86 *c);
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 861723719b..ef713eb95e 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -74,6 +74,27 @@ static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
}
+/*
+ * find out the number of processor cores on the die
+ */
+static int __init num_cpu_cores(struct cpuinfo_x86 *c)
+{
+ unsigned int eax;
+
+ if (c->cpuid_level < 4)
+ return 1;
+
+ __asm__("cpuid"
+ : "=a" (eax)
+ : "0" (4), "c" (0)
+ : "bx", "dx");
+
+ if (eax & 0x1f)
+ return ((eax >> 26) + 1);
+ else
+ return 1;
+}
+
static void __init init_intel(struct cpuinfo_x86 *c)
{
unsigned int l2 = 0;
@@ -136,6 +157,8 @@ static void __init init_intel(struct cpuinfo_x86 *c)
if ( p )
strcpy(c->x86_model_id, p);
+ c->x86_num_cores = num_cpu_cores(c);
+
detect_ht(c);
/* Work around errata */
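
num_cpu_cores() above decodes CPUID leaf 4 (EAX=4, ECX=0): when EAX[4:0] is non-zero the leaf is valid and EAX[31:26]+1 gives the cores per package. A user-space illustration of the same decode on x86; the cpuid_count() wrapper is a plain stand-in, not kernel code:

#include <stdio.h>

static void cpuid_count(unsigned leaf, unsigned subleaf,
                        unsigned *a, unsigned *b, unsigned *c, unsigned *d)
{
    __asm__ volatile("cpuid"
                     : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
                     : "0"(leaf), "2"(subleaf));
}

static int num_cpu_cores(void)
{
    unsigned a, b, c, d;

    cpuid_count(0, 0, &a, &b, &c, &d);
    if (a < 4)                  /* same cpuid_level check as the patch */
        return 1;
    cpuid_count(4, 0, &a, &b, &c, &d);
    return (a & 0x1f) ? (int)((a >> 26) + 1) : 1;
}

int main(void)
{
    printf("cores per package (leaf 4): %d\n", num_cpu_cores());
    return 0;
}
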
diff --git a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c
index e8979417ec..2a269f11b6 100644
--- a/xen/arch/x86/dom0_ops.c
+++ b/xen/arch/x86/dom0_ops.c
@@ -179,8 +179,8 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
{
dom0_physinfo_t *pi = &op->u.physinfo;
- pi->ht_per_core = ht_per_core;
- pi->cores = num_online_cpus() / ht_per_core;
+ pi->ht_per_core = smp_num_siblings;
+ pi->cores = boot_cpu_data.x86_num_cores;
pi->total_pages = max_page;
pi->free_pages = avail_domheap_pages();
pi->cpu_khz = cpu_khz;
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 0903967796..ca7cf17aca 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -20,6 +20,7 @@
#include <asm/desc.h>
#include <asm/shadow.h>
#include <asm/e820.h>
+#include <public/acm_dom0_setup.h>
extern void dmi_scan_machine(void);
extern void generic_apic_probe(void);
@@ -66,7 +67,6 @@ boolean_param("noapic", skip_ioapic_setup);
int early_boot = 1;
-int ht_per_core = 1;
cpumask_t cpu_present_map;
/* Limits of Xen heap, used to initialise the allocator. */
@@ -394,12 +394,17 @@ void __init __start_xen(multiboot_info_t *mbi)
shadow_mode_init();
+ /* initialize access control security module */
+ acm_init();
+
/* Create initial domain 0. */
dom0 = do_createdomain(0, 0);
if ( dom0 == NULL )
panic("Error creating domain 0\n");
set_bit(_DOMF_privileged, &dom0->domain_flags);
+ /* post-create hook sets the security label */
+ acm_post_domain0_create(dom0->domain_id);
/* Grab the DOM0 command line. */
cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 80fe8122a4..c9e1ac9151 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -62,6 +62,8 @@ static int __initdata smp_b_stepping;
int smp_num_siblings = 1;
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
EXPORT_SYMBOL(phys_proc_id);
+int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
+EXPORT_SYMBOL(cpu_core_id);
/* bitmap of online cpus */
cpumask_t cpu_online_map;
@@ -923,6 +925,8 @@ static int boot_cpu_logical_apicid;
void *xquad_portio;
cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_core_map);
static void __init smp_boot_cpus(unsigned int max_cpus)
{
@@ -947,6 +951,9 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
cpus_clear(cpu_sibling_map[0]);
cpu_set(0, cpu_sibling_map[0]);
+ cpus_clear(cpu_core_map[0]);
+ cpu_set(0, cpu_core_map[0]);
+
/*
* If we couldn't find an SMP configuration at boot time,
* get out of here now!
@@ -959,6 +966,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
printk(KERN_NOTICE "Local APIC not detected."
" Using dummy APIC emulation.\n");
map_cpu_to_logical_apicid();
+ cpu_set(0, cpu_sibling_map[0]);
+ cpu_set(0, cpu_core_map[0]);
return;
}
@@ -1079,10 +1088,13 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
* construct cpu_sibling_map[], so that we can tell sibling CPUs
* efficiently.
*/
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpus_clear(cpu_sibling_map[cpu]);
+ cpus_clear(cpu_core_map[cpu]);
+ }
for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ struct cpuinfo_x86 *c = cpu_data + cpu;
int siblings = 0;
int i;
if (!cpu_isset(cpu, cpu_callout_map))
@@ -1092,7 +1104,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_isset(i, cpu_callout_map))
continue;
- if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ if (cpu_core_id[cpu] == cpu_core_id[i]) {
siblings++;
cpu_set(i, cpu_sibling_map[cpu]);
}
@@ -1102,8 +1114,22 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
cpu_set(cpu, cpu_sibling_map[cpu]);
}
- if (siblings != smp_num_siblings)
+ if (siblings != smp_num_siblings) {
printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
+ smp_num_siblings = siblings;
+ }
+
+ if (c->x86_num_cores > 1) {
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_isset(i, cpu_callout_map))
+ continue;
+ if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ cpu_set(i, cpu_core_map[cpu]);
+ }
+ }
+ } else {
+ cpu_core_map[cpu] = cpu_sibling_map[cpu];
+ }
}
if (nmi_watchdog == NMI_LOCAL_APIC)
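
The smp_boot_cpus() hunk above builds cpu_sibling_map from matching core IDs and cpu_core_map from matching package IDs. A condensed model with plain arrays in place of cpumask_t and invented IDs (one package, two cores, two threads each):

#include <stdio.h>

#define NCPUS 4

static int phys_proc_id[NCPUS] = { 0, 0, 0, 0 };   /* package IDs (made up) */
static int cpu_core_id[NCPUS]  = { 0, 0, 1, 1 };   /* core IDs (made up) */
static int sibling_map[NCPUS][NCPUS], core_map[NCPUS][NCPUS];

int main(void)
{
    int cpu, i;

    for (cpu = 0; cpu < NCPUS; cpu++)
        for (i = 0; i < NCPUS; i++) {
            if (cpu_core_id[cpu] == cpu_core_id[i])
                sibling_map[cpu][i] = 1;            /* same core => HT siblings */
            if (phys_proc_id[cpu] == phys_proc_id[i])
                core_map[cpu][i] = 1;               /* same package => core peers */
        }

    for (cpu = 0; cpu < NCPUS; cpu++)
        printf("cpu%d: sibling of cpu0=%d, core-peer of cpu3=%d\n",
               cpu, sibling_map[cpu][0], core_map[cpu][3]);
    return 0;
}
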
diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index 475474b99a..93827336d4 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -751,6 +751,7 @@ ENTRY(hypercall_table)
.long do_boot_vcpu
.long do_ni_hypercall /* 25 */
.long do_mmuext_op
+ .long do_policy_op /* 27 */
.rept NR_hypercalls-((.-hypercall_table)/4)
.long do_ni_hypercall
.endr
diff --git a/xen/common/Makefile b/xen/common/Makefile
index ee312fde92..892d407585 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -1,8 +1,8 @@
include $(BASEDIR)/Rules.mk
-ifeq ($(TARGET_ARCH),ia64)
-OBJS := $(subst dom_mem_ops.o,,$(OBJS))
+ifeq ($(TARGET_ARCH),ia64)
+#OBJS := $(subst dom_mem_ops.o,,$(OBJS))
OBJS := $(subst grant_table.o,,$(OBJS))
endif
diff --git a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
index 216af3854a..ec86fc1b62 100644
--- a/xen/common/dom0_ops.c
+++ b/xen/common/dom0_ops.c
@@ -19,6 +19,7 @@
#include <asm/current.h>
#include <public/dom0_ops.h>
#include <public/sched_ctl.h>
+#include <acm/acm_hooks.h>
extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
extern void arch_getdomaininfo_ctxt(
@@ -91,6 +92,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
{
long ret = 0;
dom0_op_t curop, *op = &curop;
+ void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
if ( !IS_PRIV(current->domain) )
return -EPERM;
@@ -101,6 +103,9 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
if ( op->interface_version != DOM0_INTERFACE_VERSION )
return -EACCES;
+ if ( acm_pre_dom0_op(op, &ssid) )
+ return -EACCES;
+
switch ( op->cmd )
{
@@ -184,8 +189,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
* domains will all share the second HT of each CPU. Since dom0 is on
* CPU 0, we favour high numbered CPUs in the event of a tie.
*/
- pro = ht_per_core - 1;
- for ( i = pro; i < num_online_cpus(); i += ht_per_core )
+ pro = smp_num_siblings - 1;
+ for ( i = pro; i < num_online_cpus(); i += smp_num_siblings )
if ( cnt[i] <= cnt[pro] )
pro = i;
@@ -357,6 +362,11 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
((d->domain_flags & DOMF_shutdown) ? DOMFLAGS_SHUTDOWN : 0) |
d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
+ if (d->ssid != NULL)
+ op->u.getdomaininfo.ssidref = ((struct acm_ssid_domain *)d->ssid)->ssidref;
+ else
+ op->u.getdomaininfo.ssidref = ACM_DEFAULT_SSID;
+
op->u.getdomaininfo.tot_pages = d->tot_pages;
op->u.getdomaininfo.max_pages = d->max_pages;
op->u.getdomaininfo.shared_info_frame =
@@ -493,7 +503,10 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
ret = arch_do_dom0_op(op,u_dom0_op);
}
-
+ if (!ret)
+ acm_post_dom0_op(op, ssid);
+ else
+ acm_fail_dom0_op(op, ssid);
return ret;
}
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 6f6e707667..29d10ef3d2 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -26,6 +26,7 @@
#include <public/xen.h>
#include <public/event_channel.h>
+#include <acm/acm_hooks.h>
#define bucket_from_port(d,p) \
((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
@@ -587,6 +588,9 @@ long do_event_channel_op(evtchn_op_t *uop)
if ( copy_from_user(&op, uop, sizeof(op)) != 0 )
return -EFAULT;
+ if (acm_pre_event_channel(&op))
+ return -EACCES;
+
switch ( op.cmd )
{
case EVTCHNOP_alloc_unbound:
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 683a051df3..9e7b7223e9 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -30,6 +30,7 @@
#include <xen/sched.h>
#include <xen/shadow.h>
#include <xen/mm.h>
+#include <acm/acm_hooks.h>
#define PIN_FAIL(_lbl, _rc, _f, _a...) \
do { \
@@ -357,6 +358,11 @@ __gnttab_map_grant_ref(
return GNTST_bad_gntref;
}
+ if (acm_pre_grant_map_ref(dom)) {
+ (void)__put_user(GNTST_permission_denied, &uop->handle);
+ return GNTST_permission_denied;
+ }
+
if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
unlikely(ld == rd) )
{
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index 3acaac8e1b..d63c235248 100644
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -1,10 +1,7 @@
/******************************************************************************
* kernel.c
*
- * This file should contain architecture-independent bootstrap and low-level
- * help routines. It's a bit x86/PC specific right now!
- *
- * Copyright (c) 2002-2003 K A Fraser
+ * Copyright (c) 2002-2005 K A Fraser
*/
#include <xen/config.h>
@@ -14,6 +11,7 @@
#include <xen/compile.h>
#include <xen/sched.h>
#include <asm/current.h>
+#include <public/version.h>
void cmdline_parse(char *cmdline)
{
@@ -83,11 +81,38 @@ void cmdline_parse(char *cmdline)
* Simple hypercalls.
*/
-long do_xen_version(int cmd)
+long do_xen_version(int cmd, void *arg)
{
- if ( cmd != 0 )
- return -ENOSYS;
- return (XEN_VERSION<<16) | (XEN_SUBVERSION);
+ switch ( cmd )
+ {
+ case XENVER_version:
+ {
+ return (XEN_VERSION<<16) | (XEN_SUBVERSION);
+ }
+
+ case XENVER_extraversion:
+ {
+ char extraversion[16];
+ safe_strcpy(extraversion, XEN_EXTRAVERSION);
+ if ( copy_to_user(arg, extraversion, sizeof(extraversion)) )
+ return -EFAULT;
+ return 0;
+ }
+
+ case XENVER_compile_info:
+ {
+ struct xen_compile_info info;
+ safe_strcpy(info.compiler, XEN_COMPILER);
+ safe_strcpy(info.compile_by, XEN_COMPILE_BY);
+ safe_strcpy(info.compile_domain, XEN_COMPILE_DOMAIN);
+ safe_strcpy(info.compile_date, XEN_COMPILE_DATE);
+ if ( copy_to_user(arg, &info, sizeof(info)) )
+ return -EFAULT;
+ return 0;
+ }
+ }
+
+ return -ENOSYS;
}
long do_vm_assist(unsigned int cmd, unsigned int type)
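
do_xen_version() above now takes a command and an output buffer: XENVER_version still returns the packed version word, while XENVER_extraversion and XENVER_compile_info copy fixed-size data back to the guest. A hedged guest-side sketch; the command numbering and the HYPERVISOR_xen_version() stub below are illustrative placeholders, not the real binding:

#include <stdio.h>
#include <string.h>

#define XENVER_version      0   /* assumed numbering, for illustration only */
#define XENVER_extraversion 1

/* placeholder for the real hypercall wrapper */
static long HYPERVISOR_xen_version(int cmd, void *arg)
{
    if (cmd == XENVER_version)
        return (3 << 16) | 0;                   /* e.g. Xen 3.0 */
    if (cmd == XENVER_extraversion) {
        strncpy(arg, "-unstable", 16);          /* 16-byte buffer, as in the patch */
        return 0;
    }
    return -38; /* -ENOSYS */
}

int main(void)
{
    char extra[16];
    long v = HYPERVISOR_xen_version(XENVER_version, NULL);

    HYPERVISOR_xen_version(XENVER_extraversion, extra);
    printf("Xen %ld.%ld%s\n", v >> 16, v & 0xffff, extra);
    return 0;
}
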
diff --git a/xen/common/policy_ops.c b/xen/common/policy_ops.c
new file mode 100644
index 0000000000..ff2b2f9ba4
--- /dev/null
+++ b/xen/common/policy_ops.c
@@ -0,0 +1,117 @@
+/******************************************************************************
+ * policy_ops.c
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * Process policy command requests from guest OS.
+ *
+ */
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/lib.h>
+#include <xen/mm.h>
+#include <public/policy_ops.h>
+#include <xen/sched.h>
+#include <xen/event.h>
+#include <xen/trace.h>
+#include <xen/console.h>
+#include <asm/shadow.h>
+#include <public/sched_ctl.h>
+#include <acm/acm_hooks.h>
+
+/* function prototypes defined in acm/acm_policy.c */
+int acm_set_policy(void *buf, u16 buf_size, u16 policy);
+int acm_get_policy(void *buf, u16 buf_size);
+int acm_dump_statistics(void *buf, u16 buf_size);
+
+typedef enum policyoperation {
+ POLICY, /* access to policy interface (early drop) */
+ GETPOLICY, /* dump policy cache */
+ SETPOLICY, /* set policy cache (controls security) */
+ DUMPSTATS /* dump policy statistics */
+} policyoperation_t;
+
+int
+acm_authorize_policyops(struct domain *d, policyoperation_t pops)
+{
+ /* currently, all policy management functions are restricted to privileged domains;
+ * finer-grained privileges for policy operations will be introduced later
+ */
+ if (!IS_PRIV(d)) {
+ printk("%s: Policy management authorization denied ERROR!\n", __func__);
+ return ACM_ACCESS_DENIED;
+ }
+ return ACM_ACCESS_PERMITTED;
+}
+
+long do_policy_op(policy_op_t *u_policy_op)
+{
+ long ret = 0;
+ policy_op_t curop, *op = &curop;
+
+ /* check here policy decision for policy commands */
+ /* for now allow DOM0 only, later decide independently */
+ if (acm_authorize_policyops(current->domain, POLICY))
+ return -EACCES;
+
+ if ( copy_from_user(op, u_policy_op, sizeof(*op)) )
+ return -EFAULT;
+
+ if ( op->interface_version != POLICY_INTERFACE_VERSION )
+ return -EACCES;
+
+ switch ( op->cmd )
+ {
+ case POLICY_SETPOLICY:
+ {
+ if (acm_authorize_policyops(current->domain, SETPOLICY))
+ return -EACCES;
+ printkd("%s: setting policy.\n", __func__);
+ ret = acm_set_policy(op->u.setpolicy.pushcache, op->u.setpolicy.pushcache_size, op->u.setpolicy.policy_type);
+ if (ret == ACM_OK)
+ ret = 0;
+ else
+ ret = -ESRCH;
+ }
+ break;
+
+ case POLICY_GETPOLICY:
+ {
+ if (acm_authorize_policyops(current->domain, GETPOLICY))
+ return -EACCES;
+ printkd("%s: getting policy.\n", __func__);
+ ret = acm_get_policy(op->u.getpolicy.pullcache, op->u.getpolicy.pullcache_size);
+ if (ret == ACM_OK)
+ ret = 0;
+ else
+ ret = -ESRCH;
+ }
+ break;
+
+ case POLICY_DUMPSTATS:
+ {
+ if (acm_authorize_policyops(current->domain, DUMPSTATS))
+ return -EACCES;
+ printkd("%s: dumping statistics.\n", __func__);
+ ret = acm_dump_statistics(op->u.dumpstats.pullcache, op->u.dumpstats.pullcache_size);
+ if (ret == ACM_OK)
+ ret = 0;
+ else
+ ret = -ESRCH;
+ }
+ break;
+
+ default:
+ ret = -ESRCH;
+
+ }
+ return ret;
+}
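
do_policy_op() above expects a policy_op_t carrying a command, the interface version, and (for POLICY_SETPOLICY) a push-cache buffer, its size, and the policy type. A minimal sketch of what a privileged caller would fill in; the constant values and the stubbed dispatch are illustrative, and the real tooling lives under tools/policy:

#include <stdint.h>
#include <stdio.h>

#define POLICY_INTERFACE_VERSION 0xAAAA0001u   /* illustrative value */
#define POLICY_SETPOLICY         1             /* illustrative value */

typedef struct {
    uint32_t cmd;
    uint32_t interface_version;
    union {
        struct {
            void    *pushcache;
            uint16_t pushcache_size;
            uint16_t policy_type;
        } setpolicy;
    } u;
} policy_op_t;

/* stand-in for the hypercall path that reaches do_policy_op() */
static long do_policy_op_stub(policy_op_t *op)
{
    printf("cmd=%u version=%#x size=%u type=%u\n",
           op->cmd, op->interface_version,
           (unsigned)op->u.setpolicy.pushcache_size,
           (unsigned)op->u.setpolicy.policy_type);
    return 0;
}

int main(void)
{
    static uint8_t binary_policy[64];          /* stand-in for a compiled policy */
    policy_op_t op = {
        .cmd = POLICY_SETPOLICY,
        .interface_version = POLICY_INTERFACE_VERSION,
        .u.setpolicy = {
            .pushcache = binary_policy,
            .pushcache_size = sizeof(binary_policy),
            .policy_type = 3,                  /* illustrative policy code */
        },
    };
    return (int)do_policy_op_stub(&op);
}
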
diff --git a/xen/include/acm/acm_core.h b/xen/include/acm/acm_core.h
new file mode 100644
index 0000000000..e404b455ad
--- /dev/null
+++ b/xen/include/acm/acm_core.h
@@ -0,0 +1,117 @@
+/****************************************************************
+ * acm_core.h
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * sHype header file describing core data types and constants
+ * for the access control module and relevant policies
+ *
+ */
+#ifndef _ACM_CORE_H
+#define _ACM_CORE_H
+
+#include <xen/spinlock.h>
+#include <public/acm.h>
+#include <public/policy_ops.h>
+
+/* Xen-internal representation of the binary policy */
+struct acm_binary_policy {
+ u16 primary_policy_code;
+ u16 secondary_policy_code;
+ void *primary_binary_policy;
+ void *secondary_binary_policy;
+
+};
+
+struct chwall_binary_policy {
+ u16 max_types;
+ u16 max_ssidrefs;
+ u16 max_conflictsets;
+ domaintype_t *ssidrefs; /* [max_ssidrefs][max_types] */
+ domaintype_t *conflict_aggregate_set; /* [max_types] */
+ domaintype_t *running_types; /* [max_types] */
+ domaintype_t *conflict_sets; /* [max_conflictsets][max_types]*/
+};
+
+struct ste_binary_policy {
+ u16 max_types;
+ u16 max_ssidrefs;
+ domaintype_t *ssidrefs; /* [max_ssidrefs][max_types] */
+ atomic_t ec_eval_count, gt_eval_count;
+ atomic_t ec_denied_count, gt_denied_count;
+ atomic_t ec_cachehit_count, gt_cachehit_count;
+};
+
+/* global acm policy */
+extern struct acm_binary_policy acm_bin_pol;
+extern struct chwall_binary_policy chwall_bin_pol;
+extern struct ste_binary_policy ste_bin_pol;
+/* use the lock when reading / changing binary policy ! */
+extern rwlock_t acm_bin_pol_rwlock;
+
+/* subject and object type definitions */
+enum acm_datatype { DOMAIN };
+
+/* defines the number of access decisions to other domains that can be cached;
+ * one entry per domain, TE does not distinguish evtchn from grant_table */
+#define ACM_TE_CACHE_SIZE 8
+enum acm_ste_flag { VALID, FREE };
+
+/* cache line:
+ * if cache_line.valid==VALID, then
+ * STE decision is cached as "permitted"
+ * on domain cache_line.id
+ */
+struct acm_ste_cache_line {
+ enum acm_ste_flag valid;
+ domid_t id;
+};
+
+/* general definition of a subject security id */
+struct acm_ssid_domain {
+ enum acm_datatype datatype; /* type of subject (e.g., partition) */
+ ssidref_t ssidref; /* combined security reference */
+ void *primary_ssid; /* primary policy ssid part (e.g. chinese wall) */
+ void *secondary_ssid; /* secondary policy ssid part (e.g. type enforcement) */
+ struct domain *subject; /* backpointer to subject structure */
+ domid_t domainid; /* replicated domain id */
+};
+
+/* chinese wall ssid type */
+struct chwall_ssid {
+ ssidref_t chwall_ssidref;
+};
+
+/* simple type enforcement ssid type */
+struct ste_ssid {
+ ssidref_t ste_ssidref;
+ struct acm_ste_cache_line ste_cache[ACM_TE_CACHE_SIZE]; /* decision cache */
+};
+
+/* macros to access ssidref for primary / secondary policy
+ * primary ssidref = lower 16 bit
+ * secondary ssidref = higher 16 bit
+ */
+#define GET_SSIDREF(POLICY, ssidref) \
+ (((POLICY) == acm_bin_pol.primary_policy_code) ? \
+ ((ssidref) & 0xffff) : ((ssidref) >> 16))
+
+/* macros to access ssid pointer for primary / secondary policy */
+#define GET_SSIDP(POLICY, ssid) \
+ (((POLICY) == acm_bin_pol.primary_policy_code) ? \
+ ((ssid)->primary_ssid) : ((ssid)->secondary_ssid))
+
+/* protos */
+int acm_init_domain_ssid(domid_t id, ssidref_t ssidref);
+int acm_free_domain_ssid(struct acm_ssid_domain *ssid);
+
+#endif
+
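
The GET_SSIDREF/GET_SSIDP macros above assume the primary policy's reference in the low 16 bits of a ssidref and the secondary policy's in the high 16 bits. A tiny self-contained demonstration of that packing:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t ssidref_t;

static ssidref_t make_ssidref(uint16_t primary, uint16_t secondary)
{
    return (ssidref_t)primary | ((ssidref_t)secondary << 16);
}

static uint16_t get_primary(ssidref_t s)   { return s & 0xffff; }
static uint16_t get_secondary(ssidref_t s) { return s >> 16; }

int main(void)
{
    ssidref_t s = make_ssidref(/* e.g. chwall */ 2, /* e.g. ste */ 5);
    printf("ssidref=%#x primary=%u secondary=%u\n",
           s, get_primary(s), get_secondary(s));
    return 0;
}
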
diff --git a/xen/include/acm/acm_endian.h b/xen/include/acm/acm_endian.h
new file mode 100644
index 0000000000..fd7229b846
--- /dev/null
+++ b/xen/include/acm/acm_endian.h
@@ -0,0 +1,88 @@
+/****************************************************************
+ * acm_endian.h
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Stefan Berger <stefanb@watson.ibm.com>
+ *
+ * Contributions:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * sHype header file defining endian-dependent functions for the
+ * big-endian policy interface
+ *
+ */
+#ifndef _ACM_ENDIAN_H
+#define _ACM_ENDIAN_H
+
+/* don't use these functions in performance critical sections! */
+
+/* set during initialization by testing */
+extern u8 little_endian;
+
+static inline u32 ntohl(u32 x)
+{
+ if (little_endian)
+ return
+ ( (((x) >> 24) & 0xff )|
+ (((x) >> 8) & 0xff00 )|
+ (((x) << 8) & 0xff0000 )|
+ (((x) << 24) & 0xff000000) );
+ else
+ return x;
+}
+
+static inline u16 ntohs(u16 x)
+{
+ if (little_endian)
+ return
+ ( (((x) >> 8) & 0xff )|
+ (((x) << 8) & 0xff00 ) );
+ else
+ return x;
+}
+
+#define htonl(x) ntohl(x)
+#define htons(x) ntohs(x)
+
+static inline void arrcpy16(u16 *dest, const u16 *src, size_t n)
+{
+ unsigned int i = 0;
+ while (i < n) {
+ dest[i] = htons(src[i]);
+ i++;
+ }
+}
+
+static inline void arrcpy32(u32 *dest, const u32 *src, size_t n)
+{
+ unsigned int i = 0;
+ while (i < n) {
+ dest[i] = htonl(src[i]);
+ i++;
+ }
+}
+
+static inline void arrcpy(void *dest, const void *src, unsigned int elsize, size_t n)
+{
+ switch (elsize) {
+ case sizeof(u16):
+ arrcpy16((u16 *)dest, (u16 *)src, n);
+ break;
+
+ case sizeof(u32):
+ arrcpy32((u32 *)dest, (u32 *)src, n);
+ break;
+
+ default:
+ memcpy(dest, src, elsize*n);
+ }
+}
+
+#endif
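
The header above relies on a little_endian flag that is set during initialization by testing. One conventional way to perform such a test, not necessarily the one the ACM init code uses, is to inspect the byte layout of a known 16-bit value:

#include <stdint.h>
#include <stdio.h>

static uint8_t little_endian;

static void detect_endianness(void)
{
    uint16_t probe = 0x0102;
    little_endian = (*(uint8_t *)&probe == 0x02);   /* low byte stored first => little endian */
}

int main(void)
{
    detect_endianness();
    printf("little_endian = %u\n", little_endian);
    return 0;
}
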
diff --git a/xen/include/acm/acm_hooks.h b/xen/include/acm/acm_hooks.h
new file mode 100644
index 0000000000..534d919ff4
--- /dev/null
+++ b/xen/include/acm/acm_hooks.h
@@ -0,0 +1,337 @@
+/****************************************************************
+ * acm_hooks.h
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * acm header file implementing the global (policy-independent)
+ * sHype hooks that are called throughout Xen.
+ *
+ */
+#ifndef _ACM_HOOKS_H
+#define _ACM_HOOKS_H
+
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <xen/types.h>
+#include <xen/lib.h>
+#include <xen/delay.h>
+#include <xen/sched.h>
+#include <public/acm.h>
+#include <acm/acm_core.h>
+#include <public/dom0_ops.h>
+#include <public/event_channel.h>
+#include <asm/current.h>
+
+/* if ACM_TRACE_MODE defined, all hooks should
+ * print a short trace message */
+/* #define ACM_TRACE_MODE */
+
+#ifdef ACM_TRACE_MODE
+# define traceprintk(fmt, args...) printk(fmt,## args)
+#else
+# define traceprintk(fmt, args...)
+#endif
+
+/* global variables */
+extern struct acm_operations *acm_primary_ops;
+extern struct acm_operations *acm_secondary_ops;
+
+/**********************************************************************************************
+ * HOOK structure and meaning (justifies a few words about our model):
+ *
+ * General idea: every policy-controlled system operation is reflected in a
+ * transaction in the system's security state
+ *
+ * Keeping the security state consistent requires "atomic" transactions.
+ * The name of the hooks to place around policy-controlled transactions
+ * reflects this. If authorizations do not involve security state changes,
+ * then and only then POST and FAIL hooks remain empty since we don't care
+ * about the eventual outcome of the operation from a security viewpoint.
+ *
+ * PURPOSE of hook types:
+ * ======================
+ * PRE-Hooks
+ * a) general authorization to guard a controlled system operation
+ * b) prepare security state change (means: fail hook must be able to "undo" this)
+ *
+ * POST-Hooks
+ * a) commit prepared state change
+ *
+ * FAIL-Hooks
+ * a) roll-back prepared security state change from PRE-Hook
+ *
+ *
+ * PLACEMENT of hook types:
+ * ========================
+ * PRE-Hooks must be called:
+ * a) before a guarded/controlled system operation is started
+ * (return is ACM_ACCESS_PERMITTED or ACM_ACCESS_DENIED or error)
+ * --> operation must be aborted if return is != ACM_ACCESS_PERMITTED
+ *
+ * POST-Hooks must be called:
+ * a) after successful transaction (no return value; commit shall never fail)
+ *
+ * FAIL-Hooks must be called:
+ * a) if the system transaction (operation) fails at some point after calling the PRE-hook
+ * (obviously the POST-Hook is not called in this case)
+ * b) if another (secondary) policy denies access in its PRE-Hook
+ * (policy layering is useful but requires additional handling)
+ *
+ *
+ *
+ * Hook model from a security transaction viewpoint:
+ *
+ * start-sys-ops--> prepare ----succeed-----> commit --> sys-ops success
+ * (pre-hook) \ (post-hook)
+ * \
+ * fail
+ * \
+ * \
+ * roll-back
+ * (fail-hook)
+ * \
+ * sys-ops error
+ *
+ *************************************************************************************************/
+
+struct acm_operations {
+ /* policy management functions (must always be defined!) */
+ int (*init_domain_ssid) (void **ssid, ssidref_t ssidref);
+ void (*free_domain_ssid) (void *ssid);
+ int (*dump_binary_policy) (u8 *buffer, u16 buf_size);
+ int (*set_binary_policy) (u8 *buffer, u16 buf_size);
+ int (*dump_statistics) (u8 *buffer, u16 buf_size);
+ /* domain management control hooks (can be NULL) */
+ int (*pre_domain_create) (void *subject_ssid, ssidref_t ssidref);
+ void (*post_domain_create) (domid_t domid, ssidref_t ssidref);
+ void (*fail_domain_create) (void *subject_ssid, ssidref_t ssidref);
+ void (*post_domain_destroy) (void *object_ssid, domid_t id);
+ /* event channel control hooks (can be NULL) */
+ int (*pre_eventchannel_unbound) (domid_t id);
+ void (*fail_eventchannel_unbound) (domid_t id);
+ int (*pre_eventchannel_interdomain) (domid_t id1, domid_t id2);
+ int (*fail_eventchannel_interdomain) (domid_t id1, domid_t id2);
+ /* grant table control hooks (can be NULL) */
+ int (*pre_grant_map_ref) (domid_t id);
+ void (*fail_grant_map_ref) (domid_t id);
+ int (*pre_grant_setup) (domid_t id);
+ void (*fail_grant_setup) (domid_t id);
+};
+
+static inline int acm_pre_domain_create (void *subject_ssid, ssidref_t ssidref)
+{
+ if ((acm_primary_ops->pre_domain_create != NULL) &&
+ acm_primary_ops->pre_domain_create (subject_ssid, ssidref))
+ return ACM_ACCESS_DENIED;
+ else if ((acm_secondary_ops->pre_domain_create != NULL) &&
+ acm_secondary_ops->pre_domain_create (subject_ssid, ssidref)) {
+ /* roll-back primary */
+ if (acm_primary_ops->fail_domain_create != NULL)
+ acm_primary_ops->fail_domain_create (subject_ssid, ssidref);
+ return ACM_ACCESS_DENIED;
+ } else
+ return ACM_ACCESS_PERMITTED;
+}
+
+static inline void acm_post_domain_create (domid_t domid, ssidref_t ssidref)
+{
+ if (acm_primary_ops->post_domain_create != NULL)
+ acm_primary_ops->post_domain_create (domid, ssidref);
+ if (acm_secondary_ops->post_domain_create != NULL)
+ acm_secondary_ops->post_domain_create (domid, ssidref);
+}
+
+static inline void acm_fail_domain_create (void *subject_ssid, ssidref_t ssidref)
+{
+ if (acm_primary_ops->fail_domain_create != NULL)
+ acm_primary_ops->fail_domain_create (subject_ssid, ssidref);
+ if (acm_secondary_ops->fail_domain_create != NULL)
+ acm_secondary_ops->fail_domain_create (subject_ssid, ssidref);
+}
+
+static inline void acm_post_domain_destroy (void *object_ssid, domid_t id)
+{
+ if (acm_primary_ops->post_domain_destroy != NULL)
+ acm_primary_ops->post_domain_destroy (object_ssid, id);
+ if (acm_secondary_ops->post_domain_destroy != NULL)
+ acm_secondary_ops->post_domain_destroy (object_ssid, id);
+ return;
+}
+
+/* event channel ops */
+
+static inline int acm_pre_eventchannel_unbound (domid_t id)
+{
+ if ((acm_primary_ops->pre_eventchannel_unbound != NULL) &&
+ acm_primary_ops->pre_eventchannel_unbound (id))
+ return ACM_ACCESS_DENIED;
+ else if ((acm_secondary_ops->pre_eventchannel_unbound != NULL) &&
+ acm_secondary_ops->pre_eventchannel_unbound (id)) {
+ /* roll-back primary */
+ if (acm_primary_ops->fail_eventchannel_unbound != NULL)
+ acm_primary_ops->fail_eventchannel_unbound (id);
+ return ACM_ACCESS_DENIED;
+ } else
+ return ACM_ACCESS_PERMITTED;
+}
+
+static inline int acm_pre_eventchannel_interdomain (domid_t id1, domid_t id2)
+{
+ if ((acm_primary_ops->pre_eventchannel_interdomain != NULL) &&
+ acm_primary_ops->pre_eventchannel_interdomain (id1, id2))
+ return ACM_ACCESS_DENIED;
+ else if ((acm_secondary_ops->pre_eventchannel_interdomain != NULL) &&
+ acm_secondary_ops->pre_eventchannel_interdomain (id1, id2)) {
+ /* roll-back primary */
+ if (acm_primary_ops->fail_eventchannel_interdomain != NULL)
+ acm_primary_ops->fail_eventchannel_interdomain (id1, id2);
+ return ACM_ACCESS_DENIED;
+ } else
+ return ACM_ACCESS_PERMITTED;
+}
+
+/************ Xen inline hooks ***************/
+
+/* small macro to make the hooks more readable
+ * (eliminates hooks if NULL policy is active)
+ */
+#if (ACM_USE_SECURITY_POLICY == ACM_NULL_POLICY)
+static inline int acm_pre_dom0_op(dom0_op_t *op, void **ssid)
+{ return 0; }
+#else
+static inline int acm_pre_dom0_op(dom0_op_t *op, void **ssid)
+{
+ int ret = -EACCES;
+ struct domain *d;
+
+ switch(op->cmd) {
+ case DOM0_CREATEDOMAIN:
+ ret = acm_pre_domain_create(current->domain->ssid, op->u.createdomain.ssidref);
+ break;
+ case DOM0_DESTROYDOMAIN:
+ d = find_domain_by_id(op->u.destroydomain.domain);
+ if (d != NULL) {
+ *ssid = d->ssid; /* save for post destroy when d is gone */
+ /* no policy-specific hook */
+ put_domain(d);
+ ret = 0;
+ }
+ break;
+ default:
+ ret = 0; /* ok */
+ }
+ return ret;
+}
+#endif
+
+
+#if (ACM_USE_SECURITY_POLICY == ACM_NULL_POLICY)
+static inline void acm_post_dom0_op(dom0_op_t *op, void *ssid)
+{ return; }
+#else
+static inline void acm_post_dom0_op(dom0_op_t *op, void *ssid)
+{
+ switch(op->cmd) {
+ case DOM0_CREATEDOMAIN:
+ /* initialize shared sHype security labels for the new domain */
+ acm_init_domain_ssid(op->u.createdomain.domain, op->u.createdomain.ssidref);
+ acm_post_domain_create(op->u.createdomain.domain, op->u.createdomain.ssidref);
+ break;
+ case DOM0_DESTROYDOMAIN:
+ acm_post_domain_destroy(ssid, op->u.destroydomain.domain);
+ /* free security ssid for the destroyed domain (also if running the null policy) */
+ acm_free_domain_ssid((struct acm_ssid_domain *)ssid);
+ break;
+ }
+}
+#endif
+
+
+#if (ACM_USE_SECURITY_POLICY == ACM_NULL_POLICY)
+static inline void acm_fail_dom0_op(dom0_op_t *op, void *ssid)
+{ return; }
+#else
+static inline void acm_fail_dom0_op(dom0_op_t *op, void *ssid)
+{
+ switch(op->cmd) {
+ case DOM0_CREATEDOMAIN:
+ acm_fail_domain_create(current->domain->ssid, op->u.createdomain.ssidref);
+ break;
+ }
+}
+#endif
+
+
+#if (ACM_USE_SECURITY_POLICY == ACM_NULL_POLICY)
+static inline int acm_pre_event_channel(evtchn_op_t *op)
+{ return 0; }
+#else
+static inline int acm_pre_event_channel(evtchn_op_t *op)
+{
+ int ret = -EACCES;
+
+ switch(op->cmd) {
+ case EVTCHNOP_alloc_unbound:
+ ret = acm_pre_eventchannel_unbound(op->u.alloc_unbound.dom);
+ break;
+ case EVTCHNOP_bind_interdomain:
+ ret = acm_pre_eventchannel_interdomain(op->u.bind_interdomain.dom1, op->u.bind_interdomain.dom2);
+ break;
+ default:
+ ret = 0; /* ok */
+ }
+ return ret;
+}
+#endif
+
+#if (ACM_USE_SECURITY_POLICY == ACM_NULL_POLICY)
+static inline int acm_pre_grant_map_ref(domid_t id)
+{ return 0; }
+#else
+static inline int acm_pre_grant_map_ref (domid_t id)
+{
+ if ((acm_primary_ops->pre_grant_map_ref != NULL) &&
+ acm_primary_ops->pre_grant_map_ref (id))
+ return ACM_ACCESS_DENIED;
+ else if ((acm_secondary_ops->pre_grant_map_ref != NULL) &&
+ acm_secondary_ops->pre_grant_map_ref (id)) {
+ /* roll-back primary */
+ if (acm_primary_ops->fail_grant_map_ref != NULL)
+ acm_primary_ops->fail_grant_map_ref (id);
+ return ACM_ACCESS_DENIED;
+ } else
+ return ACM_ACCESS_PERMITTED;
+}
+#endif
+
+
+#if (ACM_USE_SECURITY_POLICY == ACM_NULL_POLICY)
+static inline int acm_pre_grant_setup(domid_t id)
+{ return 0; }
+#else
+static inline int acm_pre_grant_setup (domid_t id)
+{
+ if ((acm_primary_ops->pre_grant_setup != NULL) &&
+ acm_primary_ops->pre_grant_setup (id))
+ return ACM_ACCESS_DENIED;
+ else if ((acm_secondary_ops->pre_grant_setup != NULL) &&
+ acm_secondary_ops->pre_grant_setup (id)) {
+ /* roll-back primary */
+ if (acm_primary_ops->fail_grant_setup != NULL)
+ acm_primary_ops->fail_grant_setup (id);
+ return ACM_ACCESS_DENIED;
+ } else
+ return ACM_ACCESS_PERMITTED;
+}
+#endif
+
+
+#endif
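
The PRE/POST/FAIL discipline documented in acm_hooks.h above reduces to a simple calling skeleton: PRE authorizes and prepares, POST commits on success, FAIL rolls back when the guarded operation aborts. A self-contained sketch with illustrative names:

#include <stdio.h>

#define ACCESS_PERMITTED 0
#define ACCESS_DENIED    1

static int  pre_hook(void)  { printf("pre: authorize + prepare state change\n"); return ACCESS_PERMITTED; }
static void post_hook(void) { printf("post: commit state change\n"); }
static void fail_hook(void) { printf("fail: roll back prepared change\n"); }

/* stand-in for the guarded system operation */
static int do_controlled_operation(int should_fail)
{
    return should_fail ? -1 : 0;
}

static int guarded_op(int should_fail)
{
    int rc;

    if (pre_hook() != ACCESS_PERMITTED)
        return -13;                    /* -EACCES: abort before the operation runs */
    rc = do_controlled_operation(should_fail);
    if (rc == 0)
        post_hook();
    else
        fail_hook();
    return rc;
}

int main(void)
{
    guarded_op(0);
    guarded_op(1);
    return 0;
}
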
diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h
index 442d49a382..9df0d907aa 100644
--- a/xen/include/asm-ia64/config.h
+++ b/xen/include/asm-ia64/config.h
@@ -177,8 +177,7 @@ void sort_main_extable(void);
// see include/asm-x86/atomic.h (different from standard linux)
#define _atomic_set(v,i) (((v).counter) = (i))
#define _atomic_read(v) ((v).counter)
-// FIXME following needs work
-#define atomic_compareandswap(old, new, v) old
+#define atomic_compareandswap(old, new, v) ((atomic_t){ cmpxchg(v, _atomic_read(old), _atomic_read(new)) })
// see include/asm-ia64/mm.h, handle remaining pfn_info uses until gone
#define pfn_info page
@@ -227,6 +226,8 @@ struct screen_info { };
#define FORCE_CRASH() asm("break 0;;");
+#define dummy() dummy_called(__FUNCTION__)
+
// these declarations got moved at some point, find a better place for them
extern int ht_per_core;
diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h
index 0f0e37895b..27ff16e560 100644
--- a/xen/include/asm-ia64/domain.h
+++ b/xen/include/asm-ia64/domain.h
@@ -2,18 +2,17 @@
#define __ASM_DOMAIN_H__
#include <linux/thread_info.h>
+#include <asm/tlb.h>
#ifdef CONFIG_VTI
#include <asm/vmx_vpd.h>
#include <asm/vmmu.h>
#include <asm/regionreg.h>
+#include <public/arch-ia64.h>
#endif // CONFIG_VTI
#include <xen/list.h>
extern void arch_do_createdomain(struct vcpu *);
-extern int arch_final_setup_guestos(
- struct vcpu *, struct vcpu_guest_context *);
-
extern void domain_relinquish_resources(struct domain *);
#ifdef CONFIG_VTI
@@ -36,7 +35,15 @@ struct arch_domain {
int imp_va_msb;
ia64_rr emul_phy_rr0;
ia64_rr emul_phy_rr4;
- u64 *pmt; /* physical to machine table */
+ unsigned long *pmt; /* physical to machine table */
+ /*
+ * max_pfn is the maximum page frame in guest physical space, including
+ * intermediate I/O ranges and memory holes. This differs from
+ * max_pages in the domain struct, which indicates the maximum memory size.
+ */
+ unsigned long max_pfn;
+ unsigned int section_nr;
+ mm_section_t *sections; /* Describe memory hole except for Dom0 */
#endif //CONFIG_VTI
u64 xen_vastart;
u64 xen_vaend;
diff --git a/xen/include/asm-ia64/event.h b/xen/include/asm-ia64/event.h
new file mode 100644
index 0000000000..e7b5cda8b1
--- /dev/null
+++ b/xen/include/asm-ia64/event.h
@@ -0,0 +1,16 @@
+/******************************************************************************
+ * event.h
+ *
+ * A nice interface for passing asynchronous events to guest OSes.
+ * (architecture-dependent part)
+ *
+ */
+
+#ifndef __ASM_EVENT_H__
+#define __ASM_EVENT_H__
+
+static inline void evtchn_notify(struct vcpu *v)
+{
+}
+
+#endif
diff --git a/xen/include/asm-ia64/mm.h b/xen/include/asm-ia64/mm.h
index a762ec6318..c84a7c781a 100644
--- a/xen/include/asm-ia64/mm.h
+++ b/xen/include/asm-ia64/mm.h
@@ -27,43 +27,12 @@ typedef unsigned long page_flags_t;
/*
* Per-page-frame information.
+ *
+ * Every architecture must ensure the following:
+ * 1. 'struct pfn_info' contains a 'struct list_head list'.
+ * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
*/
-
-//FIXME: This can go away when common/dom0_ops.c is fully arch-independent
-#if 0
-struct pfn_info
-{
- /* Each frame can be threaded onto a doubly-linked list. */
- struct list_head list;
- /* Context-dependent fields follow... */
- union {
-
- /* Page is in use by a domain. */
- struct {
- /* Owner of this page. */
- struct domain *domain;
- /* Reference count and various PGC_xxx flags and fields. */
- u32 count_info;
- /* Type reference count and various PGT_xxx flags and fields. */
- u32 type_info;
- } inuse;
-
- /* Page is on a free list. */
- struct {
- /* Mask of possibly-tainted TLBs. */
- unsigned long cpu_mask;
- /* Must be at same offset as 'u.inuse.count_flags'. */
- u32 __unavailable;
- /* Order-size of the free chunk this page is the head of. */
- u8 order;
- } free;
-
- } u;
-
- /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
- u32 tlbflush_timestamp;
-};
-#endif
+#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
struct page
{
@@ -82,7 +51,7 @@ struct page
/* Page is in use by a domain. */
struct {
/* Owner of this page. */
- u64 _domain;
+ u32 _domain;
/* Type reference count and various PGT_xxx flags and fields. */
u32 type_info;
} inuse;
@@ -104,37 +73,49 @@ struct page
#define set_page_count(p,v) atomic_set(&(p)->_count, v - 1)
-//FIXME: These can go away when common/dom0_ops.c is fully arch-independent
- /* The following page types are MUTUALLY EXCLUSIVE. */
+/* Still only a small set of flags defined so far on IA-64 */
+/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none (0<<29) /* no special uses of this page */
#define PGT_l1_page_table (1<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table (2<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table (3<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table (4<<29) /* using this page as an L4 page table? */
-#define PGT_gdt_page (5<<29) /* using this page in a GDT? */
-#define PGT_ldt_page (6<<29) /* using this page in an LDT? */
-#define PGT_writeable_page (7<<29) /* has writable mappings of this page? */
-#define PGT_type_mask (7<<29) /* Bits 29-31. */
+#define PGT_writeable_page (5<<29) /* has writable mappings of this page? */
+#define PGT_type_mask (5<<29) /* Bits 29-31. */
+
/* Has this page been validated for use as its current type? */
#define _PGT_validated 28
#define PGT_validated (1<<_PGT_validated)
- /* 28-bit count of uses of this frame as its current type. */
-#define PGT_count_mask ((1<<28)-1)
+/* Owning guest has pinned this page to its current type? */
+#define _PGT_pinned 27
+#define PGT_pinned (1U<<_PGT_pinned)
+
+/* 27-bit count of uses of this frame as its current type. */
+#define PGT_count_mask ((1U<<27)-1)
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated 31
#define PGC_allocated (1U<<_PGC_allocated)
-#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
+/* Set when the page is used as a page table */
+#define _PGC_page_table 30
+#define PGC_page_table (1U<<_PGC_page_table)
+/* 30-bit count of references to this frame. */
+#define PGC_count_mask ((1U<<30)-1)
#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_phys(_pfn) < xenheap_phys_end) \
&& (page_to_phys(_pfn) >= xen_pstart))
-#define pickle_domptr(_d) ((u64)(_d))
-#define unpickle_domptr(_d) ((struct domain*)(_d))
+static inline struct domain *unpickle_domptr(u32 _d)
+{ return (_d == 0) ? NULL : __va(_d); }
+static inline u32 pickle_domptr(struct domain *_d)
+{ return (_d == NULL) ? 0 : (u32)__pa(_d); }
#define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p, _d) ((_p)->u.inuse._domain = pickle_domptr(_d))
+/* Dummy now */
+#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) do { } while (0)
+
extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
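
(Not part of the patch.) The 32-bit _domain field above works because the owner pointer is "pickled" into a physical offset by the new inline helpers. A minimal user-space sketch of that idea, with __pa()/__va() and the direct-map base stubbed out; names and layout here are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the Xen types and the __pa()/__va() translation
 * (assumptions for illustration only). */
struct domain { int id; };
static char *xen_virt_base;                         /* fake direct-map base */
#define __pa(v)  ((uint32_t)((char *)(v) - xen_virt_base))
#define __va(p)  ((void *)(xen_virt_base + (p)))

/* Same shape as the helpers added to asm-ia64/mm.h above. */
static uint32_t pickle_domptr(struct domain *d)
{ return (d == NULL) ? 0 : (uint32_t)__pa(d); }
static struct domain *unpickle_domptr(uint32_t d)
{ return (d == 0) ? NULL : (struct domain *)__va(d); }

int main(void)
{
    static char fake_direct_map[4096];
    struct domain *dom0 = (struct domain *)&fake_direct_map[256];

    xen_virt_base = fake_direct_map;
    dom0->id = 0;

    uint32_t pickled = pickle_domptr(dom0);
    printf("pickled=0x%x, round-trip ok=%d\n",
           pickled, unpickle_domptr(pickled) == dom0);
    return 0;
}
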
@@ -151,16 +132,46 @@ void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
static inline void put_page(struct pfn_info *page)
{
- dummy();
-}
+ u32 nx, x, y = page->count_info;
+
+ do {
+ x = y;
+ nx = x - 1;
+ }
+ while (unlikely((y = cmpxchg(&page->count_info, x, nx)) != x));
+ if (unlikely((nx & PGC_count_mask) == 0))
+ free_domheap_page(page);
+}
+/* count_info and ownership are checked atomically. */
static inline int get_page(struct pfn_info *page,
struct domain *domain)
{
- dummy();
+ u64 x, nx, y = *((u64*)&page->count_info);
+ u32 _domain = pickle_domptr(domain);
+
+ do {
+ x = y;
+ nx = x + 1;
+ if (unlikely((x & PGC_count_mask) == 0) || /* Not allocated? */
+ unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
+ unlikely((x >> 32) != _domain)) { /* Wrong owner? */
+ DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%08x\n",
+ page_to_pfn(page), domain, unpickle_domptr((u32)(x >> 32)),
+ (u32)x, page->u.inuse.type_info);
+ return 0;
+ }
+ }
+ while (unlikely((y = cmpxchg((u64 *)&page->count_info, x, nx)) != x));
+
+ return 1;
}
+/* No type info now */
+#define put_page_and_type(page) put_page((page))
+#define get_page_and_type(page, domain, type) get_page((page))
+
#define set_machinetophys(_mfn, _pfn) do { } while(0);
#ifdef MEMORY_GUARD
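
(For reference, not part of the patch.) The new get_page()/put_page() bodies follow the usual lock-free reference-count pattern: snapshot count_info, compute the updated value, and retry with cmpxchg until no other CPU raced in. A stand-alone sketch of that loop, with GCC's __sync_val_compare_and_swap standing in for Xen's cmpxchg():

#include <stdio.h>
#include <stdint.h>

#define PGC_count_mask ((1U << 30) - 1)

struct fake_page { uint32_t count_info; };

/* Drop one reference; report when the count reaches zero
 * (the point where put_page() would call free_domheap_page()). */
static int fake_put_page(struct fake_page *pg)
{
    uint32_t nx, x, y = pg->count_info;

    do {
        x = y;
        nx = x - 1;
        y = __sync_val_compare_and_swap(&pg->count_info, x, nx);
    } while (y != x);

    return (nx & PGC_count_mask) == 0;
}

int main(void)
{
    struct fake_page pg = { .count_info = 2 };
    printf("free after 1st put? %d\n", fake_put_page(&pg));  /* 0 */
    printf("free after 2nd put? %d\n", fake_put_page(&pg));  /* 1 */
    return 0;
}
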
@@ -364,17 +375,40 @@ extern unsigned long *mpt_table;
#undef machine_to_phys_mapping
#define machine_to_phys_mapping mpt_table
+#define INVALID_M2P_ENTRY (~0U)
+#define VALID_M2P(_e) (!((_e) & (1U<<63)))
+#define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))
 /* If pmt table is provided by control panel later, we need __get_user
* here. However if it's allocated by HV, we should access it directly
*/
-#define phys_to_machine_mapping(d, gpfn) \
- ((d) == dom0 ? gpfn : (d)->arch.pmt[(gpfn)])
+#define phys_to_machine_mapping(d, gpfn) \
+ ((d) == dom0 ? gpfn : \
+ (gpfn <= d->arch.max_pfn ? (d)->arch.pmt[(gpfn)] : \
+ INVALID_MFN))
#define __mfn_to_gpfn(_d, mfn) \
machine_to_phys_mapping[(mfn)]
#define __gpfn_to_mfn(_d, gpfn) \
phys_to_machine_mapping((_d), (gpfn))
+
+#define __gpfn_invalid(_d, gpfn) \
+ (__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK)
+
+#define __gpfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn)
+
+/* Return I/O type if true */
+#define __gpfn_is_io(_d, gpfn) \
+ (__gpfn_valid(_d, gpfn) ? \
+ (__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) : 0)
+
+#define __gpfn_is_mem(_d, gpfn) \
+ (__gpfn_valid(_d, gpfn) ? \
+ ((__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) == GPFN_MEM) : 0)
+
+
+#define __gpa_to_mpa(_d, gpa) \
+ ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
#endif // CONFIG_VTI
#endif /* __ASM_IA64_MM_H__ */
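
(Illustration only, not part of the patch.) The __gpfn_is_io()/__gpfn_is_mem() macros above classify a guest pfn by the type tag the pmt entry carries in bits 56-58, with the bits above 58 flagging an invalid entry. A small user-space version of the same test; the constants mirror the GPFN_* definitions added to public/arch-ia64.h later in this patch, and 64-bit longs are assumed, as on ia64:

#include <stdio.h>
#include <stdint.h>

/* Mirrors of the public/arch-ia64.h encodings (64-bit long assumed). */
#define GPFN_MEM        (0UL << 56)
#define GPFN_LOW_MMIO   (2UL << 56)
#define GPFN_IO_MASK    (7UL << 56)
#define GPFN_INV_MASK   (31UL << 59)

static const char *classify(uint64_t pmt_entry)
{
    if (pmt_entry & GPFN_INV_MASK)
        return "invalid";
    if ((pmt_entry & GPFN_IO_MASK) == GPFN_MEM)
        return "normal memory";
    return "I/O";
}

int main(void)
{
    printf("%s\n", classify(0x1234));                 /* normal memory */
    printf("%s\n", classify(GPFN_LOW_MMIO | 0xa0));   /* I/O */
    printf("%s\n", classify(GPFN_INV_MASK));          /* invalid */
    return 0;
}
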
diff --git a/xen/include/asm-ia64/tlb.h b/xen/include/asm-ia64/tlb.h
index 7947bf3dcc..049f7b5f21 100644
--- a/xen/include/asm-ia64/tlb.h
+++ b/xen/include/asm-ia64/tlb.h
@@ -39,11 +39,11 @@ typedef struct {
typedef union {
unsigned long value;
struct {
- uint64_t ve : 1;
- uint64_t rv1 : 1;
- uint64_t ps : 6;
- uint64_t rid : 24;
- uint64_t rv2 : 32;
+ unsigned long ve : 1;
+ unsigned long rv1 : 1;
+ unsigned long ps : 6;
+ unsigned long rid : 24;
+ unsigned long rv2 : 32;
};
} rr_t;
#endif // CONFIG_VTI
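
(Aside, not part of the patch.) The rr_t union above packs an IA-64 region register value out of its ve/ps/rid fields; a tiny illustration of building one, with arbitrary example values:

#include <stdio.h>

/* Local copy of the rr_t layout from asm-ia64/tlb.h (illustration only). */
typedef union {
    unsigned long value;
    struct {
        unsigned long ve  : 1;
        unsigned long rv1 : 1;
        unsigned long ps  : 6;
        unsigned long rid : 24;
        unsigned long rv2 : 32;
    };
} rr_t;

int main(void)
{
    rr_t rr = { .value = 0 };
    rr.ve  = 1;        /* enable the VHPT walker */
    rr.ps  = 14;       /* preferred page size: 16KB (2^14) */
    rr.rid = 0x1234;   /* region id */
    printf("rr.value = 0x%lx\n", rr.value);
    return 0;
}
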
diff --git a/xen/include/asm-ia64/vcpu.h b/xen/include/asm-ia64/vcpu.h
index d3ef4c229b..f4ca7de36f 100644
--- a/xen/include/asm-ia64/vcpu.h
+++ b/xen/include/asm-ia64/vcpu.h
@@ -23,8 +23,8 @@ typedef struct pt_regs REGS;
#define PRIVOP_ADDR_COUNT
#ifdef PRIVOP_ADDR_COUNT
-#define _RSM 0
-#define _SSM 1
+#define _GET_IFA 0
+#define _THASH 1
#define PRIVOP_COUNT_NINSTS 2
#define PRIVOP_COUNT_NADDRS 30
diff --git a/xen/include/asm-ia64/vhpt.h b/xen/include/asm-ia64/vhpt.h
index e4268f0947..2ef29b32af 100644
--- a/xen/include/asm-ia64/vhpt.h
+++ b/xen/include/asm-ia64/vhpt.h
@@ -140,12 +140,20 @@ CC_##Name:; \
mov r16 = cr.ifa; \
movl r30 = int_counts; \
;; \
+ extr.u r17=r16,59,5 \
+ ;; \
+ cmp.eq p6,p0=0x1e,r17; \
+(p6) br.cond.spnt .Alt_##Name \
+ ;; \
+ cmp.eq p6,p0=0x1d,r17; \
+(p6) br.cond.spnt .Alt_##Name \
+ ;; \
thash r28 = r16; \
adds r30 = CAUSE_VHPT_CC_HANDLED << 3, r30; \
;; \
ttag r19 = r16; \
- ld8 r27 = [r30]; \
- adds r17 = VLE_CCHAIN_OFFSET, r28; \
+ ld8 r27 = [r30]; \
+ adds r17 = VLE_CCHAIN_OFFSET, r28; \
;; \
ld8 r17 = [r17]; \
;; \
@@ -192,6 +200,11 @@ CC_##Name:; \
rfi; \
;; \
\
+.Alt_##Name:; \
+ mov pr = r31, 0x1ffff; \
+ ;; \
+ br.cond.sptk late_alt_##Name \
+ ;; \
.Out_##Name:; \
mov pr = r31, 0x1ffff; \
;; \
diff --git a/xen/include/asm-ia64/vmmu.h b/xen/include/asm-ia64/vmmu.h
index cee7d89a90..8464c929ac 100644
--- a/xen/include/asm-ia64/vmmu.h
+++ b/xen/include/asm-ia64/vmmu.h
@@ -28,13 +28,13 @@
#include "public/xen.h"
#include "asm/tlb.h"
-#define THASH_TLB_TR 0
-#define THASH_TLB_TC 1
-#define THASH_TLB_FM 2 // foreign map
+//#define THASH_TLB_TR 0
+//#define THASH_TLB_TC 1
-#define THASH_SECTION_TR (1<<0)
-#define THASH_SECTION_TC (1<<1)
-#define THASH_SECTION_FM (1<<2)
+
+// bit definition of TR, TC search combination
+//#define THASH_SECTION_TR (1<<0)
+//#define THASH_SECTION_TC (1<<1)
/*
* Next bit definition must be same with THASH_TLB_XX
@@ -43,8 +43,7 @@ typedef union search_section {
struct {
u32 tr : 1;
u32 tc : 1;
- u32 fm : 1;
- u32 rsv: 29;
+ u32 rsv: 30;
};
u32 v;
} search_section_t;
@@ -80,12 +79,10 @@ typedef struct thash_data {
u64 ig1 : 11; //53-63
};
struct {
- u64 __rv1 : 12;
- // sizeof(domid_t) must be less than 38!!! Refer to its definition
- u64 fm_dom : 38; // 12-49 foreign map domain ID
- u64 __rv2 : 3; // 50-52
+ u64 __rv1 : 53; // 0-52
// next extension to ig1, only for TLB instance
- u64 section : 2; // 53-54 TR, TC or FM (thash_TLB_XX)
+ u64 tc : 1; // 53 TR or TC
+ u64 locked : 1; // 54 entry locked or not
CACHE_LINE_TYPE cl : 1; // I side or D side cache line
 u64 nomap : 1; // entry cannot be inserted into machine TLB.
u64 __ig1 : 5; // 56-61
@@ -227,8 +224,8 @@ typedef struct thash_cb {
INVALID_ENTRY(hcb, hash) = 1; \
hash->next = NULL; }
-#define PURGABLE_ENTRY(hcb,en) \
- ((hcb)->ht == THASH_VHPT || (en)->section == THASH_TLB_TC)
+#define PURGABLE_ENTRY(hcb,en) \
+ ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
/*
@@ -306,7 +303,7 @@ extern void thash_purge_entries_ex(thash_cb_t *hcb,
u64 rid, u64 va, u64 sz,
search_section_t p_sect,
CACHE_LINE_TYPE cl);
-extern thash_cb_t *init_domain_tlb(struct vcpu *d);
+extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in);
/*
* Purge all TCs or VHPT entries including those in Hash table.
@@ -323,6 +320,7 @@ extern thash_data_t *vtlb_lookup(thash_cb_t *hcb,
thash_data_t *in);
extern thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb,
u64 rid, u64 va,CACHE_LINE_TYPE cl);
+extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock);
#define ITIR_RV_MASK (((1UL<<32)-1)<<32 | 0x3)
@@ -332,6 +330,7 @@ extern u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps);
extern void purge_machine_tc_by_domid(domid_t domid);
extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
extern rr_t vmmu_get_rr(struct vcpu *vcpu, u64 va);
+extern thash_cb_t *init_domain_tlb(struct vcpu *d);
#define VTLB_DEBUG
#ifdef VTLB_DEBUG
diff --git a/xen/include/asm-ia64/vmx_platform.h b/xen/include/asm-ia64/vmx_platform.h
index bf59e61fec..37560863fa 100644
--- a/xen/include/asm-ia64/vmx_platform.h
+++ b/xen/include/asm-ia64/vmx_platform.h
@@ -25,7 +25,7 @@
struct mmio_list;
typedef struct virutal_platform_def {
//unsigned long *real_mode_data; /* E820, etc. */
- //unsigned long shared_page_va;
+ unsigned long shared_page_va;
//struct vmx_virpit_t vmx_pit;
//struct vmx_handler_t vmx_handler;
//struct mi_per_cpu_info mpci; /* MMIO */
diff --git a/xen/include/asm-ia64/vmx_ptrace.h b/xen/include/asm-ia64/vmx_ptrace.h
deleted file mode 100644
index 4065c097f4..0000000000
--- a/xen/include/asm-ia64/vmx_ptrace.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 2003 Intel Co
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Fenghua Yu <fenghua.yu@intel.com>
- * Arun Sharma <arun.sharma@intel.com>
- *
- * 12/07/98 S. Eranian added pt_regs & switch_stack
- * 12/21/98 D. Mosberger updated to match latest code
- * 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
- * 4/28/05 Anthony Xu ported to Xen
- *
- */
-
-struct pt_regs {
- /* The following registers are saved by SAVE_MIN: */
- unsigned long b6; /* scratch */
- unsigned long b7; /* scratch */
-
- unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
- unsigned long ar_ssd; /* reserved for future use (scratch) */
-
- unsigned long r8; /* scratch (return value register 0) */
- unsigned long r9; /* scratch (return value register 1) */
- unsigned long r10; /* scratch (return value register 2) */
- unsigned long r11; /* scratch (return value register 3) */
-
- unsigned long cr_ipsr; /* interrupted task's psr */
- unsigned long cr_iip; /* interrupted task's instruction pointer */
- unsigned long cr_ifs; /* interrupted task's function state */
-
- unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
- unsigned long ar_pfs; /* prev function state */
- unsigned long ar_rsc; /* RSE configuration */
- /* The following two are valid only if cr_ipsr.cpl > 0: */
- unsigned long ar_rnat; /* RSE NaT */
- unsigned long ar_bspstore; /* RSE bspstore */
-
- unsigned long pr; /* 64 predicate registers (1 bit each) */
- unsigned long b0; /* return pointer (bp) */
- unsigned long loadrs; /* size of dirty partition << 16 */
-
- unsigned long r1; /* the gp pointer */
- unsigned long r12; /* interrupted task's memory stack pointer */
- unsigned long r13; /* thread pointer */
-
- unsigned long ar_fpsr; /* floating point status (preserved) */
- unsigned long r15; /* scratch */
-
- /* The remaining registers are NOT saved for system calls. */
-
- unsigned long r14; /* scratch */
- unsigned long r2; /* scratch */
- unsigned long r3; /* scratch */
- unsigned long r4; /* preserved */
- unsigned long r5; /* preserved */
- unsigned long r6; /* preserved */
- unsigned long r7; /* preserved */
- unsigned long cr_iipa; /* for emulation */
- unsigned long cr_isr; /* for emulation */
- unsigned long eml_unat; /* used for emulating instruction */
- unsigned long rfi_pfs; /* used for elulating rfi */
-
- /* The following registers are saved by SAVE_REST: */
- unsigned long r16; /* scratch */
- unsigned long r17; /* scratch */
- unsigned long r18; /* scratch */
- unsigned long r19; /* scratch */
- unsigned long r20; /* scratch */
- unsigned long r21; /* scratch */
- unsigned long r22; /* scratch */
- unsigned long r23; /* scratch */
- unsigned long r24; /* scratch */
- unsigned long r25; /* scratch */
- unsigned long r26; /* scratch */
- unsigned long r27; /* scratch */
- unsigned long r28; /* scratch */
- unsigned long r29; /* scratch */
- unsigned long r30; /* scratch */
- unsigned long r31; /* scratch */
-
- unsigned long ar_ccv; /* compare/exchange value (scratch) */
-
- /*
- * Floating point registers that the kernel considers scratch:
- */
- struct ia64_fpreg f6; /* scratch */
- struct ia64_fpreg f7; /* scratch */
- struct ia64_fpreg f8; /* scratch */
- struct ia64_fpreg f9; /* scratch */
- struct ia64_fpreg f10; /* scratch */
- struct ia64_fpreg f11; /* scratch */
-};
-
-
diff --git a/xen/include/asm-ia64/vmx_vpd.h b/xen/include/asm-ia64/vmx_vpd.h
index 78149ba31f..be29ed1d47 100644
--- a/xen/include/asm-ia64/vmx_vpd.h
+++ b/xen/include/asm-ia64/vmx_vpd.h
@@ -26,6 +26,7 @@
#include <asm/vtm.h>
#include <asm/vmx_platform.h>
+#include <public/arch-ia64.h>
#define VPD_SHIFT 17 /* 128K requirement */
#define VPD_SIZE (1 << VPD_SHIFT)
diff --git a/xen/include/asm-x86/event.h b/xen/include/asm-x86/event.h
new file mode 100644
index 0000000000..e7b5cda8b1
--- /dev/null
+++ b/xen/include/asm-x86/event.h
@@ -0,0 +1,16 @@
+/******************************************************************************
+ * event.h
+ *
+ * A nice interface for passing asynchronous events to guest OSes.
+ * (architecture-dependent part)
+ *
+ */
+
+#ifndef __ASM_EVENT_H__
+#define __ASM_EVENT_H__
+
+static inline void evtchn_notify(struct vcpu *v)
+{
+}
+
+#endif
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index bec90dbab0..82b8cbce62 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -181,6 +181,7 @@ extern struct cpuinfo_x86 cpu_data[];
#endif
extern int phys_proc_id[NR_CPUS];
+extern int cpu_core_id[NR_CPUS];
extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
diff --git a/xen/include/asm-x86/smp.h b/xen/include/asm-x86/smp.h
index c70f4d90fc..552b699bc4 100644
--- a/xen/include/asm-x86/smp.h
+++ b/xen/include/asm-x86/smp.h
@@ -8,6 +8,7 @@
#include <xen/config.h>
#include <xen/kernel.h>
#include <xen/cpumask.h>
+#include <asm/current.h>
#endif
#ifdef CONFIG_X86_LOCAL_APIC
@@ -34,6 +35,7 @@ extern void smp_alloc_memory(void);
extern int pic_mode;
extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
extern void smp_flush_tlb(void);
extern void smp_invalidate_rcv(void); /* Process an NMI */
diff --git a/xen/include/public/acm.h b/xen/include/public/acm.h
new file mode 100644
index 0000000000..31191b7b5a
--- /dev/null
+++ b/xen/include/public/acm.h
@@ -0,0 +1,161 @@
+/****************************************************************
+ * acm.h
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * Contributors:
+ * Stefan Berger <stefanb@watson.ibm.com>
+ * added network byte order support for binary policies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * sHype general access control module header file.
+ * here are all definitions that are shared between
+ * xen-core, guest-kernels, and applications.
+ *
+ * todo: move from static policy choice to compile option.
+ */
+
+#ifndef _XEN_PUBLIC_SHYPE_H
+#define _XEN_PUBLIC_SHYPE_H
+
+#include "xen.h"
+#include "sched_ctl.h"
+
+/* If ACM_DEBUG is defined, all hooks should print
+ * a short trace message (comment it out when not
+ * in testing mode).
+ */
+/* #define ACM_DEBUG */
+
+#ifdef ACM_DEBUG
+# define printkd(fmt, args...) printk(fmt,## args)
+#else
+# define printkd(fmt, args...)
+#endif
+
+/* default ssid reference value if not supplied */
+#define ACM_DEFAULT_SSID 0xffffffff
+#define ACM_DEFAULT_LOCAL_SSID 0xffff
+
+/* Internal ACM ERROR types */
+#define ACM_OK 0
+#define ACM_UNDEF -1
+#define ACM_INIT_SSID_ERROR -2
+#define ACM_INIT_SOID_ERROR -3
+#define ACM_ERROR -4
+
+/* External ACCESS DECISIONS */
+#define ACM_ACCESS_PERMITTED 0
+#define ACM_ACCESS_DENIED -111
+#define ACM_NULL_POINTER_ERROR -200
+
+#define ACM_MAX_POLICY 3
+
+#define ACM_NULL_POLICY 0
+#define ACM_CHINESE_WALL_POLICY 1
+#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
+#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY 3
+
+/* policy: */
+#define ACM_POLICY_NAME(X) \
+ (X == ACM_NULL_POLICY) ? "NULL policy" : \
+ (X == ACM_CHINESE_WALL_POLICY) ? "CHINESE WALL policy" : \
+ (X == ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY) ? "SIMPLE TYPE ENFORCEMENT policy" : \
+ (X == ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT policy" : \
+ "UNDEFINED policy"
+
+#ifndef ACM_USE_SECURITY_POLICY
+#define ACM_USE_SECURITY_POLICY ACM_NULL_POLICY
+#endif
+
+/* defines a ssid reference used by xen */
+typedef u32 ssidref_t;
+
+/* -------security policy relevant type definitions-------- */
+
+/* type identifier; compares to "equal" or "not equal" */
+typedef u16 domaintype_t;
+
+/* CHINESE WALL POLICY DATA STRUCTURES
+ *
+ * current accumulated conflict type set:
+ * When a domain is started and has a type that is in
+ * a conflict set, the conflicting types are incremented in
+ * the aggregate set. When a domain is destroyed, the
+ * conflicting types to its type are decremented.
+ * If a domain has multiple types, this procedure works over
+ * all those types.
+ *
+ * conflict_aggregate_set[i] holds the number of
+ * running domains that have a conflict with type i.
+ *
+ * running_types[i] holds the number of running domains
+ * that include type i in their ssidref-referenced type set
+ *
+ * conflict_sets[i][j] is "0" if type j has no conflict
+ * with type i and is "1" otherwise.
+ */
+/* high-16 = version, low-16 = check magic */
+#define ACM_MAGIC 0x0001debc
+
+/* Each offset is given in bytes from the start of
+ * the struct it is part of. */
+/* each buffer consists of all policy information for
+ * the respective policy given in the policy code
+ */
+struct acm_policy_buffer {
+ u32 magic;
+ u32 policyversion;
+ u32 len;
+ u16 primary_policy_code;
+ u16 primary_buffer_offset;
+ u16 secondary_policy_code;
+ u16 secondary_buffer_offset;
+};
+
+struct acm_chwall_policy_buffer {
+ u16 policy_code;
+ u16 chwall_max_types;
+ u16 chwall_max_ssidrefs;
+ u16 chwall_max_conflictsets;
+ u16 chwall_ssid_offset;
+ u16 chwall_conflict_sets_offset;
+ u16 chwall_running_types_offset;
+ u16 chwall_conflict_aggregate_offset;
+};
+
+struct acm_ste_policy_buffer {
+ u16 policy_code;
+ u16 ste_max_types;
+ u16 ste_max_ssidrefs;
+ u16 ste_ssid_offset;
+};
+
+struct acm_stats_buffer {
+ u32 magic;
+ u32 policyversion;
+ u32 len;
+ u16 primary_policy_code;
+ u16 primary_stats_offset;
+ u16 secondary_policy_code;
+ u16 secondary_stats_offset;
+};
+
+struct acm_ste_stats_buffer {
+ u32 ec_eval_count;
+ u32 gt_eval_count;
+ u32 ec_denied_count;
+ u32 gt_denied_count;
+ u32 ec_cachehit_count;
+ u32 gt_cachehit_count;
+};
+
+
+#endif
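
(Not part of the patch.) The Chinese Wall comment in acm.h above describes the bookkeeping in prose; the following is a simplified, self-contained model of it, with one type per domain and a made-up conflict matrix, not the actual acm_chinesewall_hooks.c implementation:

#include <stdio.h>

#define NTYPES 3   /* example size only; real sizes come from the policy */

/* conflict_sets[i][j] == 1 means type i conflicts with type j. */
static const int conflict_sets[NTYPES][NTYPES] = {
    { 0, 1, 0 },
    { 1, 0, 0 },
    { 0, 0, 0 },
};
static int conflict_aggregate_set[NTYPES];
static int running_types[NTYPES];

/* Admit a domain of the given type unless a running domain conflicts with it;
 * on success, bump the aggregate counters of every type it conflicts with. */
static int chwall_domain_start(int type)
{
    if (conflict_aggregate_set[type])
        return -1;                        /* would breach the wall */
    running_types[type]++;
    for (int j = 0; j < NTYPES; j++)
        if (conflict_sets[type][j])
            conflict_aggregate_set[j]++;
    return 0;
}

static void chwall_domain_destroy(int type)
{
    running_types[type]--;
    for (int j = 0; j < NTYPES; j++)
        if (conflict_sets[type][j])
            conflict_aggregate_set[j]--;
}

int main(void)
{
    printf("start type 0: %d\n", chwall_domain_start(0));  /*  0: allowed */
    printf("start type 1: %d\n", chwall_domain_start(1));  /* -1: conflict */
    chwall_domain_destroy(0);
    printf("start type 1: %d\n", chwall_domain_start(1));  /*  0: allowed */
    return 0;
}
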
diff --git a/xen/include/public/acm_dom0_setup.h b/xen/include/public/acm_dom0_setup.h
new file mode 100644
index 0000000000..6604156ccf
--- /dev/null
+++ b/xen/include/public/acm_dom0_setup.h
@@ -0,0 +1,34 @@
+/****************************************************************
+ * acm_dom0_setup.h
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * Includes necessary definitions to bring up dom0
+ */
+#include <acm/acm_hooks.h>
+
+extern int acm_init(void);
+
+#if (ACM_USE_SECURITY_POLICY == ACM_NULL_POLICY)
+
+static inline void acm_post_domain0_create(domid_t domid)
+{
+ return;
+}
+
+#else
+
+/* predefined ssidref for DOM0 used by xen when creating DOM0 */
+#define ACM_DOM0_SSIDREF 0
+
+static inline void acm_post_domain0_create(domid_t domid)
+{
+ /* initialize shared sHype security labels for new domain */
+ acm_init_domain_ssid(domid, ACM_DOM0_SSIDREF);
+ acm_post_domain_create(domid, ACM_DOM0_SSIDREF);
+}
+
+#endif
diff --git a/xen/include/public/arch-ia64.h b/xen/include/public/arch-ia64.h
index ec00554959..cd259c2e04 100644
--- a/xen/include/public/arch-ia64.h
+++ b/xen/include/public/arch-ia64.h
@@ -14,11 +14,41 @@
#define _MEMORY_PADDING(_X)
#define MEMORY_PADDING
+/* Maximum number of virtual CPUs in multi-processor guests. */
+/* WARNING: before changing this, check that shared_info fits on a page */
+#define MAX_VIRT_CPUS 1
+
#ifndef __ASSEMBLY__
/* NB. Both the following are 64 bits each. */
typedef unsigned long memory_t; /* Full-sized pointer/address/memory-size. */
+#define MAX_NR_SECTION 32 // at most 32 memory holes
+typedef struct {
+ unsigned long start; /* start of memory hole */
+ unsigned long end; /* end of memory hole */
+} mm_section_t;
+
+typedef struct {
+ unsigned long mfn : 56;
+ unsigned long type: 8;
+} pmt_entry_t;
+
+#define GPFN_MEM (0UL << 56) /* Guest pfn is normal mem */
+#define GPFN_FRAME_BUFFER (1UL << 56) /* VGA framebuffer */
+#define GPFN_LOW_MMIO (2UL << 56) /* Low MMIO range */
+#define GPFN_PIB (3UL << 56) /* PIB base */
+#define GPFN_IOSAPIC (4UL << 56) /* IOSAPIC base */
+#define GPFN_LEGACY_IO (5UL << 56) /* Legacy I/O base */
+#define GPFN_GFW (6UL << 56) /* Guest Firmware */
+#define GPFN_HIGH_MMIO (7UL << 56) /* High MMIO range */
+
+#define GPFN_IO_MASK (7UL << 56) /* Guest pfn is I/O type */
+#define GPFN_INV_MASK (31UL << 59) /* Guest pfn is invalid */
+
+#define INVALID_MFN (~0UL)
+
+
typedef struct
{
} PACKED cpu_user_regs;
@@ -28,11 +58,99 @@ typedef struct
* structure size will still be 8 bytes, so no other alignments will change.
*/
typedef struct {
- u32 tsc_bits; /* 0: 32 bits read from the CPU's TSC. */
- u32 tsc_bitshift; /* 4: 'tsc_bits' uses N:N+31 of TSC. */
+ unsigned int tsc_bits; /* 0: 32 bits read from the CPU's TSC. */
+ unsigned int tsc_bitshift; /* 4: 'tsc_bits' uses N:N+31 of TSC. */
} PACKED tsc_timestamp_t; /* 8 bytes */
-#include <asm/tlb.h> /* TR_ENTRY */
+struct pt_fpreg {
+ union {
+ unsigned long bits[2];
+ long double __dummy; /* force 16-byte alignment */
+ } u;
+};
+
+struct pt_regs {
+ /* The following registers are saved by SAVE_MIN: */
+ unsigned long b6; /* scratch */
+ unsigned long b7; /* scratch */
+
+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+ unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+ unsigned long r8; /* scratch (return value register 0) */
+ unsigned long r9; /* scratch (return value register 1) */
+ unsigned long r10; /* scratch (return value register 2) */
+ unsigned long r11; /* scratch (return value register 3) */
+
+ unsigned long cr_ipsr; /* interrupted task's psr */
+ unsigned long cr_iip; /* interrupted task's instruction pointer */
+ unsigned long cr_ifs; /* interrupted task's function state */
+
+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+ unsigned long ar_pfs; /* prev function state */
+ unsigned long ar_rsc; /* RSE configuration */
+ /* The following two are valid only if cr_ipsr.cpl > 0: */
+ unsigned long ar_rnat; /* RSE NaT */
+ unsigned long ar_bspstore; /* RSE bspstore */
+
+ unsigned long pr; /* 64 predicate registers (1 bit each) */
+ unsigned long b0; /* return pointer (bp) */
+ unsigned long loadrs; /* size of dirty partition << 16 */
+
+ unsigned long r1; /* the gp pointer */
+ unsigned long r12; /* interrupted task's memory stack pointer */
+ unsigned long r13; /* thread pointer */
+
+ unsigned long ar_fpsr; /* floating point status (preserved) */
+ unsigned long r15; /* scratch */
+
+ /* The remaining registers are NOT saved for system calls. */
+
+ unsigned long r14; /* scratch */
+ unsigned long r2; /* scratch */
+ unsigned long r3; /* scratch */
+
+#ifdef CONFIG_VTI
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+ unsigned long cr_iipa; /* for emulation */
+ unsigned long cr_isr; /* for emulation */
+ unsigned long eml_unat; /* used for emulating instruction */
+ unsigned long rfi_pfs; /* used for emulating rfi */
+#endif
+
+ /* The following registers are saved by SAVE_REST: */
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
+
+ unsigned long ar_ccv; /* compare/exchange value (scratch) */
+
+ /*
+ * Floating point registers that the kernel considers scratch:
+ */
+ struct pt_fpreg f6; /* scratch */
+ struct pt_fpreg f7; /* scratch */
+ struct pt_fpreg f8; /* scratch */
+ struct pt_fpreg f9; /* scratch */
+ struct pt_fpreg f10; /* scratch */
+ struct pt_fpreg f11; /* scratch */
+};
typedef struct {
unsigned long ipsr;
@@ -64,18 +182,20 @@ typedef struct {
unsigned long krs[8]; // kernel registers
unsigned long pkrs[8]; // protection key registers
unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
-//} PACKED arch_shared_info_t;
+//} PACKED arch_vcpu_info_t;
} arch_vcpu_info_t; // DON'T PACK
typedef struct {
+ int evtchn_vector;
+ int domain_controller_evtchn;
+ unsigned int flags;
+//} PACKED arch_shared_info_t;
} arch_shared_info_t; // DON'T PACK
-/*
- * The following is all CPU context. Note that the i387_ctxt block is filled
- * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
- */
typedef struct vcpu_guest_context {
- //unsigned long flags;
+ struct pt_regs regs;
+ arch_vcpu_info_t vcpu;
+ arch_shared_info_t shared;
} PACKED vcpu_guest_context_t;
#endif /* !__ASSEMBLY__ */
diff --git a/xen/include/public/arch-x86_32.h b/xen/include/public/arch-x86_32.h
index 21f97669d5..1a11a3be86 100644
--- a/xen/include/public/arch-x86_32.h
+++ b/xen/include/public/arch-x86_32.h
@@ -73,6 +73,9 @@
#define machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
#endif
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 32
+
#ifndef __ASSEMBLY__
/* NB. Both the following are 32 bits each. */
diff --git a/xen/include/public/arch-x86_64.h b/xen/include/public/arch-x86_64.h
index a4f4ac2fcf..634c53a34e 100644
--- a/xen/include/public/arch-x86_64.h
+++ b/xen/include/public/arch-x86_64.h
@@ -73,6 +73,9 @@
#define HYPERVISOR_VIRT_END (0xFFFF880000000000UL)
#endif
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 32
+
#ifndef __ASSEMBLY__
/* The machine->physical mapping table starts at this address, read-only. */
diff --git a/xen/include/public/dom0_ops.h b/xen/include/public/dom0_ops.h
index 0768b8c6ae..3ff82b43ac 100644
--- a/xen/include/public/dom0_ops.h
+++ b/xen/include/public/dom0_ops.h
@@ -43,6 +43,8 @@ typedef struct sched_adjdom_cmd dom0_adjustdom_t;
#define DOM0_CREATEDOMAIN 8
typedef struct {
+ /* IN parameters */
+ u32 ssidref;
/* IN/OUT parameters. */
/* Identifier for new domain (auto-allocate if zero is specified). */
domid_t domain;
@@ -88,6 +90,7 @@ typedef struct {
u32 n_vcpu;
s32 vcpu_to_cpu[MAX_VIRT_CPUS]; /* current mapping */
cpumap_t cpumap[MAX_VIRT_CPUS]; /* allowable mapping */
+ u32 ssidref;
} dom0_getdomaininfo_t;
#define DOM0_SETDOMAININFO 13
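
(Illustration only, not part of the patch.) With the new ssidref IN field, the domain builder supplies the security label at DOM0_CREATEDOMAIN time. The sketch below fills a cut-down local copy of the structure; the real call goes through the dom0_op hypercall plumbing, which is omitted here, and the ssidref value is an arbitrary example:

#include <stdio.h>

typedef unsigned int   u32;
typedef unsigned short domid_t;   /* stand-in for the public typedef */

/* Cut-down local mirror of dom0_createdomain_t (illustration only). */
typedef struct {
    /* IN */
    u32     ssidref;   /* security label for the new domain */
    /* IN/OUT */
    domid_t domain;    /* 0 => let Xen auto-allocate an id */
} fake_dom0_createdomain_t;

int main(void)
{
    fake_dom0_createdomain_t op = {
        .ssidref = 0x00010002,   /* arbitrary example label */
        .domain  = 0,
    };
    printf("creating domain with ssidref 0x%08x\n", op.ssidref);
    return 0;
}
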
diff --git a/xen/include/public/policy_ops.h b/xen/include/public/policy_ops.h
new file mode 100644
index 0000000000..6b55f764b1
--- /dev/null
+++ b/xen/include/public/policy_ops.h
@@ -0,0 +1,74 @@
+/******************************************************************************
+ * policy_ops.h
+ *
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * Process policy command requests from guest OS.
+ * Access is checked by the policy; not restricted to DOM0.
+ *
+ */
+
+
+#ifndef __XEN_PUBLIC_POLICY_OPS_H__
+#define __XEN_PUBLIC_POLICY_OPS_H__
+
+#include "xen.h"
+#include "sched_ctl.h"
+
+/*
+ * Make sure you increment the interface version whenever you modify this file!
+ * This makes sure that old versions of policy tools will stop working in a
+ * well-defined way (rather than crashing the machine, for instance).
+ */
+#define POLICY_INTERFACE_VERSION 0xAAAA0001
+
+/************************************************************************/
+
+#define POLICY_SETPOLICY 4
+typedef struct {
+ /* IN variables. */
+ u16 policy_type;
+ u16 padding1;
+ /* OUT variables */
+ void *pushcache;
+ u16 pushcache_size;
+} PACKED policy_setpolicy_t;
+
+
+#define POLICY_GETPOLICY 5
+typedef struct {
+ /* IN variables. */
+ u16 policy_type;
+ u16 padding1;
+ /* OUT variables */
+ void *pullcache;
+ u16 pullcache_size;
+} PACKED policy_getpolicy_t;
+
+#define POLICY_DUMPSTATS 6
+typedef struct {
+ void *pullcache;
+ u16 pullcache_size;
+} PACKED policy_dumpstats_t;
+
+
+typedef struct {
+ u32 cmd; /* 0 */
+ u32 interface_version; /* 4 */ /* POLICY_INTERFACE_VERSION */
+ union { /* 8 */
+ u32 dummy[14]; /* 72bytes */
+ policy_setpolicy_t setpolicy;
+ policy_getpolicy_t getpolicy;
+ policy_dumpstats_t dumpstats;
+ } PACKED u;
+} PACKED policy_op_t; /* 80 bytes */
+
+#endif /* __XEN_PUBLIC_POLICY_OPS_H__ */
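
(A hedged sketch, not the real tool code.) A management tool would fill policy_op_t roughly as below before issuing the new __HYPERVISOR_policy_op hypercall; the structures are re-declared locally (and unpacked) and the hypercall itself is left out:

#include <stdio.h>
#include <string.h>

typedef unsigned int   u32;
typedef unsigned short u16;

#define POLICY_INTERFACE_VERSION 0xAAAA0001
#define POLICY_GETPOLICY 5

/* Local, unpacked mirrors of the public structures (illustration only). */
typedef struct {
    u16   policy_type;
    u16   padding1;
    void *pullcache;        /* buffer Xen copies the binary policy into */
    u16   pullcache_size;
} policy_getpolicy_t;

typedef struct {
    u32 cmd;
    u32 interface_version;
    union {
        u32 dummy[14];
        policy_getpolicy_t getpolicy;
    } u;
} policy_op_t;

int main(void)
{
    static char buffer[4096];
    policy_op_t op;

    memset(&op, 0, sizeof(op));
    op.cmd               = POLICY_GETPOLICY;
    op.interface_version = POLICY_INTERFACE_VERSION;   /* checked by Xen */
    op.u.getpolicy.pullcache      = buffer;
    op.u.getpolicy.pullcache_size = sizeof(buffer);

    printf("op ready: cmd=%u version=0x%08x\n", op.cmd, op.interface_version);
    return 0;
}
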
diff --git a/xen/include/public/version.h b/xen/include/public/version.h
new file mode 100644
index 0000000000..1860d061da
--- /dev/null
+++ b/xen/include/public/version.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+ * version.h
+ *
+ * Xen version, type, and compile information.
+ *
+ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_VERSION_H__
+#define __XEN_PUBLIC_VERSION_H__
+
+/* NB. All ops return zero on success, except XENVER_version. */
+
+/* arg == NULL; returns major:minor (16:16). */
+#define XENVER_version 0
+
+/* arg == 16-char string buffer. */
+#define XENVER_extraversion 1
+
+/* arg == xenversion_compile_info_t. */
+#define XENVER_compile_info 2
+typedef struct xen_compile_info {
+ char compiler[64];
+ char compile_by[16];
+ char compile_domain[32];
+ char compile_date[32];
+} xen_compile_info_t;
+
+#endif /* __XEN_PUBLIC_VERSION_H__ */
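
(Not in the patch.) XENVER_version returns the release packed as major:minor in 16:16 form, so a caller splits it as below; the hypercall is replaced here by a stub returning a made-up value:

#include <stdio.h>

/* Stub standing in for the XENVER_version hypercall
 * (return value is a made-up example). */
static int fake_xenver_version(void)
{
    return (3 << 16) | 0;   /* pretend "3.0" */
}

int main(void)
{
    int v = fake_xenver_version();
    printf("running on Xen %d.%d\n", v >> 16, v & 0xffff);
    return 0;
}
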
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index d46472c16c..8b183491a6 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -58,6 +58,7 @@
#define __HYPERVISOR_boot_vcpu 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
+#define __HYPERVISOR_policy_op 27
/*
* VIRTUAL INTERRUPTS
@@ -287,9 +288,6 @@ typedef struct
/* Event channel endpoints per domain. */
#define NR_EVENT_CHANNELS 1024
-/* Support for multi-processor guests. */
-#define MAX_VIRT_CPUS 32
-
/*
* Per-VCPU information goes here. This will be cleaned up more when Xen
* actually supports multi-VCPU guests.
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 734427266b..05683344ca 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -13,6 +13,7 @@
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/bitops.h>
+#include <asm/event.h>
/*
* EVENT-CHANNEL NOTIFICATIONS
@@ -34,6 +35,7 @@ static inline void evtchn_set_pending(struct vcpu *v, int port)
{
/* The VCPU pending flag must be set /after/ update to evtchn-pend. */
set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
+ evtchn_notify(v);
/*
* NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 35a3c36cab..7649f1a450 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -137,6 +137,8 @@ struct domain
cpumask_t cpumask;
struct arch_domain arch;
+
+ void *ssid; /* sHype security subject identifier */
};
struct domain_setup_info
diff --git a/xen/include/xen/smp.h b/xen/include/xen/smp.h
index 2004211589..57f7580ade 100644
--- a/xen/include/xen/smp.h
+++ b/xen/include/xen/smp.h
@@ -58,8 +58,6 @@ static inline int on_each_cpu(void (*func) (void *info), void *info,
return ret;
}
-extern int ht_per_core;
-
extern volatile unsigned long smp_msg_data;
extern volatile int smp_src_cpu;
extern volatile int smp_msg_id;
diff --git a/xen/include/xen/string.h b/xen/include/xen/string.h
index 384ee2cfce..0c6dd612ad 100644
--- a/xen/include/xen/string.h
+++ b/xen/include/xen/string.h
@@ -81,4 +81,9 @@ extern void * memchr(const void *,int,__kernel_size_t);
}
#endif
+#define safe_strcpy(d,s) \
+do { strncpy((d),(s),sizeof((d))); \
+ (d)[sizeof((d))-1] = '\0'; \
+} while (0)
+
#endif /* _LINUX_STRING_H_ */
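
(Usage note, not part of the patch.) safe_strcpy() depends on sizeof(d), so the destination must be an actual array visible at the call site; with a plain char * the macro would silently truncate at the pointer size. A quick example:

#include <stdio.h>
#include <string.h>

#define safe_strcpy(d,s) \
do { strncpy((d),(s),sizeof((d))); \
     (d)[sizeof((d))-1] = '\0';    \
} while (0)

int main(void)
{
    char name[8];
    safe_strcpy(name, "a-rather-long-domain-name");
    printf("'%s' (always NUL-terminated, at most %zu chars)\n",
           name, sizeof(name) - 1);

    /* char *p = malloc(32); safe_strcpy(p, "...");   <-- would be wrong:
     * sizeof(p) is the pointer size, not the buffer size. */
    return 0;
}
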