#
# Copyright (C) 2006-2016 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#

include $(TOPDIR)/rules.mk

PKG_NAME:=openssl
PKG_BASE:=1.1.1
PKG_BUGFIX:=d
PKG_VERSION:=$(PKG_BASE)$(PKG_BUGFIX)
PKG_RELEASE:=2
PKG_USE_MIPS16:=0
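# OpenSSL 1.1.x installs its dynamic engines under libdir/engines-1.1, so
# this directory name tracks the library ABI version.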
ENGINES_DIR=engines-1.1

PKG_BUILD_PARALLEL:=1

PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:= \
	http://ftp.fi.muni.cz/pub/openssl/source/ \
	http://ftp.linux.hr/pub/openssl/source/ \
	ftp://ftp.pca.dfn.de/pub/tools/net/openssl/source/ \
	http://www.openssl.org/source/ \
	http://www.openssl.org/source/old/$(PKG_BASE)/
PKG_HASH:=1e3a91bc1f9dfce01af26026f856e064eab4c8ee0a8f457b5ae30b40b8b711f2

PKG_LICENSE:=OpenSSL
PKG_LICENSE_FILES:=LICENSE
PKG_MAINTAINER:=Eneas U de Queiroz <cotequeiroz@gmail.com>
PKG_CPE_ID:=cpe:/a:openssl:openssl
PKG_CONFIG_DEPENDS:= \
	CONFIG_OPENSSL_ENGINE \
	CONFIG_OPENSSL_ENGINE_BUILTIN \
	CONFIG_OPENSSL_ENGINE_BUILTIN_AFALG \
	CONFIG_OPENSSL_ENGINE_BUILTIN_DEVCRYPTO \
	CONFIG_OPENSSL_ENGINE_BUILTIN_PADLOCK \
	CONFIG_OPENSSL_NO_DEPRECATED \
	CONFIG_OPENSSL_OPTIMIZE_SPEED \
	CONFIG_OPENSSL_PREFER_CHACHA_OVER_GCM \
	CONFIG_OPENSSL_WITH_ARIA \
	CONFIG_OPENSSL_WITH_ASM \
	CONFIG_OPENSSL_WITH_ASYNC \
	CONFIG_OPENSSL_WITH_BLAKE2 \
	CONFIG_OPENSSL_WITH_CAMELLIA \
	CONFIG_OPENSSL_WITH_CHACHA_POLY1305 \
	CONFIG_OPENSSL_WITH_CMS \
	CONFIG_OPENSSL_WITH_COMPRESSION \
	CONFIG_OPENSSL_WITH_DTLS \
	CONFIG_OPENSSL_WITH_EC2M \
	CONFIG_OPENSSL_WITH_ERROR_MESSAGES \
	CONFIG_OPENSSL_WITH_GOST \
	CONFIG_OPENSSL_WITH_IDEA \
	CONFIG_OPENSSL_WITH_MDC2 \
	CONFIG_OPENSSL_WITH_NPN \
	CONFIG_OPENSSL_WITH_PSK \
	CONFIG_OPENSSL_WITH_RFC3779 \
	CONFIG_OPENSSL_WITH_SEED \
	CONFIG_OPENSSL_WITH_SM234 \
	CONFIG_OPENSSL_WITH_SRP \
	CONFIG_OPENSSL_WITH_SSE2 \
	CONFIG_OPENSSL_WITH_TLS13 \
	CONFIG_OPENSSL_WITH_WHIRLPOOL

include $(INCLUDE_DIR)/package.mk

ifneq ($(CONFIG_CCACHE),)
HOSTCC=$(HOSTCC_NOCACHE)
HOSTCXX=$(HOSTCXX_NOCACHE)
endif

define Package/openssl/Default
  TITLE:=Open source SSL toolkit
  URL:=http://www.openssl.org/
  SECTION:=libs
  CATEGORY:=Libraries
endef

define Package/libopenssl/config
source "$(SOURCE)/Config.in"
endef

define Package/openssl/Default/description
The OpenSSL Project is a collaborative effort to develop a robust,
commercial-grade, full-featured, and Open Source toolkit implementing the
Transport Layer Security (TLS) protocol as well as a full-strength
general-purpose cryptography library.
endef

define Package/libopenssl
$(call Package/openssl/Default)
  SUBMENU:=SSL
  DEPENDS:=+OPENSSL_WITH_COMPRESSION:zlib \
	   +OPENSSL_ENGINE_BUILTIN_AFALG:kmod-crypto-user \
	   +OPENSSL_ENGINE_BUILTIN_DEVCRYPTO:kmod-cryptodev \
	   +OPENSSL_ENGINE_BUILTIN_PADLOCK:kmod-crypto-hw-padlock
  TITLE+= (libraries)
  ABI_VERSION:=1.1
  MENU:=1
endef

define Package/libopenssl/description
$(call Package/openssl/Default/description)
This package contains the OpenSSL shared libraries, needed by other programs.
endef

define Package/openssl-util
  $(call Package/openssl/Default)
  SECTION:=utils
  CATEGORY:=Utilities
  DEPENDS:=+libopenssl +libopenssl-conf
  TITLE+= (utility)
endef

define Package/openssl-util/description
$(call Package/openssl/Default/description)
This package contains the OpenSSL command-line utility.
endef

define Package/libopenssl-conf
  $(call Package/openssl/Default)
  SUBMENU:=SSL
  TITLE:=/etc/ssl/openssl.cnf config file
  DEPENDS:=libopenssl
endef

define Package/libopenssl-conf/conffiles
/etc/ssl/openssl.cnf
endef
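
# Listing openssl.cnf under conffiles makes opkg treat it as configuration:
# a locally modified copy is preserved rather than overwritten on upgrade.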

define Package/libopenssl-conf/description
$(call Package/openssl/Default/description)
This package installs the OpenSSL configuration file /etc/ssl/openssl.cnf.
endef

define Package/libopenssl-afalg
  $(call Package/openssl/Default)
  SUBMENU:=SSL
  TITLE:=AFALG hardware acceleration engine
  DEPENDS:=libopenssl @OPENSSL_ENGINE @KERNEL_AIO \
	   +PACKAGE_libopenssl-afalg:kmod-crypto-user +libopenssl-conf @!OPENSSL_ENGINE_BUILTIN
endef

define Package/libopenssl-afalg/description
This package adds an engine that enables hardware acceleration
through the AF_ALG kernel interface.
To use it, you need to configure the engine in /etc/ssl/openssl.cnf.
See https://www.openssl.org/docs/man1.1.1/man5/config.html#Engine-Configuration-Module
and https://openwrt.org/docs/techref/hardware/cryptographic.hardware.accelerators
The engine_id is "afalg".
endef
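
# The engine configuration referenced in the engine package descriptions is
# the same for all three packages; only the engine_id changes. A minimal
# sketch of an /etc/ssl/openssl.cnf stanza (section names are arbitrary
# labels chosen for illustration):
#
#   openssl_conf = openssl_init
#
#   [openssl_init]
#   engines = engine_section
#
#   [engine_section]
#   afalg = afalg_section
#
#   [afalg_section]
#   default_algorithms = ALL
#
# Substitute "devcrypto" or "padlock" as the engine_id for the other engines.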

define Package/libopenssl-devcrypto
  $(call Package/openssl/Default)
  SUBMENU:=SSL
  TITLE:=/dev/crypto hardware acceleration engine
  DEPENDS:=libopenssl @OPENSSL_ENGINE +PACKAGE_libopenssl-devcrypto:kmod-cryptodev +libopenssl-conf \
	   @!OPENSSL_ENGINE_BUILTIN
endef

define Package/libopenssl-devcrypto/description
This package adds an engine that enables hardware acceleration
through the /dev/crypto kernel interface.
To use it, you need to configure the engine in /etc/ssl/openssl.cnf.
See https://www.openssl.org/docs/man1.1.1/man5/config.html#Engine-Configuration-Module
and https://openwrt.org/docs/techref/hardware/cryptographic.hardware.accelerators
The engine_id is "devcrypto".
endef

define Package/libopenssl-padlock
  $(call Package/openssl/Default)
  SUBMENU:=SSL
  TITLE:=VIA Padlock hardware acceleration engine
  DEPENDS:=libopenssl @OPENSSL_ENGINE @TARGET_x86 +PACKAGE_libopenssl-padlock:kmod-crypto-hw-padlock \
	   +libopenssl-conf @!OPENSSL_ENGINE_BUILTIN
endef

define Package/libopenssl-padlock/description
This package adds an engine that enables VIA Padlock hardware acceleration.
To use it, you need to configure it in /etc/ssl/openssl.cnf.
See https://www.openssl.org/docs/man1.1.1/man5/config.html#Engine-Configuration-Module
and https://openwrt.org/docs/techref/hardware/cryptographic.hardware.accelerators
The engine_id is "padlock".
endef

OPENSSL_OPTIONS:= shared

ifndef CONFIG_OPENSSL_WITH_BLAKE2
  OPENSSL_OPTIONS += no-blake2
endif

ifndef CONFIG_OPENSSL_WITH_CHACHA_POLY1305
  OPENSSL_OPTIONS += no-chacha no-poly1305
else
  ifdef CONFIG_OPENSSL_PREFER_CHACHA_OVER_GCM
    OPENSSL_OPTIONS += -DOPENSSL_PREFER_CHACHA_OVER_GCM
  endif
endif
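
# Options beginning with -D are not Configure switches: OpenSSL's Configure
# passes them through to the compiler, so OPENSSL_PREFER_CHACHA_OVER_GCM here
# and OPENSSL_SMALL_FOOTPRINT below end up as preprocessor defines.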

ifndef CONFIG_OPENSSL_WITH_ASYNC
  OPENSSL_OPTIONS += no-async
endif

ifndef CONFIG_OPENSSL_WITH_EC2M
  OPENSSL_OPTIONS += no-ec2m
endif

ifndef CONFIG_OPENSSL_WITH_ERROR_MESSAGES
  OPENSSL_OPTIONS += no-err
endif

ifndef CONFIG_OPENSSL_WITH_TLS13
  OPENSSL_OPTIONS += no-tls1_3
endif

ifndef CONFIG_OPENSSL_WITH_ARIA
  OPENSSL_OPTIONS += no-aria
endif

ifndef CONFIG_OPENSSL_WITH_SM234
  OPENSSL_OPTIONS += no-sm2 no-sm3 no-sm4
endif

ifndef CONFIG_OPENSSL_WITH_CAMELLIA
  OPENSSL_OPTIONS += no-camellia
endif

ifndef CONFIG_OPENSSL_WITH_IDEA
  OPENSSL_OPTIONS += no-idea
endif

ifndef CONFIG_OPENSSL_WITH_SEED
  OPENSSL_OPTIONS += no-seed
endif

ifndef CONFIG_OPENSSL_WITH_MDC2
  OPENSSL_OPTIONS += no-mdc2
endif

ifndef CONFIG_OPENSSL_WITH_WHIRLPOOL
  OPENSSL_OPTIONS += no-whirlpool
endif

ifndef CONFIG_OPENSSL_WITH_CMS
  OPENSSL_OPTIONS += no-cms
endif

ifndef CONFIG_OPENSSL_WITH_RFC3779
  OPENSSL_OPTIONS += no-rfc3779
endif

ifdef CONFIG_OPENSSL_NO_DEPRECATED
  OPENSSL_OPTIONS += no-deprecated
endif

ifeq ($(CONFIG_OPENSSL_OPTIMIZE_SPEED),y)
  TARGET_CFLAGS := $(filter-out -O%,$(TARGET_CFLAGS)) -O3
else
  OPENSSL_OPTIONS += -DOPENSSL_SMALL_FOOTPRINT
endif

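# Engine support comes in two flavours: with OPENSSL_ENGINE_BUILTIN the
# selected engines are linked statically into libcrypto (hence
# disable-dynamic-engine), while without it each engine is built as a
# loadable module and shipped in its own libopenssl-* package.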
ifdef CONFIG_OPENSSL_ENGINE
  ifdef CONFIG_OPENSSL_ENGINE_BUILTIN
    OPENSSL_OPTIONS += disable-dynamic-engine
    ifndef CONFIG_OPENSSL_ENGINE_BUILTIN_AFALG
      OPENSSL_OPTIONS += no-afalgeng
    endif
    ifdef CONFIG_OPENSSL_ENGINE_BUILTIN_DEVCRYPTO
      OPENSSL_OPTIONS += enable-devcryptoeng
    endif
    ifndef CONFIG_OPENSSL_ENGINE_BUILTIN_PADLOCK
      OPENSSL_OPTIONS += no-hw-padlock
    endif
  else
    ifdef CONFIG_PACKAGE_libopenssl-devcrypto
      OPENSSL_OPTIONS += enable-devcryptoeng
    endif
    ifndef CONFIG_PACKAGE_libopenssl-afalg
      OPENSSL_OPTIONS += no-afalgeng
    endif
    ifndef CONFIG_PACKAGE_libopenssl-padlock
      OPENSSL_OPTIONS += no-hw-padlock
    endif
  endif
else
  OPENSSL_OPTIONS += no-engine
endif

ifndef CONFIG_OPENSSL_WITH_GOST
  OPENSSL_OPTIONS += no-gost
endif

ifndef CONFIG_OPENSSL_WITH_DTLS
  OPENSSL_OPTIONS += no-dtls
endif

ifdef CONFIG_OPENSSL_WITH_COMPRESSION
  OPENSSL_OPTIONS += zlib-dynamic
else
  OPENSSL_OPTIONS += no-comp
endif

ifndef CONFIG_OPENSSL_WITH_NPN
  OPENSSL_OPTIONS += no-nextprotoneg
endif

ifndef CONFIG_OPENSSL_WITH_PSK
  OPENSSL_OPTIONS += no-psk
endif

ifndef CONFIG_OPENSSL_WITH_SRP
  OPENSSL_OPTIONS += no-srp
endif

ifndef CONFIG_OPENSSL_WITH_ASM
  OPENSSL_OPTIONS += no-asm
endif

ifdef CONFIG_i386
  ifndef CONFIG_OPENSSL_WITH_SSE2
    OPENSSL_OPTIONS += no-sse2
  endif
endif

OPENSSL_TARGET:=linux-$(call qstrip,$(CONFIG_ARCH))-openwrt

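# Mix a hash of the final option set into the configure stamp name so that
# changing any option invalidates the stamp and forces a "make clean" in
# Build/Configure below before the tree is rebuilt.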
STAMP_CONFIGURED := $(STAMP_CONFIGURED)_$(shell echo $(OPENSSL_OPTIONS) | mkhash md5)

define Build/Configure
	(cd $(PKG_BUILD_DIR); \
		./Configure $(OPENSSL_TARGET) \
			--prefix=/usr \
			--libdir=lib \
			--openssldir=/etc/ssl \
			$(TARGET_CPPFLAGS) \
			$(TARGET_LDFLAGS) \
			$(OPENSSL_OPTIONS) && \
		{ [ -f $(STAMP_CONFIGURED) ] || make clean; } \
	)
endef
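
# As an illustration (not generated output): with CONFIG_ARCH set to "arm"
# and default options, the invocation above expands to roughly
#
#   ./Configure linux-arm-openwrt --prefix=/usr --libdir=lib \
#       --openssldir=/etc/ssl shared zlib-dynamic ...
#
# followed by the stamp check that decides whether "make clean" is needed.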

TARGET_CFLAGS += $(FPIC) -ffunction-sections -fdata-sections
TARGET_LDFLAGS += -Wl,--gc-sections

define Build/Compile
	+$(MAKE) $(PKG_JOBS) -C $(PKG_BUILD_DIR) \
		CROSS_COMPILE="$(TARGET_CROSS)" \
		CC="$(TARGET_CC)" \
		SOURCE_DATE_EPOCH=$(SOURCE_DATE_EPOCH) \
		OPENWRT_OPTIMIZATION_FLAGS="$(TARGET_CFLAGS)" \
		$(OPENSSL_MAKEFLAGS) \
		all
	$(MAKE) -C $(PKG_BUILD_DIR) \
		CROSS_COMPILE="$(TARGET_CROSS)" \
		CC="$(TARGET_CC)" \
		DESTDIR="$(PKG_INSTALL_DIR)" \
		$(OPENSSL_MAKEFLAGS) \
		install_sw install_ssldirs
endef
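
# install_sw and install_ssldirs are standard OpenSSL 1.1.x make targets:
# install_sw installs the libraries, headers and the openssl binary while
# skipping the man pages, and install_ssldirs creates the /etc/ssl layout
# with the default openssl.cnf.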

define Build/InstallDev
	$(INSTALL_DIR) $(1)/usr/include
	$(CP) $(PKG_INSTALL_DIR)/usr/include/openssl $(1)/usr/include/
	$(INSTALL_DIR) $(1)/usr/lib/
	$(CP) $(PKG_INSTALL_DIR)/usr/lib/lib{crypto,ssl}.{a,so*} $(1)/usr/lib/
	$(INSTALL_DIR) $(1)/usr/lib/pkgconfig
	$(CP) $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/{openssl,libcrypto,libssl}.pc $(1)/usr/lib/pkgconfig/
	[ -n "$(TARGET_LDFLAGS)" ] && $(SED) 's#$(TARGET_LDFLAGS)##g' $(1)/usr/lib/pkgconfig/{openssl,libcrypto,libssl}.pc || true
endef
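
# The $(SED) call above strips the build-time TARGET_LDFLAGS from the staged
# .pc files so host-specific linker flags do not leak into dependent builds,
# which query them the usual way, e.g.:
#
#   pkg-config --cflags --libs openssl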

define Package/libopenssl/install
	$(INSTALL_DIR) $(1)/etc/ssl/certs
	$(INSTALL_DIR) $(1)/etc/ssl/private
	chmod 0700 $(1)/etc/ssl/private
	$(INSTALL_DIR) $(1)/usr/lib
	$(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/lib/libcrypto.so.* $(1)/usr/lib/
	$(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/lib/libssl.so.* $(1)/usr/lib/
	$(if $(CONFIG_OPENSSL_ENGINE),$(INSTALL_DIR) $(1)/usr/lib/$(ENGINES_DIR))
endef

define Package/libopenssl-conf/install
	$(INSTALL_DIR) $(1)/etc/ssl
	$(CP) $(PKG_INSTALL_DIR)/etc/ssl/openssl.cnf $(1)/etc/ssl/
endef

define Package/openssl-util/install
	$(INSTALL_DIR) $(1)/usr/bin
	$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/openssl $(1)/usr/bin/
endef

define Package/libopenssl-afalg/install
	$(INSTALL_DIR) $(1)/usr/lib/$(ENGINES_DIR)
	$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/lib/$(ENGINES_DIR)/afalg.so $(1)/usr/lib/$(ENGINES_DIR)
endef

define Package/libopenssl-devcrypto/install
	$(INSTALL_DIR) $(1)/usr/lib/$(ENGINES_DIR)
	$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/lib/$(ENGINES_DIR)/devcrypto.so $(1)/usr/lib/$(ENGINES_DIR)
endef

define Package/libopenssl-padlock/install
	$(INSTALL_DIR) $(1)/usr/lib/$(ENGINES_DIR)
	$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/lib/$(ENGINES_DIR)/*padlock.so $(1)/usr/lib/$(ENGINES_DIR)
endef

$(eval $(call BuildPackage,libopenssl))
$(eval $(call BuildPackage,libopenssl-conf))
$(eval $(call BuildPackage,libopenssl-afalg))
$(eval $(call BuildPackage,libopenssl-devcrypto))
$(eval $(call BuildPackage,libopenssl-padlock))
$(eval $(call BuildPackage,openssl-util))