/*
 * launch.S: the MLE post launch entry code.
 *
 * Copyright (c) 2017 Assured Information Security.
 *
 * Ross Philipson <philipsonr@ainfosec.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <config.h>
#include <msr.h>
#include <page.h>
#include <processor.h>

/* TXT config regs addrs/offsets */
#define TXT_PRIV_CONFIG_REGS_BASE      0xfed20000
#define TXTCR_STS                      0x0000
#define TXTCR_ERRORCODE                0x0030
#define TXTCR_CMD_RESET                0x0038
#define TXTCR_CMD_UNLOCK_MEM_CONFIG    0x0218
#define TXTCR_HEAP_BASE                0x0300

/* OsSinitData field offsets */
#define MLE_PGTBL_OFFSET       8

/* 64b selectors */
#define CS_SEL64               0x0008
#define DS_SEL64               0x0010

/* errorcode for post-launch memory layout verification failure */
#define LAYOUT_ERR             0xc0008001

#define BSP_STACK_SIZE         0x4000
#define AP_STACK_SIZE          0x0800

.section ".text"
	.align PAGE_SIZE, 0

.code32

.global _mle_start
_mle_start:

/* Original:
 * entry point post-launch, to verify memory layout
 * (must all be w/in one page; since _start is page-aligned, it will be),
 * which is why we can't call much other code (e.g. printk, TPM fns, etc.)
 * EFI:
 * this routine is on the first page of the .text section, page aligned and
 * far smaller than PAGE_SIZE.
 */
ENTRY(post_launch_entry)
	/*
	 * Per the spec:
	 * EBX - MLE entry point physical address.
	 * ECX - MLE page table base physical address.
	 *
	 * Restore the world and get back into long mode. EBX contains the
	 * entry point, which is our only known location in protected mode. We
	 * will use it to set things right, then validate it later. If it is
	 * not what it is supposed to be, the world will come crashing down.
	 * Start by creating our page tables; save EBX in ESI so EBX can be
	 * reused.
	 *
	 * N.B. It seems, though, that TXT should guarantee the register
	 * contains what the specification says it should, so it is not clear
	 * why it would need verification.
	 *
	 * TODO: if we end up needing to validate EBX here, we can load a
	 * stack in some scratch location, call and pop to get EIP, and then
	 * frob up a location to compare against.
	 */
	cli
	movl	%ebx, %esi

	/* Fixup some addresses for the GDT and long jump */
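	/*
	 * Three fields assembled as zero are patched with physical addresses
	 * computed from the entry point in EBX: the base field inside
	 * gdt_desc64, the gdt_desc64_ptr word, and the target offset of the
	 * far jump at jump64.
	 */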
	movl	%ebx, %ecx
	addl	$(gdt_desc64 - _mle_start + 2), %ecx
	movl	%ebx, %eax
	addl	$(gdt_table64 - _mle_start), %eax
	movl	%eax, (%ecx)

	movl	%ebx, %ecx
	addl	$(gdt_desc64_ptr - _mle_start), %ecx
	movl	%ebx, %eax
	addl	$(gdt_desc64 - _mle_start), %eax
	movl	%eax, (%ecx)

	movl	%ebx, %ecx
	addl	$(jump64 - _mle_start + 1), %ecx
	movl	%ebx, %eax
	addl	$(entry64 - _mle_start), %eax
	movl	%eax, (%ecx)

	/* Zero out all page table pages so there are no surprises */
	movl	%ebx, %edi
	subl	$(TBOOT_RTMEM_SIZE + PAGE_SIZE), %edi
	xorl	%eax, %eax
	movl	$(TBOOT_PLEPT_SIZE/4), %ecx
	rep stosl
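
	/*
	 * The post-launch page tables live TBOOT_RTMEM_SIZE + PAGE_SIZE below
	 * the MLE entry point and are built from 7 physically contiguous
	 * pages:
	 *   page 1:      PML4, one entry pointing to the PDP
	 *   page 2:      PDP, four entries pointing to the PDs
	 *   pages 3 - 6: PDs mapping 0 - 4G with 2M pages
	 *   page 7:      PT mapping 0 - 2M with 4K pages (the first PD entry
	 *                is redirected to it below)
	 * Non-leaf entries use flags 0x3 (Present | Read/Write).
	 */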

	/* First page is the PML4 table with one PDP entry */
	movl	%ebx, %eax
	subl	$(TBOOT_RTMEM_SIZE + PAGE_SIZE), %eax
	movl	%eax, %ecx
	addl	$PAGE_SIZE, %ecx
	orl	$0x3, %ecx
	movl	%ecx, (%eax)

	/* Second page is the PDP table with 4 PD entries */
	addl	$PAGE_SIZE, %eax
	movl	%eax, %ecx
	xorl	%edx, %edx
1:
	addl	$PAGE_SIZE, %ecx
	cmpb	$4, %dl
	jz	2f
	orl	$0x3, %ecx
	movl	%ecx, (%eax)
	addl	$0x8, %eax
	incb	%dl
	jmp	1b
2:	/* EAX Page 2 + 0x20 */

	/* Next 4 pages are PDs that map all of mem < 4G as 2M pages */
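	/* Each entry is phys | 0x83 (Present | Read/Write | PS, i.e. a 2M page) */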
	addl	$(PAGE_SIZE - 0x20), %eax
	xorl	%edx, %edx
	xorl	%ecx, %ecx
	xorl	%ebx, %ebx
	addl	$0x83, %ecx
1:
	cmpw	$512, %dx
	jz	2f
	movl	%ecx, (%eax)
	addl	$0x8, %eax
	addl	$0x200000, %ecx
	incw	%dx
	jmp	1b
2:
	cmpb	$3, %bl
	jz	3f
	incb	%bl
	xorl	%edx, %edx
	jmp	1b
3:	/* EAX Page 7 */

	/* Return to first PD entry on first PD and setup a PT entry */
	movl	%eax, %ecx
	subl	$(4*PAGE_SIZE), %eax
	orl	$0x3, %ecx
	movl	%ecx, (%eax)

	/* Page 7 is a PT that maps all of mem < 2M as 4K pages */
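	/* Each entry is phys | 0x3 (Present | Read/Write) */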
	addl	$(4*PAGE_SIZE), %eax
	xorl	%edx, %edx
	xorl	%ecx, %ecx
	addl	$0x3, %ecx
1:
	cmpw	$512, %dx
	jz	2f
	movl	%ecx, (%eax)
	addl	$0x8, %eax
	addl	$0x1000, %ecx
	incw	%dx
	jmp	1b
2:	/* EAX Page 8 */

	/*
	 * And done - all memory below 4G is identity mapped. Time to get back
	 * to long mode. Rewind EAX to the base of the page table structures
	 * (the PML4).
	 */
	subl	$(7*PAGE_SIZE), %eax

	/* Restore CR4, PAE must be enabled before IA-32e mode */
	movl	%cr4, %ecx
	orl	$(CR4_PAE | CR4_PGE), %ecx
	movl	%ecx, %cr4

	/* Load PML4 table location into PT base register */
	movl	%eax, %cr3

	/* Enable IA-32e mode and paging */
	movl	$MSR_EFER, %ecx
	rdmsr
	orl	$_EFER_LME, %eax
	wrmsr
	movl	%cr0, %eax
	orl	$(CR0_PG | CR0_NE | CR0_ET | CR0_MP | CR0_PE), %eax
	movl	%eax, %cr0
	jmp	1f
1:
	/* Now in IA-32e compatibility mode */

	/* Setup GDT and ljmp to 64b mode */
	movl	%esi, %ebx
	addl	$(gdt_desc64_ptr - _mle_start), %ebx
	lgdt	(%ebx)

jump64:
	.byte	0xea       /* far jmp op */
	.long	0x00000000 /* offset (fixed up) */
	.word	CS_SEL64   /* 64b code segment selector */

.code64

entry64:
	/* Load data segment regs */
	movw	$DS_SEL64, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss

	/* ESI still has original EBX, put it back */
	xorq	%rbx, %rbx
	movl	%esi, %ebx

	/*
	 * Layout check from original TBOOT. If EBX did not contain the
	 * physaddr of the entry point we would probably be lost way before
	 * we got here. But check it anyway, then check the page tables
	 * passed to SENTER.
	 */
	leaq	post_launch_entry(%rip), %rax
	cmpl	%eax, %ebx
	jne 	1f

	/*
	 * Verify that the last entry in the MLE page table is the one we
	 * expect. This is sufficient because: 1) all addrs must be physically
	 * increasing and 2) tboot is physically contiguous; therefore if any
	 * page were moved to a different phys addr then the last page would
	 * have to differ from tboot's last page. Note that paging is identity
	 * mapped here, so phys == virt for this verification.
	 *
	 * Get the addr of the MLE page table from OsSinitData; the first
	 * quadword at the start of the TXT heap is BiosDataSize.
	 */
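	/*
	 * Each TXT heap region starts with a 64-bit size field that counts
	 * the size field itself, so adding the value at the current pointer
	 * steps to the next region. After skipping BiosData and OsMleData we
	 * sit on OsSinitDataSize, hence the extra 8 bytes added to
	 * MLE_PGTBL_OFFSET to reach the page table base inside OsSinitData.
	 */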
	movq	$TXT_PRIV_CONFIG_REGS_BASE, %rcx
	movq	TXTCR_HEAP_BASE(%rcx), %rax
	addq	(%rax), %rax                       /* skip BiosData */
	addq	(%rax), %rax                       /* skip OsMleData */
	movq	(MLE_PGTBL_OFFSET + 8)(%rax), %rax /* addr of MLE page table */
	movq	(%rax), %rax /* PDP -> PD page */
	andq	$PAGE_MASK, %rax
	movq	(%rax), %rax /* PD -> PT page */
	andq	$PAGE_MASK, %rax
	/* Get the MLE size; this value was stored in the MLE and measured */
	movq	_mle_size(%rip), %rdx
	movq	%rdx, %rcx
	/* Use size to move past last valid PTE then one back */
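	/* One 8-byte PTE per 4K page: size >> (PAGE_SHIFT - 3) is the offset just past the last PTE */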
	shrq	$PAGE_SHIFT - 3, %rcx /* Like size div 512 */
	subq	$8, %rcx
	addq	%rcx, %rax
	movq	(%rax), %rax          /* PTE of last page */
	andq	$PAGE_MASK, %rax
	/* Calculate expected addr of last page */
	addq	%rdx, %rbx
	decq	%rbx             /* MLE start + size - 1 addr of last byte */
	andq	$PAGE_MASK, %rbx /* ...rounded to page start */
	/* Are they equal? */
	cmpq	%rbx, %rax
	je	start64
	/* Else fall through and die */

	/* Layout check failed so TXT RESET with a special error code */
1:
	movq	$TXT_PRIV_CONFIG_REGS_BASE, %rcx
	movl	$LAYOUT_ERR, TXTCR_ERRORCODE(%rcx)
	/* unlock memory config (and serialize) */
	movb	$1, TXTCR_CMD_UNLOCK_MEM_CONFIG(%rcx)
	movq	TXTCR_STS(%rcx), %rax
	/* TXT RESET */
	movb	$1, TXTCR_CMD_RESET(%rcx)
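	/* Fall back to a hard reset through the 0xcf9 reset control port (0x06 = system + CPU reset) */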
	movb	$6, %al
	movw	$0xcf9, %dx
	outb	%al, %dx /* for debug chipsets where TXT RESET may not work */
	ud2

start64:
	/* Clear the .bss of pre-launch stuffs */
	leaq	_bss_start(%rip), %rdi
	movq	_bss_size(%rip), %rcx
	xorq	%rax, %rax
	rep stosb

	/* Load up a stack */
	leaq	bsp_stack(%rip), %rax
	movq	%rax, %rsp

	/* Fixup IDT table and descriptor, load and STI */
	leaq	idt_desc64_end(%rip), %rax
	leaq	idt_table64(%rip), %rcx
	movq	%rcx, -8(%rax)
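	/* the quadword at idt_desc64_end - 8 is the base field of the IDT descriptor */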

	leaq	int_handler(%rip), %rax
	xorq	%rdx, %rdx
1:
	cmpw	$256, %dx
	jz	1f
	movq	%rax, %rbx
	movw	%bx, (%rcx)   /* offset 15:0 */
	shrq	$16, %rbx
	movw	%bx, 6(%rcx)  /* offset 31:16 */
	shrq	$16, %rbx
	movl	%ebx, 8(%rcx) /* offset 63:32 */
	cmpw	$18, %dx
	jnz	2f
	/* MCE vector */
	movw	$0x8f00, 4(%rcx) /* P, DPL=0, 64b, Trap */
2:
	incw	%dx
	addq	$16, %rcx
	jmp	1b
1:
	lidt	idt_desc64(%rip)
	sti

	/* Enable MCE */
	movq	%cr4, %rax
	orq	$CR4_MCE, %rax
	movq	%rax, %cr4

	/* And we are outa here... */
	callq	post_launch
	ud2

/*
 * vmexit handler
 */
ENTRY(vmx_asm_vmexit_handler)
	callq	vmx_vmexit_handler
	/* fall through to loop if callee returns (shouldn't happen) */

ENTRY(_mini_guest)
	pause
	/* TODO rest */
	ret

ENTRY(bsp_stack_ref)
	leaq	bsp_stack(%rip), %rax
	ret

/*
 * Interrupt handler
 */
int_handler:
	call handle_exception
	ud2

/* GDT */
	.align 16
gdt_desc64:
	.word	gdt_table64_end - gdt_table64 - 1 /* Limit */
	.long	0x00000000 /* Base */
gdt_desc64_end:

gdt_desc64_ptr:
	.long	0x00000000 /* Pointer to GDT descriptor */

	.align 16
gdt_table64:
	/* Null Segment */
	.quad	0x0000000000000000
	/* Code Segment */
	.word	0x0000 /* Limit 1 */
	.word	0x0000 /* Base 1 */
	.byte	0x00   /* Base 2 */
	.byte	0x9a   /* P=1 DPL=0 11=code C=0 R=1 A=0 */
	.byte	0x20   /* G=0 D=0 L=1 AVL=0 Limit 2 */
	.byte	0x00   /* Base 3 */
	/* Data Segment */
	.word	0x0000 /* Limit 1 */
	.word	0x0000 /* Base 1 */
	.byte	0x00   /* Base 2 */
	.byte	0x92   /* P=1 DPL=0 10=data E=0 W=1 A=0 */
	.byte	0x00   /* G=0 D=0 L=0 AVL=0 Limit 2 */
	.byte	0x00   /* Base 3 */
gdt_table64_end:

/* IDT */
	.align 16
idt_desc64:
	.word	idt_table64_end - idt_table64 - 1 /* Limit */
	.quad	0x0000000000000000 /* Base */
idt_desc64_end:

	.align	16

idt_table64:
	.rept	256
		.word	0x0000     /* Offset 15 - 0 */
		.word	CS_SEL64   /* Segment selector */
		.word	0x8e00     /* P, DPL=0, 64b, Interrupt (default) */
		.word	0x0000     /* Offset 31 - 16 */
		.long	0x00000000 /* Offset 63 - 32 */
		.long	0x00000000 /* Reserved */
	.endr
idt_table64_end:

.global _mle_size
_mle_size:
	.quad	0x0000000000000000 /* MLE size */

.global _bss_size
_bss_size:
	.quad	0x0000000000000000 /* .bss size */

.section ".data"
	.align PAGE_SIZE, 0

.section ".rdata"
	.align PAGE_SIZE, 0

/*
 * shared data page with the kernel (i.e. Xen)
 * (put at the end so that it does not split the e820 region for tboot)
 */
.section ".tbootsh","w"
	.align PAGE_SIZE, 0

	.globl _tboot_shared
_tboot_shared:
	.fill PAGE_SIZE,1,0
	.align PAGE_SIZE, 0

.section ".bss"
	.align PAGE_SIZE, 0

.global _bss_start
_bss_start:

/* Stacks */

bsp_stack_end:
        .fill BSP_STACK_SIZE, 1, 0
bsp_stack:

ap_stacks_end:
        .fill AP_STACK_SIZE * NR_CPUS, 1, 0
ap_stacks:

/* Page Table and VMCS data for AP bringup */

        .align PAGE_SIZE, 0
	.globl idle_pg_table
idle_pg_table:
        .fill 1*PAGE_SIZE,1,0

        .align PAGE_SIZE, 0
	.globl host_vmcs
host_vmcs:
        .fill 1*PAGE_SIZE,1,0

        .align PAGE_SIZE, 0
	.global ap_vmcs /* the input info for when the OS/VMM kernel traps into tboot */
ap_vmcs:
        .fill NR_CPUS * PAGE_SIZE, 1, 0