/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 * 
 * Calling back to a guest OS:
 * ===========================
 * 
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code-selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that,
 * we're safe, as we don't allow a guest OS to install ring-0 privileges into
 * the GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are to
 * ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
 * rather than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *      
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 * 
 * Note that the "failsafe callback" uses a special stackframe:
 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on stack rather than
 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
 * saved/restored in the guest OS. Furthermore, if we load them we may cause
 * a fault if they are invalid, which is a hassle to deal with. We avoid
 * that problem if we don't load them :-) This property allows us to use
 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
 * on return to ring != 0, we can simply package it up as a return via
 * the failsafe callback, and let the guest OS sort it out (perhaps by
 * killing an application process). Note that we also do this for any
 * faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to bad ring-1 stack).
 *  - faulting IRET on entry to failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
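
/*
 * For illustration, the frames described above written as C structs (a
 * sketch only -- the authoritative layout is whatever the stores in
 * create_bounce_frame below produce; fields are in increasing-address
 * order, and the ESP/SS words exist only for inter-privilege returns):
 *
 *     struct bounce_frame {              // ordinary callback/exception
 *         unsigned long eip, cs, eflags; //  (an error code, and %cr2 for
 *         unsigned long esp, ss;         //  page faults, may sit below EIP)
 *     };
 *
 *     struct failsafe_frame {            // failsafe callback
 *         unsigned long ds, es, fs, gs;  // saved data-segment selectors
 *         unsigned long eip, cs, eflags;
 *         unsigned long esp, ss;
 *     };
 */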

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <public/xen.h>

#define GET_CURRENT(reg)   \
        movl $8192-4, reg; \
        orl  %esp, reg;    \
        andl $~3,reg;      \
        movl (reg),reg;
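
/*
 * GET_CURRENT relies on each CPU stack being an 8kB, 8kB-aligned block
 * with the 'current' task pointer stored in its topmost word; in C this
 * is roughly (a sketch only):
 *
 *     current = *(struct task_struct **)((esp | (8192-4)) & ~3);
 *
 * ORing in 8192-4 rounds %esp up to the address of the stack's last
 * word; the ~3 mask then clears any low bits carried over from %esp.
 */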

        ALIGN
restore_all_guest:
        testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jnz  failsafe_callback
        testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
        jnz  restore_all_vm86
FLT1:   movl XREGS_ds(%esp),%ds
FLT2:   movl XREGS_es(%esp),%es
FLT3:   movl XREGS_fs(%esp),%fs
FLT4:   movl XREGS_gs(%esp),%gs
restore_all_vm86:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
FLT5:   iret
.section .fixup,"ax"
FIX5:   subl  $28,%esp
        pushl 28(%esp)                 # error_code/entry_vector
        movl  %eax,XREGS_eax+4(%esp)
        movl  %ebp,XREGS_ebp+4(%esp)
        movl  %edi,XREGS_edi+4(%esp)
        movl  %esi,XREGS_esi+4(%esp)
        movl  %edx,XREGS_edx+4(%esp)
        movl  %ecx,XREGS_ecx+4(%esp)
        movl  %ebx,XREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $DBLFLT1,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   error_code
DBLFLT1:GET_CURRENT(%ebx)
        jmp   test_all_events
DBLFIX1:GET_CURRENT(%ebx)
        testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jnz   domain_crash             # cannot reenter failsafe code
        orb   $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        jmp   test_all_events          # will return via failsafe code
.previous
.section __pre_ex_table,"a"
	.long FLT1,FIX1
	.long FLT2,FIX1
	.long FLT3,FIX1
	.long FLT4,FIX1
	.long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,DBLFIX1
.previous
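
/*
 * Two fixup tables are in play here. __pre_ex_table maps each faulting
 * FLTn address to its FIXn recovery code; it is searched by
 * search_pre_exception_table() (see exception_with_ints_disabled below)
 * for faults taken while interrupts are disabled, i.e. inside these
 * critical restore sequences. __ex_table is the ordinary exception-fixup
 * table, which we assume is consulted from the C trap handlers for faults
 * taken with interrupts enabled (here mapping DBLFLT1 to DBLFIX1).
 */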

/* No special register assumptions */
failsafe_callback:
        GET_CURRENT(%ebx)
        andb $~TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        leal DOMAIN_trap_bounce(%ebx),%edx
        movl DOMAIN_failsafe_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl DOMAIN_failsafe_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
FLT6:   iret 
.section .fixup,"ax"
FIX6:   pushl %ebx
        GET_CURRENT(%ebx)
        orb   $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
        pop   %ebx
        jmp   FIX5
.section __pre_ex_table,"a"
	.long FLT6,FIX6
.previous

        ALIGN
restore_all_xen:
	popl %ebx
	popl %ecx
	popl %edx
	popl %esi
	popl %edi
	popl %ebp
	popl %eax
        addl $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl $4,%esp                    # dummy error_code/entry_vector slot
	SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
	andl $(NR_hypercalls-1),%eax
	call *SYMBOL_NAME(hypercall_table)(,%eax,4)
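
/*
 * The dispatch above, in C (a sketch only): the vector in %eax is masked
 * to the table size, and the guest registers saved by SAVE_ALL double as
 * the C argument list, since they sit on the stack in calling-convention
 * order (see the pops in restore_all_guest):
 *
 *     regs->eax = hypercall_table[eax & (NR_hypercalls-1)]
 *                                (ebx, ecx, edx, esi, edi);
 */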

ret_from_hypercall:
        movl %eax,XREGS_eax(%esp)       # save the return value

test_all_events:
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/  
        movl DOMAIN_processor(%ebx),%eax
        shl  $6,%eax                    # sizeof(irq_cpustat) == 64
        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz  process_softirqs
/*test_guest_events:*/
        movl DOMAIN_shared_info(%ebx),%eax
        testb $0xFF,SHINFO_upcall_mask(%eax)
        jnz  restore_all_guest
        testb $0xFF,SHINFO_upcall_pending(%eax)
        jz   restore_all_guest
/*process_guest_events:*/
        leal DOMAIN_trap_bounce(%ebx),%edx
        movl DOMAIN_event_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl DOMAIN_event_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        movl DOMAIN_shared_info(%ebx),%eax
        movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
        jmp  restore_all_guest
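
/*
 * test_all_events above is, roughly, the following C (a sketch only;
 * 'sh' is the domain's shared_info, 'tb' its trap_bounce):
 *
 *     if (*(u32 *)((char *)irq_stat + 64*current->processor))
 *         goto process_softirqs;
 *     if (!sh->upcall_mask && sh->upcall_pending) {
 *         tb->eip   = current->event_addr;     // registered event callback
 *         tb->cs    = current->event_sel;
 *         tb->flags = TBF_INTERRUPT;
 *         create_bounce_frame();
 *         sh->upcall_mask = 1;                 // masked during delivery
 *     }
 *     goto restore_all_guest;
 */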

        ALIGN
process_softirqs:
        sti       
        call SYMBOL_NAME(do_softirq)
        jmp  test_all_events
                
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
/* %edx == trap_bounce, %ebx == task_struct                              */
/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
create_bounce_frame:
        movl XREGS_eflags+4(%esp),%ecx
        movb XREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz   ring1 /* jump if returning to an existing ring-1 activation */
        /* obtain ss/esp from TSS -- no current ring-1 activations */
        movl DOMAIN_processor(%ebx),%eax
        /* next 4 lines multiply %eax by 8320 == 8192 + 128, i.e. sizeof(tss_struct) */
        movl %eax, %ecx
        shll $7, %ecx
        shll $13, %eax
        addl %ecx,%eax
        addl $init_tss + 12,%eax
        movl (%eax),%esi /* tss->esp1 */
FLT7:   movl 4(%eax),%gs /* tss->ss1  */
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz   nvm86_1
	subl $16,%esi       /* push ES/DS/FS/GS (VM86 stack frame) */
	movl XREGS_es+4(%esp),%eax
FLT8:   movl %eax,%gs:(%esi)
	movl XREGS_ds+4(%esp),%eax
FLT9:   movl %eax,%gs:4(%esi)
	movl XREGS_fs+4(%esp),%eax
FLT10:  movl %eax,%gs:8(%esi)
	movl XREGS_gs+4(%esp),%eax
FLT11:  movl %eax,%gs:12(%esi)
nvm86_1:subl $8,%esi        /* push SS/ESP (inter-priv iret) */
        movl XREGS_esp+4(%esp),%eax
FLT12:  movl %eax,%gs:(%esi) 
        movl XREGS_ss+4(%esp),%eax
FLT13:  movl %eax,%gs:4(%esi) 
        jmp 1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl XREGS_esp+4(%esp),%esi
FLT14:  movl XREGS_ss+4(%esp),%gs 
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl $12,%esi
        movl XREGS_eip+4(%esp),%eax
FLT15:  movl %eax,%gs:(%esi) 
        movl XREGS_cs+4(%esp),%eax
FLT16:  movl %eax,%gs:4(%esi) 
        movl XREGS_eflags+4(%esp),%eax
FLT17:  movl %eax,%gs:8(%esi)
        movb TRAPBOUNCE_flags(%edx),%cl
        test $TBF_EXCEPTION_ERRCODE,%cl
        jz   1f
        subl $4,%esi                    # push error_code onto guest frame
        movl TRAPBOUNCE_error_code(%edx),%eax
FLT18:  movl %eax,%gs:(%esi)
        testb $TBF_EXCEPTION_CR2,%cl
        jz   2f
        subl $4,%esi                    # push %cr2 onto guest frame
        movl TRAPBOUNCE_cr2(%edx),%eax
FLT19:  movl %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz   2f
        subl $16,%esi                # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz   nvm86_2
        xorl %eax,%eax               # VM86: we write zero selector values
FLT20:  movl %eax,%gs:(%esi) 
FLT21:  movl %eax,%gs:4(%esi)
FLT22:  movl %eax,%gs:8(%esi) 
FLT23:  movl %eax,%gs:12(%esi)
        jmp  2f
nvm86_2:movl XREGS_ds+4(%esp),%eax   # non-VM86: write real selector values
FLT24:  movl %eax,%gs:(%esi) 
        movl XREGS_es+4(%esp),%eax
FLT25:  movl %eax,%gs:4(%esi)
        movl XREGS_fs+4(%esp),%eax
FLT26:  movl %eax,%gs:8(%esi) 
        movl XREGS_gs+4(%esp),%eax
FLT27:  movl %eax,%gs:12(%esi)
2:      movb $0,TRAPBOUNCE_flags(%edx)
        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
        jz   nvm86_3
        xorl %eax,%eax      /* zero DS-GS, just as a real CPU would */
        movl %eax,XREGS_ds+4(%esp)
        movl %eax,XREGS_es+4(%esp)
        movl %eax,XREGS_fs+4(%esp)
        movl %eax,XREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl $0xfffcbeff,XREGS_eflags+4(%esp) # 0xfffcbeff == ~(TF|NT|RF|VM)
        movl %gs,XREGS_ss+4(%esp)
        movl %esi,XREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl %eax,XREGS_cs+4(%esp)
        movl TRAPBOUNCE_eip(%edx),%eax
        movl %eax,XREGS_eip+4(%esp)
        ret
.section .fixup,"ax"
FIX7:   sti
        popl  %esi
        addl  $4,%esp                  # Discard create_b_frame return address
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $DBLFLT2,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   error_code
DBLFLT2:jmp   process_guest_exception_and_events
.previous
.section __pre_ex_table,"a"
	.long  FLT7,FIX7 ,  FLT8,FIX7 ,  FLT9,FIX7 , FLT10,FIX7
	.long FLT11,FIX7 , FLT12,FIX7 , FLT13,FIX7 , FLT14,FIX7
	.long FLT15,FIX7 , FLT16,FIX7 , FLT17,FIX7 , FLT18,FIX7
	.long FLT19,FIX7 , FLT20,FIX7 , FLT21,FIX7 , FLT22,FIX7
	.long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
.previous
.section __ex_table,"a"
        .long DBLFLT2,domain_crash
.previous

        ALIGN
process_guest_exception_and_events:
        leal DOMAIN_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz   test_all_events
        cli  # create_bounce_frame needs CLI for pre-exceptions to work
        call create_bounce_frame
        jmp  test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al        # %eax = EFLAGS, CS in the low byte
        testl $(3|X86_EFLAGS_VM),%eax   # guest context (CS RPL != 0 or VM86)?
        jnz   test_all_events
        jmp   restore_all_xen

ENTRY(divide_error)
	pushl $TRAP_divide_error<<16
	ALIGN
error_code:
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
1:      sti                             # re-enable interrupts
        xorl  %eax,%eax
        movw  XREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
	pushl %edx			# push the xen_regs pointer
	GET_CURRENT(%ebx)
	call  *SYMBOL_NAME(exception_table)(,%eax,4)
        addl  $4,%esp
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
	jz    restore_all_xen
        jmp   process_guest_exception_and_events

exception_with_ints_disabled:
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz   1b                        # it really does happen!
                                        #  (e.g., DOM0 X server)
        pushl XREGS_eip(%esp)
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,XREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $XREGS_kernel_sizeof/4,%ecx
        rep;  movsl                     # make room for error_code/entry_vector
        movl  XREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,XREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen           # return to fixup code
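
        /*
         * The rep;movsl above slid the whole saved-register frame down one
         * word and stored the error_code/entry_vector in the slot freed at
         * the top. Once restore_all_xen pops the frame and IRETs into the
         * fixup handler, that word is left on top of the stack -- exactly
         * what the FIXn fixup code expects to find there.
         */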

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  XREGS_entry_vector(%esp),%si
        movl  %esp,%edx
	pushl %edx			# push the xen_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call  SYMBOL_NAME(fatal_trap)
        ud2
                                        
ENTRY(coprocessor_error)
	pushl $TRAP_copro_error<<16
	jmp error_code

ENTRY(simd_coprocessor_error)
	pushl $TRAP_simd_error<<16
	jmp error_code

ENTRY(device_not_available)
	pushl $TRAP_no_device<<16
        jmp   error_code

ENTRY(debug)
	pushl $TRAP_debug<<16
	jmp error_code

ENTRY(int3)
	pushl $TRAP_int3<<16
	jmp error_code

ENTRY(overflow)
	pushl $TRAP_overflow<<16
	jmp error_code

ENTRY(bounds)
	pushl $TRAP_bounds<<16
	jmp error_code

ENTRY(invalid_op)
	pushl $TRAP_invalid_op<<16
	jmp error_code

ENTRY(coprocessor_segment_overrun)
	pushl $TRAP_copro_seg<<16
	jmp error_code

ENTRY(invalid_TSS)
        movw $TRAP_invalid_tss,2(%esp)
	jmp error_code

ENTRY(segment_not_present)
        movw $TRAP_no_segment,2(%esp)
	jmp error_code

ENTRY(stack_segment)
        movw $TRAP_stack_error,2(%esp)
	jmp error_code

ENTRY(general_protection)
        movw $TRAP_gp_fault,2(%esp)
	jmp error_code

ENTRY(alignment_check)
        movw $TRAP_alignment_check,2(%esp)
	jmp error_code

ENTRY(page_fault)
        movw $TRAP_page_fault,2(%esp)
	jmp error_code

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
	jmp error_code

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
	jmp error_code

ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1. 
	pushl %eax
	SAVE_ALL_NOSEGREGS(a)

        # Check for hardware problems. Port 0x61 is the ISA system-control
        # port: bit 7 reports a memory parity error, bit 6 an I/O check error.
        inb   $0x61,%al
        testb $0x80,%al
        jne   nmi_parity_err
        testb $0x40,%al
        jne   nmi_io_err
        movl  %eax,%ebx
        
        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS-GS all contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   do_watchdog_tick
        movl  %ds,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi
        movl  %es,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi

do_watchdog_tick:
        movl  $(__HYPERVISOR_DS),%edx
        movl  %edx,%ds
        movl  %edx,%es
        movl  %esp,%edx
	pushl %ebx   # reason
	pushl %edx   # regs
        call  SYMBOL_NAME(do_nmi)
	addl  $8,%esp
        movl  XREGS_eflags(%esp),%eax
        movb  XREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
	jz    restore_all_xen
        GET_CURRENT(%ebx)
        jmp   restore_all_guest

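        # We interrupted Xen inside one of the tricky segment-register
        # prologue/epilogue sequences (see above), so the tick cannot be
        # handled here. Instead, re-post it to ourselves as an ordinary
        # interrupt via a local APIC self-IPI with vector TRAP_deferred_nmi,
        # to be taken once the critical region has exited.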
defer_nmi:
        movl  $FIXMAP_apic_base,%eax
        # apic_wait_icr_idle()
1:      movl  APIC_ICR(%eax),%ebx
        testl $APIC_ICR_BUSY,%ebx
        jnz   1b
        # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
        movl  $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_LOGICAL | \
                TRAP_deferred_nmi),APIC_ICR(%eax)
        jmp   restore_all_xen

nmi_parity_err:
        # Clear and disable the parity-error line
        andb $0xf,%al
        orb  $0x4,%al
        outb %al,$0x61
        cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
        je   restore_all_xen
        bts  $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts  $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
        je   restore_all_xen
        movl $(__HYPERVISOR_DS),%edx       # nmi=fatal
        movl %edx,%ds
        movl %edx,%es
        movl %esp,%edx
        push %edx
        call SYMBOL_NAME(mem_parity_error)
        addl $4,%esp
        jmp  ret_from_intr
                
nmi_io_err: 
        # Clear and disable the I/O-error line
        andb $0xf,%al
        orb  $0x8,%al
        outb %al,$0x61
        cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
        je   restore_all_xen
        bts  $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts  $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
        je   restore_all_xen
        movl $(__HYPERVISOR_DS),%edx       # nmi=fatal
        movl %edx,%ds
        movl %edx,%es
        movl %esp,%edx
        push %edx
        call SYMBOL_NAME(io_check_error)                        
        addl $4,%esp
        jmp  ret_from_intr


ENTRY(setup_vm86_frame)
        # Copies the entire 18-word stack frame forwards by 16 bytes. The
        # macro below expands recursively into one pushl/popl pair per word,
        # copying the highest word first so the overlapping source and
        # destination regions do not corrupt each other.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl $16,%esp
        ret

do_switch_vm86:
        # Discard the return address
        addl $4,%esp

        movl XREGS_eflags(%esp),%edx

        # GS:ESI == Ring-1 stack activation
        movl XREGS_esp(%esp),%esi
VFLT1:  movl XREGS_ss(%esp),%gs

        # ES:EDI == Ring-0 stack activation
        leal XREGS_eip(%esp),%edi

        # Restore the hypercall-number-clobbered EAX on our stack frame
VFLT2:  movl %gs:(%esi),%eax
        movl %eax,XREGS_eax(%esp)
        addl $4,%esi
        	
      	# Copy the VM86 activation from the ring-1 stack to the ring-0 stack
        movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
VFLT3:  movl %gs:(%esi),%eax
        stosl
        addl $4,%esi
        loop VFLT3

        # Fix up EFLAGS
        andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
        andl $X86_EFLAGS_IOPL,%edx # Ignore attempts to change EFLAGS.IOPL
        jnz  1f
        orl  $X86_EFLAGS_IF,%edx   # EFLAGS.IOPL=0 => no messing with EFLAGS.IF
1:      orl  $X86_EFLAGS_VM,%edx   # Force EFLAGS.VM
        orl  %edx,XREGS_eflags(%esp)
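
        /*
         * The EFLAGS fixup above, in C (a sketch; 'old' is EFLAGS at
         * hypercall time, 'new' the guest-supplied image copied at VFLT3):
         *
         *     new = (new & ~X86_EFLAGS_IOPL) | (old & X86_EFLAGS_IOPL);
         *     if ((old & X86_EFLAGS_IOPL) == 0)
         *         new |= X86_EFLAGS_IF;   // IOPL 0: guest can't touch IF
         *     new |= X86_EFLAGS_VM;       // force VM86 mode on return
         */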
        
        jmp test_all_events

.section __ex_table,"a"
        .long VFLT1,domain_crash
        .long VFLT2,domain_crash
        .long VFLT3,domain_crash
.previous

.data

ENTRY(exception_table)
        .long SYMBOL_NAME(do_divide_error)
        .long SYMBOL_NAME(do_debug)
        .long 0 # nmi
        .long SYMBOL_NAME(do_int3)
        .long SYMBOL_NAME(do_overflow)
        .long SYMBOL_NAME(do_bounds)
        .long SYMBOL_NAME(do_invalid_op)
        .long SYMBOL_NAME(math_state_restore)
        .long 0 # double fault
        .long SYMBOL_NAME(do_coprocessor_segment_overrun)
        .long SYMBOL_NAME(do_invalid_TSS)
        .long SYMBOL_NAME(do_segment_not_present)
        .long SYMBOL_NAME(do_stack_segment)
        .long SYMBOL_NAME(do_general_protection)
        .long SYMBOL_NAME(do_page_fault)
        .long SYMBOL_NAME(do_spurious_interrupt_bug)
        .long SYMBOL_NAME(do_coprocessor_error)
        .long SYMBOL_NAME(do_alignment_check)
        .long SYMBOL_NAME(do_machine_check)
        .long SYMBOL_NAME(do_simd_coprocessor_error)

ENTRY(hypercall_table)
        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)
        .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
        .long SYMBOL_NAME(do_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)  /* 10 */
        .long SYMBOL_NAME(do_set_fast_trap)
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_update_va_mapping)
        .long SYMBOL_NAME(do_set_timer_op)       /* 15 */
        .long SYMBOL_NAME(do_event_channel_op)
        .long SYMBOL_NAME(do_xen_version)
        .long SYMBOL_NAME(do_console_io)
        .long SYMBOL_NAME(do_physdev_op)
        .long SYMBOL_NAME(do_grant_table_op)     /* 20 */
        .long SYMBOL_NAME(do_vm_assist)
        .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
        .long SYMBOL_NAME(do_switch_vm86)
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long SYMBOL_NAME(do_ni_hypercall)
        .endr