/* target/linux/ramips/dts/AWM002-EVB.dtsi */

#include "rt5350.dtsi"

#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>

/ {
	compatible = "asiarf,awm002-evb", "ralink,rt5350-soc";

	gpio-leds {
		compatible = "gpio-leds";

		tx {
			label = "awm002-evb:green:tx";
			gpios = <&gpio0 15 GPIO_ACTIVE_LOW>;
		};

		rx {
			label = "awm002-evb:green:rx";
			gpios = <&gpio0 16 GPIO_ACTIVE_LOW>;
		};

		wps {
			label = "awm002-evb:green:wps";
			gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
		};
	};

	keys {
		compatible = "gpio-keys-polled";
		poll-interval = <20>;

		reset_wps {
			label = "reset_wps";
			gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
			linux,code = <KEY_RESTART>;
		};

		mode {
			label = "mode";
			gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
			linux,code = <BTN_0>;
		};
	};
};

&ethernet {
	mtd-mac-address = <&factory 0x28>;
};

&wmac {
	ralink,mtd-eeprom = <&factory 0>;
};

&pinctrl {
	state_default: pinctrl0 {
		gpio {
			ralink,group = "i2c", "jtag";
			ralink,function = "gpio";
		};
	};
};

&esw {
	mediatek,portmap = <0x3f>;
};

&ehci {
	status = "okay";
};

&ohci {
	status = "okay";
};
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

        ALIGN
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        /* TB_eip = (32-bit syscall && syscall32_addr) ?
         *          syscall32_addr : syscall_addr */
        xor   %eax,%eax
        cmpw  $FLAT_USER_CS32,UREGS_cs(%rsp)
        cmoveq VCPU_syscall32_addr(%rbx),%rax
        testq %rax,%rax
        cmovzq VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        /* TB_flags = VGCF_syscall_disables_events ? TBF_INTERRUPT : 0 */
        btl   $_VGCF_syscall_disables_events,VCPU_guest_context_flags(%rbx)
        setc  %cl
        leal  (,%rcx,TBF_INTERRUPT),%ecx
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        andl  $~X86_EFLAGS_DF,UREGS_eflags(%rsp)
        jmp   test_all_events
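
/*
 * Illustrative C-style shorthand for the selection above (names mirror the
 * VCPU_ and TRAPBOUNCE_ offsets used here, not real structure members):
 *
 *   tb->eip   = (regs->cs == FLAT_USER_CS32 && v->syscall32_addr)
 *               ? v->syscall32_addr : v->syscall_addr;
 *   tb->flags = (v->guest_context_flags & (1 << _VGCF_syscall_disables_events))
 *               ? TBF_INTERRUPT : 0;
 */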

/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest

        addq  $8,%rsp
        popq  %rcx                    # RIP
        popq  %r11                    # CS
        cmpw  $FLAT_USER_CS32,%r11w
        popq  %r11                    # RFLAGS
        popq  %rsp                    # RSP
        je    1f
        sysretq
1:      sysretl

        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
.Lft0:  iretq

.section .fixup,"ax"
.Lfx0:  sti
        SAVE_ALL
        movq  UREGS_error_code(%rsp),%rsi
        movq  %rsp,%rax
        andq  $~0xf,%rsp
        pushq $__HYPERVISOR_DS         # SS
        pushq %rax                     # RSP
        pushfq                         # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  .Ldf0(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf0:  GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events
.previous
        _ASM_PRE_EXTABLE(.Lft0, .Lfx0)
        _ASM_EXTABLE(.Ldf0, failsafe_callback)

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq

/*
 * When entering SYSCALL from kernel mode:
 *  %rax                            = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %rcx                            = SYSCALL-saved %rip
 *  NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
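
/*
 * For illustration only: a minimal guest-side stub matching the convention
 * above (hypercall2 is a hypothetical name; real PV guests normally enter
 * via the hypercall page rather than issuing SYSCALL directly). SYSCALL
 * clobbers %rcx and %r11, hence %r10 carries the fourth argument:
 *
 *   static inline long hypercall2(unsigned int nr,
 *                                 unsigned long a1, unsigned long a2)
 *   {
 *       long ret;
 *       asm volatile ( "syscall"
 *                      : "=a" (ret)
 *                      : "a" (nr), "D" (a1), "S" (a2)
 *                      : "rcx", "r11", "memory" );
 *       return ret;
 *   }
 */
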
ENTRY(syscall_enter)
        sti
        movl  $FLAT_KERNEL_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        movq  24(%rsp),%r11 /* Re-load user RFLAGS into %r11 before SAVE_ALL */
        SAVE_ALL
        GET_CURRENT(%rbx)
        movq  VCPU_domain(%rbx),%rcx
        testb $1,DOMAIN_is_32bit_pv(%rcx)
        jnz   compat_syscall
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        cmpq  $NR_hypercalls,%rax
        jae   bad_hypercall
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9 
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq  %r8 ; popq  %rcx; popq  %rdx; popq  %rsi; popq  %rdi
        movq  UREGS_rax(%rsp),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
#else
#define SHADOW_BYTES 0  /* No on-stack shadow state */
#endif
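/*
 * Roughly, the debug block above does (argument count taken from
 * hypercall_args_table, arguments indexed from 0):
 *
 *   for ( i = nr_args(rax); i < 6; i++ )
 *       arg[i] = 0xDEADBEEF;
 *
 * so a handler can never silently rely on stale values in argument
 * registers it does not declare.
 */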
        cmpb  $0,tb_init_done(%rip)
UNLIKELY_START(ne, trace)
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movq  UREGS_rax+SHADOW_BYTES(%rsp),%rax   /* Hypercall #  */
        movq  UREGS_rdi+SHADOW_BYTES(%rsp),%rdi   /* Arg 1        */
        movq  UREGS_rsi+SHADOW_BYTES(%rsp),%rsi   /* Arg 2        */
        movq  UREGS_rdx+SHADOW_BYTES(%rsp),%rdx   /* Arg 3        */
        movq  UREGS_r10+SHADOW_BYTES(%rsp),%rcx   /* Arg 4        */
        movq  UREGS_r8 +SHADOW_BYTES(%rsp),%r8    /* Arg 5        */
        movq  UREGS_r9 +SHADOW_BYTES(%rsp),%r9    /* Arg 6        */
#undef SHADOW_BYTES
UNLIKELY_END(trace)
        leaq  hypercall_table(%rip),%r10
        PERFC_INCR(hypercalls, %rax, %rbx)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10         # Shadow RIP
        cmpq  %r10,UREGS_rip+8(%rsp)
        popq  %rcx         # Shadow hypercall index
        jne   skip_clobber /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
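/*
 * Symmetrically, the debug block above poisons the saved copies of the
 * argument registers the completed hypercall did consume, unless the
 * saved RIP changed (e.g. a continuation was set up):
 *
 *   if ( regs->rip == shadow_rip )
 *       for ( i = 0; i < nr_args(shadow_nr); i++ )
 *           regs->arg[i] = 0xDEADBEEF;
 */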
        movq  %rax,UREGS_rax(%rsp)       # save the return value

/* %rbx: struct vcpu */
test_all_events:
        cli                             # tests must not race interrupts
/*test_softirqs:*/  
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
        testb $1,VCPU_mce_pending(%rbx)
        jnz   process_mce
.Ltest_guest_nmi:
        testb $1,VCPU_nmi_pending(%rbx)
        jnz   process_nmi
test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events
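
/*
 * A C sketch of the event test above; the vcpu_info field names follow the
 * public interface (evtchn_upcall_mask, evtchn_upcall_pending), the rest
 * is shorthand:
 *
 *   if ( !vi->evtchn_upcall_mask && vi->evtchn_upcall_pending )
 *   {
 *       tb->eip   = v->event_addr;   // registered event callback
 *       tb->flags = TBF_INTERRUPT;   // sets upcall_mask during delivery
 *       create_bounce_frame();
 *       goto test_all_events;        // re-check before leaving
 *   }
 *   return_to_guest();
 */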

        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti       
        call do_softirq
        jmp  test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_mce:
        testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
        jnz  .Ltest_guest_nmi
        sti
        movb $0,VCPU_mce_pending(%rbx)
        call set_guest_machinecheck_trapbounce
        test %eax,%eax
        jz   test_all_events
        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
        movb %dl,VCPU_mce_old_mask(%rbx)            # iret hypercall
        orl  $1 << VCPU_TRAP_MCE,%edx
        movb %dl,VCPU_async_exception_mask(%rbx)
        jmp  process_trap
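
/*
 * In C-like shorthand (field names illustrative), process_mce amounts to:
 *
 *   if ( !(v->async_exception_mask & (1 << VCPU_TRAP_MCE)) )
 *   {
 *       v->mce_pending = 0;
 *       if ( set_guest_machinecheck_trapbounce() )
 *       {
 *           v->mce_old_mask = v->async_exception_mask; // restored by iret
 *           v->async_exception_mask |= 1 << VCPU_TRAP_MCE;
 *           create_bounce_frame();
 *       }
 *   }
 *
 * process_nmi below follows the same pattern for VCPU_TRAP_NMI.
 */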

        ALIGN
/* %rbx: struct vcpu */
process_nmi:
        testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
        jnz  test_guest_events
        sti
        movb $0,VCPU_nmi_pending(%rbx)
        call set_guest_nmi_trapbounce
        test %eax,%eax
        jz   test_all_events
        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
        movb %dl,VCPU_nmi_old_mask(%rbx)            # iret hypercall
        orl  $1 << VCPU_TRAP_NMI,%edx
        movb %dl,VCPU_async_exception_mask(%rbx)
        /* FALLTHROUGH */
process_trap:
        leaq VCPU_trap_bounce(%rbx),%rdx
        call create_bounce_frame
        jmp  test_all_events

bad_hypercall:
        movq $-ENOSYS,UREGS_rax(%rsp)
        jmp  test_all_events

ENTRY(sysenter_entry)
        sti
        pushq $FLAT_USER_SS
        pushq $0
        pushfq
        .globl sysenter_eflags_saved
sysenter_eflags_saved:
        pushq $0
        pushq $0
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        cmpb  $0,VCPU_sysenter_disables_events(%rbx)
        movq  $0,UREGS_rip(%rsp) /* null rip */
        movl  $3,UREGS_cs(%rsp)  /* ring 3 null cs */
        movq  VCPU_sysenter_addr(%rbx),%rax
        setne %cl
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testq %rax,%rax
        leal  (,%rcx,TBF_INTERRUPT),%ecx
UNLIKELY_START(z, sysenter_gpf)
        movq  VCPU_trap_ctxt(%rbx),%rsi
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        subq  $2,UREGS_rip(%rsp)
        movl  %eax,TRAPBOUNCE_error_code(%rdx)
        movq  TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rsi),%rax
        testb $4,TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_flags(%rsi)
        setnz %cl
        leal  TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE(,%rcx,TBF_INTERRUPT),%ecx
UNLIKELY_END(sysenter_gpf)
        movq  VCPU_domain(%rbx),%rdi
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        testb $1,DOMAIN_is_32bit_pv(%rdi)
        jnz   compat_sysenter
        jmp   .Lbounce_exception

ENTRY(int80_direct_trap)
        pushq $0
        SAVE_ALL

        cmpb  $0,untrusted_msi(%rip)
UNLIKELY_START(ne, msi_check)
        movl  $0x80,%edi
        call  check_for_unexpected_msi
        RESTORE_ALL
        SAVE_ALL
UNLIKELY_END(msi_check)

        GET_CURRENT(%rbx)

        /* Check that the callback is non-null. */
        leaq  VCPU_int80_bounce(%rbx),%rdx
        cmpb  $0,TRAPBOUNCE_flags(%rdx)
        jz    int80_slow_path

        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_int80_direct_trap

        call  create_bounce_frame
        jmp   test_all_events

int80_slow_path:
        /* 
         * Setup entry vector and error code as if this was a GPF caused by an
         * IDT entry with DPL==0.
         */
        movl  $((0x80 << 3) | 0x2),UREGS_error_code(%rsp)
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        /* A GPF wouldn't have incremented the instruction pointer. */
        subq  $2,UREGS_rip(%rsp)
        jmp   handle_exception_saved
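
        /*
         * The error code above follows the architectural format for faults
         * that reference an IDT entry: bits 15..3 hold the vector, bit 1
         * (IDT) is set, bit 0 (EXT) is clear. For vector 0x80:
         *
         *   (0x80 << 3) | 0x2  ==  0x402
         */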

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
/*   { RCX, R11, [DS-GS,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }          */
/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
/* On return only %rbx and %rdx are guaranteed non-clobbered.            */
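/*
 * Equivalently, as a C struct at ascending addresses (illustrative names;
 * optional members exist only when the matching TBF_ flag is set):
 *
 *   struct bounce_frame {
 *       unsigned long rcx, r11;         // always written
 *       unsigned long ds, es, fs, gs;   // TBF_FAILSAFE only
 *       unsigned long error_code;       // TBF_EXCEPTION_ERRCODE only
 *       unsigned long rip;
 *       unsigned long cs;               // bits 32-39: saved_upcall_mask
 *       unsigned long rflags, rsp, ss;
 *   };
 */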
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi                # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        movq  $HYPERVISOR_VIRT_END+60,%rax
        sbb   %ecx,%ecx                 # In +ve address space? Then okay.
        cmpq  %rax,%rsi
        adc   %ecx,%ecx                 # Above Xen private area? Then okay.
        jg    domain_crash_synchronous
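        /*
         * The sbb/adc pair computes, branch-free, the predicate
         *
         *   HYPERVISOR_VIRT_START <= %rsi < HYPERVISOR_VIRT_END + 60
         *
         * and crashes the domain when the frame base falls inside (or just
         * above) the Xen-private address range, where the stores below
         * must never land.
         */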
        movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
.Lft2:  movq  %rax,32(%rsi)             # SS
        movq  UREGS_rsp+8(%rsp),%rax
.Lft3:  movq  %rax,24(%rsi)             # RSP
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                  # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
.Lft4:  movq  %rax,8(%rsi)              # CS / saved_upcall_mask
        shrq  $32,%rax
        testb $0xFF,%al                 # Bits 0-7: saved_upcall_mask
        setz  %ch                       # %ch == !saved_upcall_mask
        movl  UREGS_eflags+8(%rsp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        addb  %ch,%ch                   # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
.Lft5:  movq  %rax,16(%rsi)             # RFLAGS
        movq  UREGS_rip+8(%rsp),%rax
.Lft6:  movq  %rax,(%rsi)               # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft7:  movq  %rax,(%rsi)               # ERROR CODE
1:      testb $TBF_FAILSAFE,%cl
UNLIKELY_START(nz, bounce_failsafe)
        subq  $32,%rsi
        movl  %gs,%eax
.Lft8:  movq  %rax,24(%rsi)             # GS
        movl  %fs,%eax
.Lft9:  movq  %rax,16(%rsi)             # FS
        movl  %es,%eax
.Lft10: movq  %rax,8(%rsi)              # ES
        movl  %ds,%eax
.Lft11: movq  %rax,(%rsi)               # DS
UNLIKELY_END(bounce_failsafe)
        subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
.Lft12: movq  %rax,8(%rsi)              # R11
        movq  UREGS_rcx+8(%rsp),%rax
.Lft13: movq  %rax,(%rsi)               # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        movq  $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        ret
        _ASM_EXTABLE(.Lft2,  domain_crash_synchronous)
        _ASM_EXTABLE(.Lft3,  domain_crash_synchronous)
        _ASM_EXTABLE(.Lft4,  domain_crash_synchronous)
        _ASM_EXTABLE(.Lft5,  domain_crash_synchronous)
        _ASM_EXTABLE(.Lft6,  domain_crash_synchronous)
        _ASM_EXTABLE(.Lft7,  domain_crash_synchronous)
        _ASM_EXTABLE(.Lft8,  domain_crash_synchronous)
        _ASM_EXTABLE(.Lft9,  domain_crash_synchronous)
        _ASM_EXTABLE(.Lft10, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft11, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft12, domain_crash_synchronous)
        _ASM_EXTABLE(.Lft13, domain_crash_synchronous)

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S\n"

ENTRY(domain_crash_synchronous)
        # Get out of the guest-save area of the stack.
        GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%rax)
        movq  %rax,%rsp
        # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
        GET_CURRENT(%rax)
        movq  VCPU_domain(%rax),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        setz  %al
        leal  (%rax,%rax,2),%eax
        orb   %al,UREGS_cs(%rsp)
        # printk(domain_crash_synchronous_string)
        leaq  domain_crash_synchronous_string(%rip),%rdi
        xorl  %eax,%eax
        call  printk
        jmp  __domain_crash_synchronous

/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jz    test_all_events
        jmp   compat_test_all_events

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
/* No special register assumptions. */
	.globl handle_exception
handle_exception:
        SAVE_ALL
handle_exception_saved:
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(exceptions, %rax, %rbx)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_post_handle_exception
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
.Lbounce_exception:
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%rdx)
        jmp   test_all_events

/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)         # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax                 # no fixup code for faulting EIP?
        jz    1b
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)        # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)       # return %rsp is now aligned?
        jz    1f                        # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                     # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen           # return to fixup code

/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2

ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   handle_exception

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   handle_exception

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   handle_exception

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   handle_exception

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   handle_exception

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   handle_exception

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   handle_exception

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   handle_exception

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   handle_exception

ENTRY(double_fault)
        movl  $TRAP_double_fault,4(%rsp)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_double_fault
        ud2

        .pushsection .init.text, "ax", @progbits
ENTRY(early_page_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_early_page_fault
        jmp   restore_all_xen
        .popsection

handle_ist_exception:
        SAVE_ALL
        testb $3,UREGS_cs(%rsp)
        jz    1f
        /* Interrupted guest context. Copy the context to stack bottom. */
        GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%rdi)
        movq  %rsp,%rsi
        movl  $UREGS_kernel_sizeof/8,%ecx
        movq  %rdi,%rsp
        rep   movsq
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        callq *(%rdx,%rax,8)
        jmp   ret_from_intr
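
/*
 * Roughly (guest_mode() and guest_cpu_user_regs() are Xen accessors, the
 * rest shorthand): IST exceptions such as NMI and #MC arrive on a dedicated
 * interrupt stack; a frame saved there for an interrupted guest is first
 * copied to the per-CPU guest register area at the bottom of the primary
 * stack so the common exit paths apply:
 *
 *   if ( guest_mode(regs) )
 *   {
 *       memcpy(guest_cpu_user_regs(), regs, UREGS_kernel_sizeof);
 *       regs = guest_cpu_user_regs();
 *   }
 *   exception_table[regs->entry_vector](regs);
 *   return ret_from_intr();
 */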

ENTRY(nmi)
        pushq $0
        movl  $TRAP_nmi,4(%rsp)
        jmp   handle_ist_exception

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   handle_ist_exception

.section .rodata, "a", @progbits

ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad do_nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad do_device_not_available
        .quad 0 # double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_sched_op_compat
        .quad do_platform_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op_compat
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_iret
        .quad do_vcpu_op
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_xsm_op
        .quad do_nmi_op
        .quad do_sched_op
        .quad do_callback_op        /* 30 */
        .quad do_xenoprof_op
        .quad do_event_channel_op
        .quad do_physdev_op
        .quad do_hvm_op
        .quad do_sysctl             /* 35 */
        .quad do_domctl
        .quad do_kexec_op
        .quad do_tmem_op
        .rept __HYPERVISOR_arch_0-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr
        .quad do_mca                /* 48 */
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table    */  /*  0 */
        .byte 4 /* do_mmu_update        */
        .byte 2 /* do_set_gdt           */
        .byte 2 /* do_stack_switch      */
        .byte 3 /* do_set_callbacks     */
        .byte 1 /* do_fpu_taskswitch    */  /*  5 */
        .byte 2 /* do_sched_op_compat   */
        .byte 1 /* do_platform_op       */
        .byte 2 /* do_set_debugreg      */
        .byte 1 /* do_get_debugreg      */
        .byte 2 /* do_update_descriptor */  /* 10 */
        .byte 0 /* do_ni_hypercall      */
        .byte 2 /* do_memory_op         */
        .byte 2 /* do_multicall         */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op      */  /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version       */
        .byte 3 /* do_console_io        */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op    */  /* 20 */
        .byte 2 /* do_vm_assist         */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret              */
        .byte 3 /* do_vcpu_op           */
        .byte 2 /* do_set_segment_base  */  /* 25 */
        .byte 4 /* do_mmuext_op         */
        .byte 1 /* do_xsm_op            */
        .byte 2 /* do_nmi_op            */
        .byte 2 /* do_sched_op          */
        .byte 2 /* do_callback_op       */  /* 30 */
        .byte 2 /* do_xenoprof_op       */
        .byte 2 /* do_event_channel_op  */
        .byte 2 /* do_physdev_op        */
        .byte 2 /* do_hvm_op            */
        .byte 1 /* do_sysctl            */  /* 35 */
        .byte 1 /* do_domctl            */
        .byte 2 /* do_kexec_op          */
        .byte 1 /* do_tmem_op           */
        .rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr
        .byte 1 /* do_mca               */  /* 48 */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr