path: root/Projects/TempDataLogger/Config/AppConfig.h
/*
             LUFA Library
     Copyright (C) Dean Camera, 2013.

  dean [at] fourwalledcubicle [dot] com
           www.lufa-lib.org
*/

/*
  Copyright 2013  Dean Camera (dean [at] fourwalledcubicle [dot] com)

  Permission to use, copy, modify, distribute, and sell this
  software and its documentation for any purpose is hereby granted
  without fee, provided that the above copyright notice appear in
  all copies and that both that the copyright notice and this
  permission notice and warranty disclaimer appear in supporting
  documentation, and that the name of the author not be used in
  advertising or publicity pertaining to distribution of the
  software without specific, written prior permission.

  The author disclaims all warranties with regard to this
  software, including all implied warranties of merchantability
  and fitness.  In no event shall the author be liable for any
  special, indirect or consequential damages or any damages
  whatsoever resulting from loss of use, data or profits, whether
  in an action of contract, negligence or other tortious action,
  arising out of or in connection with the use or performance of
  this software.
*/

/** \file
 *  \brief Application Configuration Header File
 *
 *  This is a header file which is used to configure some of
 *  the application's compile time options, as an alternative to
 *  the compile time constants supplied through a makefile or
 *  build system.
 *
 *  For information on what each token does, refer to the 
 *  \ref Sec_Options section of the application documentation.
 */

#ifndef _APP_CONFIG_H_
#define _APP_CONFIG_H_

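// Uncommenting this token (judging by its name) selects a dummy software RTC in
// place of the hardware RTC module; see \ref Sec_Options in the application
// documentation for the authoritative description of each configuration token.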
//	#define DUMMY_RTC

#endif
/*
 * vmx_platform.c: handling x86 platform related MMIO instructions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <asm/shadow.h>
#include <xen/domain_page.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <asm/vmx.h>
#include <asm/vmx_platform.h>
#include <public/io/ioreq.h>

#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif
#ifdef CONFIG_VMX

#define DECODE_success  1
#define DECODE_failure  0

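/*
 * store_cpu_user_regs() refreshes the fields of the saved guest register
 * block that live in the VMCS (segment selectors, stack pointer, flags and
 * instruction pointer), and get_reg_value() reads a decoded operand out of
 * that block; both come in an x86_64 and an i386 flavour.
 */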
#if defined (__x86_64__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmread(GUEST_SS_SELECTOR, &regs->ss);
    __vmread(GUEST_RSP, &regs->rsp);
    __vmread(GUEST_RFLAGS, &regs->rflags);
    __vmread(GUEST_CS_SELECTOR, &regs->cs);
    __vmread(GUEST_DS_SELECTOR, &regs->ds);
    __vmread(GUEST_ES_SELECTOR, &regs->es);
    __vmread(GUEST_RIP, &regs->rip);
}

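/*
 * Narrow a saved 64-bit register image to the requested operand size; the
 * casts to signed types sign-extend the value into the long return value.
 */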
static inline long __get_reg_value(unsigned long reg, int size)
{
    switch(size) {
    case BYTE_64:
        return (char)(reg & 0xFF);
    case WORD:
        return (short)(reg & 0xFFFF);
    case LONG:
        return (int)(reg & 0xFFFFFFFF);
    case QUAD:
        return (long)(reg);
    default:
        printf("Error: (__get_reg_value) Invalid reg size\n");
        domain_crash_synchronous();
    }
}

long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if (size == BYTE) {
        switch (index) {
        case 0: /* %al */
            return (char)(regs->rax & 0xFF);
        case 1: /* %cl */
            return (char)(regs->rcx & 0xFF);
        case 2: /* %dl */
            return (char)(regs->rdx & 0xFF);
        case 3: /* %bl */
            return (char)(regs->rbx & 0xFF);
        case 4: /* %ah */
            return (char)((regs->rax & 0xFF00) >> 8);
        case 5: /* %ch */
            return (char)((regs->rcx & 0xFF00) >> 8);
        case 6: /* %dh */
            return (char)((regs->rdx & 0xFF00) >> 8);
        case 7: /* %bh */
            return (char)((regs->rbx & 0xFF00) >> 8);
        default:
            printf("Error: (get_reg_value) Invalid index value\n");
            domain_crash_synchronous();
        }
        /* NOTREACHED */
    }

    switch (index) {
    case 0: return __get_reg_value(regs->rax, size);
    case 1: return __get_reg_value(regs->rcx, size);
    case 2: return __get_reg_value(regs->rdx, size);
    case 3: return __get_reg_value(regs->rbx, size);
    case 4: return __get_reg_value(regs->rsp, size);
    case 5: return __get_reg_value(regs->rbp, size);
    case 6: return __get_reg_value(regs->rsi, size);
    case 7: return __get_reg_value(regs->rdi, size);
    case 8: return __get_reg_value(regs->r8, size);
    case 9: return __get_reg_value(regs->r9, size);
    case 10: return __get_reg_value(regs->r10, size);
    case 11: return __get_reg_value(regs->r11, size);
    case 12: return __get_reg_value(regs->r12, size);
    case 13: return __get_reg_value(regs->r13, size);
    case 14: return __get_reg_value(regs->r14, size);
    case 15: return __get_reg_value(regs->r15, size);
    default:
        printf("Error: (get_reg_value) Invalid index value\n");
        domain_crash_synchronous();
    }
}
#elif defined (__i386__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmread(GUEST_SS_SELECTOR, &regs->ss);
    __vmread(GUEST_RSP, &regs->esp);
    __vmread(GUEST_RFLAGS, &regs->eflags);
    __vmread(GUEST_CS_SELECTOR, &regs->cs);
    __vmread(GUEST_DS_SELECTOR, &regs->ds);
    __vmread(GUEST_ES_SELECTOR, &regs->es);
    __vmread(GUEST_RIP, &regs->eip);
}

static inline long __get_reg_value(unsigned long reg, int size)
{
    switch(size) {
    case WORD:
        return (short)(reg & 0xFFFF);
    case LONG:
        return (int)(reg & 0xFFFFFFFF);
    default:
        printf("Error: (__get_reg_value) Invalid reg size\n");
        domain_crash_synchronous();
    }
}

long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if (size == BYTE) {
        switch (index) {
        case 0: /* %al */
            return (char)(regs->eax & 0xFF);
        case 1: /* %cl */
            return (char)(regs->ecx & 0xFF);
        case 2: /* %dl */
            return (char)(regs->edx & 0xFF);
        case 3: /* %bl */
            return (char)(regs->ebx & 0xFF);
        case 4: /* %ah */
            return (char)((regs->eax & 0xFF00) >> 8);
        case 5: /* %ch */
            return (char)((regs->ecx & 0xFF00) >> 8);
        case 6: /* %dh */
            return (char)((regs->edx & 0xFF00) >> 8);
        case 7: /* %bh */
            return (char)((regs->ebx & 0xFF00) >> 8);
        default:
            printf("Error: (get_reg_value) Invalid index value\n");
            domain_crash_synchronous();
        }
    }

    switch (index) {
    case 0: return __get_reg_value(regs->eax, size);
    case 1: return __get_reg_value(regs->ecx, size);
    case 2: return __get_reg_value(regs->edx, size);
    case 3: return __get_reg_value(regs->ebx, size);
    case 4: return __get_reg_value(regs->esp, size);
    case 5: return __get_reg_value(regs->ebp, size);
    case 6: return __get_reg_value(regs->esi, size);
    case 7: return __get_reg_value(regs->edi, size);
    default:
        printf("Error: (get_reg_value) Invalid index value\n");
        domain_crash_synchronous();
    }
}
#endif

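/*
 * Skip any prefixes in front of the opcode, recording the ones the decoder
 * cares about (REX byte, REP/REPNE, segment override, 0x66 operand-size
 * override).  Returns a pointer to the opcode byte itself.
 */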
static inline unsigned char *check_prefix(unsigned char *inst,
                                          struct instruction *thread_inst, unsigned char *rex_p)
{
    while (1) {
        switch (*inst) {
            /* rex prefix for em64t instructions */
        case 0x40 ... 0x4f:
            *rex_p = *inst;
            break;
        case 0xf3: /* REPZ */
            thread_inst->flags = REPZ;
            break;
        case 0xf2: /* REPNZ */
            thread_inst->flags = REPNZ;
            break;
        case 0xf0: /* LOCK */
            break;
        case 0x2e: /* CS */
        case 0x36: /* SS */
        case 0x3e: /* DS */
        case 0x26: /* ES */
        case 0x64: /* FS */
        case 0x65: /* GS */
            thread_inst->seg_sel = *inst;
            break;
        case 0x66: /* 32bit->16bit */
            thread_inst->op_size = WORD;
            break;
        case 0x67:
            printf("Error: Not handling 0x67 (yet)\n");
            domain_crash_synchronous();
            break;
        default:
            return inst;
        }
        inst++;
    }
}

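/*
 * Fetch the immediate operand of an instruction whose ModRM byte is at
 * *inst: step over the ModRM/SIB bytes and any displacement (16-bit forms
 * when op16 is set), then assemble the little-endian immediate of op_size
 * bytes.  A QUAD operand still carries only a 32-bit immediate.
 */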
static inline unsigned long get_immediate(int op16, const unsigned char *inst, int op_size)
{
    int mod, reg, rm;
    unsigned long val = 0;
    int i;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    inst++; //skip ModR/M byte
    if (mod != 3 && rm == 4) {
        inst++; //skip SIB byte
    }

    switch(mod) {
    case 0:
        if (rm == 5 || rm == 4) {
            if (op16)
                inst = inst + 2; //disp16, skip 2 bytes
            else
                inst = inst + 4; //disp32, skip 4 bytes
        }
        break;
    case 1:
        inst++; //disp8, skip 1 byte
        break;
    case 2:
        if (op16)
            inst = inst + 2; //disp16, skip 2 bytes
        else
            inst = inst + 4; //disp32, skip 4 bytes
        break;
    }

    if (op_size == QUAD)
        op_size = LONG;

    for (i = 0; i < op_size; i++) {
        val |= (*inst++ & 0xff) << (8 * i);
    }

    return val;
}

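/*
 * Return the number of the register operand encoded in the ModRM byte,
 * folding in the REX.B (rm field) or REX.R (reg field) extension bit.
 */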
static inline int get_index(const unsigned char *inst, unsigned char rex)
{
    int mod, reg, rm;
    int rex_r, rex_b;

    mod = (*inst >> 6) & 3;
    reg = (*inst >> 3) & 7;
    rm = *inst & 7;

    rex_r = (rex >> 2) & 1;
    rex_b = rex & 1;

    // Only one operand of the instruction is a register; return its index.
    if (mod == 3) {
        return (rm + (rex_b << 3));
    } else {
        return (reg + (rex_r << 3));
    }
    return 0;
}

static void init_instruction(struct instruction *mmio_inst)
{
    mmio_inst->instr = 0;
    mmio_inst->op_size = 0;
    mmio_inst->immediate = 0;
    mmio_inst->seg_sel = 0;

    mmio_inst->operand[0] = 0;
    mmio_inst->operand[1] = 0;

    mmio_inst->flags = 0;
}

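/*
 * Pick the effective operand size for the decoded instruction: for byte
 * operations any REX prefix selects the BYTE_64 encoding (low-byte registers
 * rather than %ah..%bh); for non-byte operations REX.W selects a 64-bit
 * operand, otherwise an earlier 0x66 prefix (WORD) is honoured and the
 * default is 32 bits.
 */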
#define GET_OP_SIZE_FOR_BYTE(op_size)       \
    do {                                    \
        if (rex)                            \
            op_size = BYTE_64;              \
        else                                \
            op_size = BYTE;                 \
    } while(0)

#define GET_OP_SIZE_FOR_NONEBYTE(op_size)   \
    do {                                    \
        if (rex & 0x8)                      \
            op_size = QUAD;                 \
        else if (op_size != WORD)           \
            op_size = LONG;                 \
    } while(0)


/*
 * Decode mem,accumulator operands (as in <opcode> m8/m16/m32, al,ax,eax)
 */
static int mem_acc(unsigned char size, struct instruction *instr)
{
    instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
    instr->operand[1] = mk_operand(size, 0, 0, REGISTER);
    return DECODE_success;
}

/*
 * Decode accumulator,mem operands (as in <opcode> al,ax,eax, m8/m16/m32)
 */
static int acc_mem(unsigned char size, struct instruction *instr)
{
    instr->operand[0] = mk_operand(size, 0, 0, REGISTER);
    instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
    return DECODE_success;
}

/*
 * Decode mem,reg operands (as in <opcode> r32/16, m32/16)
 */
static int mem_reg(unsigned char size, unsigned char *opcode,
                   struct instruction *instr, unsigned char rex)
{
    int index = get_index(opcode + 1, rex);

    instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
    instr->operand[1] = mk_operand(size, index, 0, REGISTER);
    return DECODE_success;
}

/*
 * Decode reg,mem operands (as in <opcode> m32/16, r32/16)
 */
static int reg_mem(unsigned char size, unsigned char *opcode,
                   struct instruction *instr, unsigned char rex)
{
    int index = get_index(opcode + 1, rex);

    instr->operand[0] = mk_operand(size, index, 0, REGISTER);
    instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
    return DECODE_success;
}

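/*
 * Decode the faulting instruction into *instr: strip prefixes, adjust the
 * default operand size for vm86 mode, then dispatch on the (possibly
 * 0x0F-prefixed) opcode.  Only the opcodes the MMIO emulation knows how to
 * handle are recognised; everything else is a decode failure.
 */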
static int vmx_decode(unsigned char *opcode, struct instruction *instr)
{
    unsigned long eflags;
    int index, vm86 = 0;
    unsigned char rex = 0;
    unsigned char tmp_size = 0;

    init_instruction(instr);

    opcode = check_prefix(opcode, instr, &rex);

    __vmread(GUEST_RFLAGS, &eflags);
    if (eflags & X86_EFLAGS_VM)
        vm86 = 1;

    if (vm86) { /* meaning is reversed */
        if (instr->op_size == WORD)
            instr->op_size = LONG;
        else if (instr->op_size == LONG)
            instr->op_size = WORD;
        else if (instr->op_size == 0)
            instr->op_size = WORD;
    }

    switch (*opcode) {
    case 0x0B: /* or m32/16, r32/16 */
        instr->instr = INSTR_OR;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0x20: /* and r8, m8 */
        instr->instr = INSTR_AND;
        GET_OP_SIZE_FOR_BYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x21: /* and r32/16, m32/16 */
        instr->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x23: /* and m32/16, r32/16 */
        instr->instr = INSTR_AND;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0x30: /* xor r8, m8 */
        instr->instr = INSTR_XOR;
        GET_OP_SIZE_FOR_BYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x31: /* xor r32/16, m32/16 */
        instr->instr = INSTR_XOR;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x39: /* cmp r32/16, m32/16 */
        instr->instr = INSTR_CMP;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x80:
    case 0x81:
        if (((opcode[1] >> 3) & 7) == 7) { /* cmp $imm8, m8 / cmp $imm16/32, m16/32 */
            instr->instr = INSTR_CMP;

            if (opcode[0] == 0x80)
                GET_OP_SIZE_FOR_BYTE(instr->op_size);
            else
                GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            /* 0x80 carries an imm8; 0x81 an imm16/32 matching the operand size */
            instr->immediate = get_immediate(vm86, opcode + 1,
                                 (opcode[0] == 0x80) ? BYTE : instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0x84:  /* test m8, r8 */
        instr->instr = INSTR_TEST;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return mem_reg(tmp_size, opcode, instr, rex);

    case 0x88: /* mov r8, m8 */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return reg_mem(tmp_size, opcode, instr, rex);

    case 0x89: /* mov r32/16, m32/16 */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return reg_mem(instr->op_size, opcode, instr, rex);

    case 0x8A: /* mov m8, r8 */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return mem_reg(tmp_size, opcode, instr, rex);

    case 0x8B: /* mov m32/16, r32/16 */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_reg(instr->op_size, opcode, instr, rex);

    case 0xA0: /* mov <addr>, al */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return mem_acc(tmp_size, instr);

    case 0xA1: /* mov <addr>, ax/eax */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return mem_acc(instr->op_size, instr);

    case 0xA2: /* mov al, <addr> */
        instr->instr = INSTR_MOV;
        instr->op_size = BYTE;
        GET_OP_SIZE_FOR_BYTE(tmp_size);
        return acc_mem(tmp_size, instr);

    case 0xA3: /* mov ax/eax, <addr> */
        instr->instr = INSTR_MOV;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return acc_mem(instr->op_size, instr);

    case 0xA4: /* movsb */
        instr->instr = INSTR_MOVS;
        instr->op_size = BYTE;
        return DECODE_success;

    case 0xA5: /* movsw/movsl */
        instr->instr = INSTR_MOVS;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return DECODE_success;

    case 0xAA: /* stosb */
        instr->instr = INSTR_STOS;
        instr->op_size = BYTE;
        return DECODE_success;

    case 0xAB: /* stosw/stosl */
        instr->instr = INSTR_STOS;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        return DECODE_success;

    case 0xC6:
        if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */
            instr->instr = INSTR_MOV;
            instr->op_size = BYTE;

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xC7:
        if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */
            instr->instr = INSTR_MOV;
            GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0xF6:
        if (((opcode[1] >> 3) & 7) == 0) { /* testb $imm8, m8 */
            instr->instr = INSTR_TEST;
            instr->op_size = BYTE;

            instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
            instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
            instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);

            return DECODE_success;
        } else
            return DECODE_failure;

    case 0x0F:
        break;

    default:
        printf("%x, This opcode isn't handled yet!\n", *opcode);
        return DECODE_failure;
    }

    switch (*++opcode) {
    case 0xB6: /* movz m8, r16/r32 */
        instr->instr = INSTR_MOVZ;
        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
        index = get_index(opcode + 1, rex);
        instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
        instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
        return DECODE_success;

    case 0xB7: /* movz m16, r32 */
        instr->instr = INSTR_MOVZ;
        index = get_index(opcode + 1, rex);
        if (rex & 0x8) {
            instr->op_size = LONG;
            instr->operand[1] = mk_operand(QUAD, index, 0, REGISTER);
        } else {
            instr->op_size = WORD;
            instr->operand[1] = mk_operand(LONG, index, 0, REGISTER);
        }
        instr->operand[0] = mk_operand(instr->op_size, 0, 0, MEMORY);
        return DECODE_success;

    default:
        printf("0f %x, This opcode isn't handled yet\n", *opcode);
        return DECODE_failure;
    }
}

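/* Copy up to MAX_INST_LEN instruction bytes from the guest image at
 * guest_eip; returns the number of bytes copied, or 0 on failure. */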
int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
{
    if (inst_len > MAX_INST_LEN || inst_len <= 0)
        return 0;
    if (!vmx_copy(buf, guest_eip, inst_len, VMX_COPY_IN))
        return 0;
    return inst_len;
}

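/*
 * Build an ioreq describing the MMIO access and post it to the device model
 * through the shared I/O request page.  When pvalid is set, 'value' is a
 * guest virtual address and is translated to a physical address (if paging
 * is enabled) so the device model can copy the data directly.  Internally
 * intercepted addresses are completed on the spot; otherwise the vcpu
 * notifies the device model and blocks in vmx_wait_io() until the request
 * has been serviced.
 */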
void send_mmio_req(unsigned char type, unsigned long gpa,
                   unsigned long count, int size, long value, int dir, int pvalid)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;
    int vm86;
    struct cpu_user_regs *regs;
    extern long evtchn_send(int lport);

    regs = current->arch.arch_vmx.mmio_op.inst_decoder_regs;

    vio = get_vio(v->domain, v->vcpu_id);
    if (vio == NULL) {
        printf("bad shared page\n");
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;

    vm86 = regs->eflags & X86_EFLAGS_VM;

    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
        printf("VMX I/O has not yet completed\n");
        domain_crash_synchronous();
    }

    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    p->dir = dir;
    p->pdata_valid = pvalid;

    p->type = type;
    p->size = size;
    p->addr = gpa;
    p->count = count;
    p->df = regs->eflags & EF_DF ? 1 : 0;

    if (pvalid) {
        if (vmx_paging_enabled(current))
            p->u.pdata = (void *) gva_to_gpa(value);
        else
            p->u.pdata = (void *) value; /* guest VA == guest PA */
    } else
        p->u.data = value;

    p->state = STATE_IOREQ_READY;

    if (vmx_mmio_intercept(p)){
        p->state = STATE_IORESP_READY;
        vmx_io_assist(v);
        return;
    }

    evtchn_send(iopacket_port(v->domain));
    vmx_wait_io();
}

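/*
 * Common helper for two-operand instructions: record the decoded instruction
 * in the per-vcpu mmio_op state and issue either a write request (register
 * or immediate source) or a read request (memory source) for the faulting
 * guest physical address.
 */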
static void mmio_operands(int type, unsigned long gpa, struct instruction *inst,
                          struct mmio_op *mmio_opp, struct cpu_user_regs *regs)
{
    unsigned long value = 0;
    int index, size;

    size = operand_size(inst->operand[0]);

    mmio_opp->flags = inst->flags;
    mmio_opp->instr = inst->instr;
    mmio_opp->operand[0] = inst->operand[0]; /* source */
    mmio_opp->operand[1] = inst->operand[1]; /* destination */
    mmio_opp->immediate = inst->immediate;

    if (inst->operand[0] & REGISTER) { /* dest is memory */
        index = operand_index(inst->operand[0]);
        value = get_reg_value(size, index, 0, regs);
        send_mmio_req(type, gpa, 1, inst->op_size, value, IOREQ_WRITE, 0);
    } else if (inst->operand[0] & IMMEDIATE) { /* dest is memory */
        value = inst->immediate;
        send_mmio_req(type, gpa, 1, inst->op_size, value, IOREQ_WRITE, 0);
    } else if (inst->operand[0] & MEMORY) { /* dest is register */
        /* send the request and wait for the value */
        send_mmio_req(type, gpa, 1, inst->op_size, 0, IOREQ_READ, 0);
    } else {
        printf("mmio_operands: invalid operand\n");
        domain_crash_synchronous();
    }
}

#define GET_REPEAT_COUNT() \
     (mmio_inst.flags & REPZ ? (vm86 ? regs->ecx & 0xFFFF : regs->ecx) : 1)

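/*
 * Entry point for instructions that fault on MMIO: fetch and decode the
 * instruction at the guest's %eip (cs:ip in vm86 mode), advance %eip past
 * it, and turn the access into one or more I/O requests for the device
 * model.  A rep movs that would cross a page boundary is restarted so that
 * each request stays within a single guest page.
 */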
void handle_mmio(unsigned long va, unsigned long gpa)
{
    unsigned long eip, eflags, cs;
    unsigned long inst_len, inst_addr;
    struct mmio_op *mmio_opp;
    struct cpu_user_regs *regs;
    struct instruction mmio_inst;
    unsigned char inst[MAX_INST_LEN];
    int i, vm86, ret;

    mmio_opp = &current->arch.arch_vmx.mmio_op;
    regs = mmio_opp->inst_decoder_regs;

    __vmread(GUEST_RIP, &eip);
    __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
    __vmread(GUEST_RFLAGS, &eflags);
    vm86 = eflags & X86_EFLAGS_VM;

    if (vm86) {
        __vmread(GUEST_CS_SELECTOR, &cs);
        inst_addr = (cs << 4) + eip;
    } else
        inst_addr = eip;

    memset(inst, 0, MAX_INST_LEN);
    ret = inst_copy_from_guest(inst, inst_addr, inst_len);
    if (ret != inst_len) {
        printf("handle_mmio - EXIT: get guest instruction fault\n");
        domain_crash_synchronous();
    }

    init_instruction(&mmio_inst);

    if (vmx_decode(inst, &mmio_inst) == DECODE_failure) {
        printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %ld:",
               va, gpa, inst_len);
        for (i = 0; i < inst_len; i++)
            printf(" %02x", inst[i] & 0xFF);
        printf("\n");
        domain_crash_synchronous();
    }

    store_cpu_user_regs(regs);
    regs->eip += inst_len; /* advance %eip */

    switch (mmio_inst.instr) {
    case INSTR_MOV:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs);
        break;

    case INSTR_MOVS:
    {
        unsigned long count = GET_REPEAT_COUNT();
        unsigned long size = mmio_inst.op_size;
        int sign = regs->eflags & EF_DF ? -1 : 1;
        unsigned long addr = 0;
        int dir;

        /* determine non-MMIO address */
        if (vm86) {
            unsigned long seg;

            __vmread(GUEST_ES_SELECTOR, &seg);
            if (((seg << 4) + (regs->edi & 0xFFFF)) == va) {
                dir = IOREQ_WRITE;
                __vmread(GUEST_DS_SELECTOR, &seg);
                addr = (seg << 4) + (regs->esi & 0xFFFF);
            } else {
                dir = IOREQ_READ;
                addr = (seg << 4) + (regs->edi & 0xFFFF);
            }
        } else {
            if (va == regs->edi) {
                dir = IOREQ_WRITE;
                addr = regs->esi;
            } else {
                dir = IOREQ_READ;
                addr = regs->edi;
            }
        }

        mmio_opp->flags = mmio_inst.flags;
        mmio_opp->instr = mmio_inst.instr;

        /*
         * In case of a movs spanning multiple pages, we break the access up
         * into per-page pieces (the device model works with non-contiguous
         * physical guest pages).  To copy just one page, we adjust %ecx and
         * do not advance %eip, so that the next "rep movs" copies the next
         * page.  Unaligned accesses, for example a movsl starting at PGSZ-2,
         * are turned into a single copy where we handle the overlapping
         * memory copy ourselves.  After this copy succeeds, "rep movs" is
         * executed again.
         */
        if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
            unsigned long value = 0;

            mmio_opp->flags |= OVERLAP;

            regs->eip -= inst_len; /* do not advance %eip */

            if (dir == IOREQ_WRITE)
                vmx_copy(&value, addr, size, VMX_COPY_IN);
            send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, 0);
        } else {
            if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
                regs->eip -= inst_len; /* do not advance %eip */

                if (sign > 0)
                    count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
                else
                    count = (addr & ~PAGE_MASK) / size;
            }

            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, 1);
        }
        break;
    }

    case INSTR_MOVZ:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs);
        break;

    case INSTR_STOS:
        /*
         * Since the destination is always in (contiguous) MMIO space, we
         * don't need to break it up into pages.
         */
        mmio_opp->flags = mmio_inst.flags;
        mmio_opp->instr = mmio_inst.instr;
        send_mmio_req(IOREQ_TYPE_COPY, gpa,
                      GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0);
        break;

    case INSTR_OR:
        mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mmio_opp, regs);
        break;

    case INSTR_AND:
        mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mmio_opp, regs);
        break;

    case INSTR_XOR:
        mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mmio_opp, regs);
        break;

    case INSTR_CMP:        /* Pass through */
    case INSTR_TEST:
        mmio_opp->flags = mmio_inst.flags;
        mmio_opp->instr = mmio_inst.instr;
        mmio_opp->operand[0] = mmio_inst.operand[0]; /* source */
        mmio_opp->operand[1] = mmio_inst.operand[1]; /* destination */
        mmio_opp->immediate = mmio_inst.immediate;

        /* send the request and wait for the value */
        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, mmio_inst.op_size, 0, IOREQ_READ, 0);
        break;

    default:
        printf("Unhandled MMIO instruction\n");
        domain_crash_synchronous();
    }
}

#endif /* CONFIG_VMX */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */