/*
* vcpu.h: HVM per vcpu definitions
*
* Copyright (c) 2005, International Business Machines Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
#ifndef __ASM_X86_HVM_VCPU_H__
#define __ASM_X86_HVM_VCPU_H__
#include <xen/tasklet.h>
#include <asm/hvm/io.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/hvm/svm/vmcb.h>
#include <asm/mtrr.h>
/*
 * State of a vcpu's in-flight emulated I/O request.
 * NOTE(review): transition order inferred from the names — looks like
 * none -> dispatched -> awaiting_completion -> completed, with the two
 * handle_*_awaiting_completion states covering MMIO/PIO continuations;
 * confirm against the emulation code that drives this state machine.
 */
enum hvm_io_state {
HVMIO_none = 0,                            /* no I/O request in flight */
HVMIO_dispatched,                          /* request sent (presumably to the device model) */
HVMIO_awaiting_completion,                 /* waiting for the request to finish */
HVMIO_handle_mmio_awaiting_completion,     /* MMIO emulation awaiting completion */
HVMIO_handle_pio_awaiting_completion,      /* port I/O emulation awaiting completion */
HVMIO_completed                            /* request finished; result available */
};
/*
 * Per-vcpu HVM (hardware virtual machine) state, embedded in the
 * architecture-specific vcpu structure. Field order is part of the
 * in-memory layout — do not reorder.
 */
struct hvm_vcpu {
/* Guest control-register and EFER values, just as the guest sees them. */
unsigned long guest_cr[5];
unsigned long guest_efer;
/*
 * Processor-visible control-register values, while guest executes.
 * CR0, CR4: Used as a cache of VMCS contents by VMX only.
 * CR1, CR2: Never used (guest_cr[2] is always processor-visible CR2).
 * CR3: Always used and kept up to date by paging subsystem.
 */
unsigned long hw_cr[5];
/*
 * The save area for Processor Extended States and the bitmask of the
 * XSAVE/XRSTOR features. They are used by: 1) when a vcpu (which has
 * dirtied FPU/SSE) is scheduled out we XSAVE the states here; 2) in
 * #NM handler, we XRSTOR the states we XSAVE-ed;
 */
void *xsave_area;
uint64_t xcr0;                     /* guest's XCR0 (XSAVE feature mask) */
struct vlapic vlapic;              /* virtual local APIC state */
s64 cache_tsc_offset;              /* cached guest-vs-host TSC offset */
u64 guest_time;                    /* guest-visible time value */
/* Lock and list for virtual platform timers. */
spinlock_t tm_lock;
struct list_head tm_list;
int xen_port;                      /* NOTE(review): presumably the event-channel
                                    * port used for Xen callbacks — confirm */
bool_t flag_dr_dirty;              /* guest debug registers need syncing */
bool_t debug_state_latch;
bool_t single_step;                /* single-step debugging active */
u64 asid_generation;               /* generation counter for ASID recycling */
u32 asid;                          /* address-space identifier (TLB tagging) */
u32 msr_tsc_aux;                   /* guest value of MSR_TSC_AUX (RDTSCP) */
/* VPMU */
struct vpmu_struct vpmu;           /* virtual performance-monitoring unit */
/* Vendor-specific state: exactly one of these is active per platform. */
union {
struct arch_vmx_struct vmx;        /* Intel VT-x state */
struct arch_svm_struct svm;        /* AMD SVM state */
} u;
struct tasklet assert_evtchn_irq_tasklet;  /* deferred event-channel IRQ assertion */
struct mtrr_state mtrr;            /* guest MTRR (memory type range) state */
u64 pat_cr;                        /* guest PAT MSR value */
/* In mode delay_for_missed_ticks, VCPUs have differing guest times. */
int64_t stime_offset;
/* Which cache mode is this VCPU in (CR0:CD/NW)? */
u8 cache_mode;
/* I/O request in flight to device model. */
enum hvm_io_state io_state;        /* see enum hvm_io_state */
unsigned long io_data;             /* data associated with the in-flight request */
int io_size;                       /* size in bytes of the in-flight request */
/*
 * HVM emulation:
 * Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
 * The latter is known to be an MMIO frame (not RAM).
 * This translation is only valid if @mmio_gva is non-zero.
 */
unsigned long mmio_gva;
unsigned long mmio_gpfn;
/* Callback into x86_emulate when emulating FPU/MMX/XMM instructions. */
void (*fpu_exception_callback)(void *, struct cpu_user_regs *);
void *fpu_exception_callback_arg;
/* We may read up to m128 as a number of device-model transactions. */
paddr_t mmio_large_read_pa;        /* physical address of the wide read */
uint8_t mmio_large_read[16];       /* accumulated bytes read so far */
unsigned int mmio_large_read_bytes;
/* We may write up to m128 as a number of device-model transactions. */
paddr_t mmio_large_write_pa;       /* physical address of the wide write */
unsigned int mmio_large_write_bytes;
};
#endif /* __ASM_X86_HVM_VCPU_H__ */