path: root/xen/include/asm-x86/desc.h
#ifndef __ARCH_DESC_H
#define __ARCH_DESC_H

/*
 * Xen reserves a memory page of GDT entries.
 * No guest GDT entries exist beyond the Xen reserved area.
 */
#define NR_RESERVED_GDT_PAGES   1
#define NR_RESERVED_GDT_BYTES   (NR_RESERVED_GDT_PAGES * PAGE_SIZE)
#define NR_RESERVED_GDT_ENTRIES (NR_RESERVED_GDT_BYTES / 8)

#define LAST_RESERVED_GDT_PAGE  \
    (FIRST_RESERVED_GDT_PAGE + NR_RESERVED_GDT_PAGES - 1)
#define LAST_RESERVED_GDT_BYTE  \
    (FIRST_RESERVED_GDT_BYTE + NR_RESERVED_GDT_BYTES - 1)
#define LAST_RESERVED_GDT_ENTRY \
    (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
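
/*
 * Worked example (illustrative): with 4 KiB pages the reserved area is one
 * page = 4096 bytes = 512 eight-byte descriptors, i.e. GDT entries
 * FIRST_RESERVED_GDT_ENTRY through FIRST_RESERVED_GDT_ENTRY + 511. A
 * build-time check along these lines would confirm the arithmetic:
 *
 *   BUILD_BUG_ON(NR_RESERVED_GDT_ENTRIES != PAGE_SIZE / 8);
 */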

#define LDT_ENTRY_SIZE 8

#define FLAT_COMPAT_RING1_CS 0xe019  /* GDT index 7171 */
#define FLAT_COMPAT_RING1_DS 0xe021  /* GDT index 7172 */
#define FLAT_COMPAT_RING1_SS 0xe021  /* GDT index 7172 */
#define FLAT_COMPAT_RING3_CS 0xe02b  /* GDT index 7173 */
#define FLAT_COMPAT_RING3_DS 0xe033  /* GDT index 7174 */
#define FLAT_COMPAT_RING3_SS 0xe033  /* GDT index 7174 */

#define FLAT_COMPAT_KERNEL_DS FLAT_COMPAT_RING1_DS
#define FLAT_COMPAT_KERNEL_CS FLAT_COMPAT_RING1_CS
#define FLAT_COMPAT_KERNEL_SS FLAT_COMPAT_RING1_SS
#define FLAT_COMPAT_USER_DS   FLAT_COMPAT_RING3_DS
#define FLAT_COMPAT_USER_CS   FLAT_COMPAT_RING3_CS
#define FLAT_COMPAT_USER_SS   FLAT_COMPAT_RING3_SS

/*
 * In long mode, TSS and LDT descriptors are 16 bytes and hence occupy two
 * consecutive 8-byte GDT slots each, giving the stride of 2 below.
 */
#define TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8)
#define LDT_ENTRY (TSS_ENTRY + 2)
#define PER_CPU_GDT_ENTRY (LDT_ENTRY + 2)

#ifndef __ASSEMBLY__

#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)

/* Fix up the RPL of a guest segment selector. */
#define __fixup_guest_selector(d, sel)                             \
({                                                                 \
    uint16_t _rpl = GUEST_KERNEL_RPL(d);                           \
    (sel) = (((sel) & 3) >= _rpl) ? (sel) : (((sel) & ~3) | _rpl); \
})
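
/*
 * Worked example (illustrative): a 64-bit PV guest kernel runs at RPL 3,
 * so a selector of 0x0808 (RPL 0) is rewritten to 0x080b, while for a
 * 32-bit PV guest (RPL 1) it becomes 0x0809. Selectors whose RPL already
 * meets the minimum are left unchanged.
 */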

/* Stack selectors don't need fixing up if the kernel runs in ring 0. */
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
#define fixup_guest_stack_selector(d, ss) ((void)0)
#else
#define fixup_guest_stack_selector(d, ss) __fixup_guest_selector(d, ss)
#endif

/*
 * Code selectors are always fixed up: this allows the Xen exit stub to
 * detect a return to guest context even when the guest kernel runs in
 * ring 0.
 */
#define fixup_guest_code_selector(d, cs)  __fixup_guest_selector(d, cs)

/*
 * We need this function because enforcing the correct guest kernel RPL is
 * insufficient if the selector is poked into an interrupt, trap or call
 * gate. The selector RPL is ignored when a gate is accessed. We must
 * therefore make sure that the selector does not reference a Xen-private
 * segment.
 *
 * Note that selectors used only by IRET do not need to be checked: if the
 * descriptor DPL differs from CS RPL then we'll #GP.
 *
 * Stack and data selectors do not need to be checked either: DS, ES, FS
 * and GS are cleared automatically if their DPL is below CPL, and if SS's
 * RPL or DPL differs from CS RPL then we'll #GP.
 */
#define guest_gate_selector_okay(d, sel)                                \
    ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */        \
     ((sel) == (!is_pv_32on64_domain(d) ?                               \
                FLAT_KERNEL_CS :                /* Xen default seg? */  \
                FLAT_COMPAT_KERNEL_CS)) ||                              \
     ((sel) & 4))                               /* LDT seg? */
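
/*
 * Illustrative cases: any LDT selector (bit 2 set) passes, as does any GDT
 * selector whose index lies below FIRST_RESERVED_GDT_ENTRY. A selector
 * inside the Xen-reserved GDT area is rejected unless it is exactly
 * FLAT_KERNEL_CS (or FLAT_COMPAT_KERNEL_CS for a 32-on-64 guest).
 */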

#endif /* __ASSEMBLY__ */

/* These are bitmasks for the high 32 bits of a descriptor table entry. */
#define _SEGMENT_TYPE    (15<< 8)
#define _SEGMENT_WR      ( 1<< 9) /* Writeable (data) or Readable (code)
                                     segment */
#define _SEGMENT_EC      ( 1<<10) /* Expand-down or Conforming segment */
#define _SEGMENT_CODE    ( 1<<11) /* Code (vs data) segment for non-system
                                     segments */
#define _SEGMENT_S       ( 1<<12) /* Code/data (vs system) segment; S==0
                                     means a system descriptor */
#define _SEGMENT_DPL     ( 3<<13) /* Descriptor Privilege Level */
#define _SEGMENT_P       ( 1<<15) /* Segment Present */
#define _SEGMENT_L       ( 1<<21) /* 64-bit segment */
#define _SEGMENT_DB      ( 1<<22) /* 16- or 32-bit segment */
#define _SEGMENT_G       ( 1<<23) /* Granularity */
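
/*
 * Example (illustrative): given the high word 'b' of a descriptor, a
 * present 64-bit code segment can be recognised with
 *
 *   (b & (_SEGMENT_P|_SEGMENT_S|_SEGMENT_CODE|_SEGMENT_L)) ==
 *       (_SEGMENT_P|_SEGMENT_S|_SEGMENT_CODE|_SEGMENT_L)
 */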

#ifndef __ASSEMBLY__

struct desc_struct {
    u32 a, b;
};

typedef struct {
    u64 a, b;
} idt_entry_t;

/* Write the lower 64 bits of an IDT entry. This relies on the upper 32
 * bits of the handler address not changing, which is a safe assumption as
 * all functions we are likely to load will live inside the 1GB
 * code/data/bss address range.
 *
 * Ideally we would use cmpxchg16b, but that is not supported on some old
 * AMD 64-bit-capable processors, and has no safe equivalent.
 */
static inline void _write_gate_lower(volatile idt_entry_t *gate,
                                     const idt_entry_t *new)
{
    ASSERT(gate->b == new->b);
    gate->a = new->a;
}

#define _set_gate(gate_addr,type,dpl,addr)               \
do {                                                     \
    (gate_addr)->a = 0;                                  \
    wmb(); /* disable gate /then/ rewrite */             \
    (gate_addr)->b =                                     \
        ((unsigned long)(addr) >> 32);                   \
    wmb(); /* rewrite /then/ enable gate */              \
    (gate_addr)->a =                                     \
        (((unsigned long)(addr) & 0xFFFF0000UL) << 32) | \
        ((unsigned long)(dpl) << 45) |                   \
        ((unsigned long)(type) << 40) |                  \
        ((unsigned long)(addr) & 0xFFFFUL) |             \
        ((unsigned long)__HYPERVISOR_CS64 << 16) |       \
        (1UL << 47);                                     \
} while (0)
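
/*
 * Illustrative usage (names hypothetical): installing a DPL-0 interrupt
 * gate (type 14) for a handler might look like
 *
 *   _set_gate(&idt_table[vector], 14, 0, &handler);
 *
 * The wmb()s ensure the gate is never transiently valid with a stale
 * handler: the present bit (bit 47) is cleared first and only set again by
 * the final write, once both halves of the new address are in place.
 */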

static inline void _set_gate_lower(idt_entry_t *gate, unsigned long type,
                                   unsigned long dpl, void *addr)
{
    idt_entry_t idte;
    idte.b = gate->b;
    idte.a =
        (((unsigned long)(addr) & 0xFFFF0000UL) << 32) |
        ((unsigned long)(dpl) << 45) |
        ((unsigned long)(type) << 40) |
        ((unsigned long)(addr) & 0xFFFFUL) |
        ((unsigned long)__HYPERVISOR_CS64 << 16) |
        (1UL << 47);
    _write_gate_lower(gate, &idte);
}

/* Update the lower half handler of an IDT entry, without changing any
 * other configuration. */
static inline void _update_gate_addr_lower(idt_entry_t *gate, void *addr)
{
    idt_entry_t idte;
    idte.a = gate->a;

    idte.b = ((unsigned long)(addr) >> 32);
    idte.a &= 0x0000FFFFFFFF0000ULL; /* keep selector, IST, type, DPL, P */
    idte.a |= (((unsigned long)(addr) & 0xFFFF0000UL) << 32) |
        ((unsigned long)(addr) & 0xFFFFUL);

    _write_gate_lower(gate, &idte);
}

#define _set_tssldt_desc(desc,addr,limit,type)           \
do {                                                     \
    (desc)[0].b = (desc)[1].b = 0;                       \
    wmb(); /* disable entry /then/ rewrite */            \
    (desc)[0].a =                                        \
        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);   \
    (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32);  \
    wmb(); /* rewrite /then/ enable entry */             \
    (desc)[0].b =                                        \
        ((u32)(addr) & 0xFF000000U) |                    \
        ((u32)(type) << 8) | 0x8000U |                   \
        (((u32)(addr) & 0x00FF0000U) >> 16);             \
} while (0)
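
/*
 * Illustrative usage (names hypothetical): installing a 64-bit available
 * TSS descriptor (type 9), which spans two consecutive 8-byte GDT slots:
 *
 *   _set_tssldt_desc(&gdt[TSS_ENTRY], (unsigned long)&tss,
 *                    sizeof(tss) - 1, 9);
 */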

struct desc_ptr {
    unsigned short limit;
    unsigned long base;
} __attribute__((__packed__));
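
/*
 * Example (illustrative, with hypothetical values): struct desc_ptr
 * matches the 10-byte memory operand expected by LGDT/LIDT in long mode,
 * so a descriptor table can be loaded with
 *
 *   struct desc_ptr gdtr = { .limit = limit_in_bytes - 1,
 *                            .base  = table_base };
 *   asm volatile ( "lgdt %0" : : "m" (gdtr) );
 */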

extern struct desc_struct boot_cpu_gdt_table[];
DECLARE_PER_CPU(struct desc_struct *, gdt_table);
extern struct desc_struct boot_cpu_compat_gdt_table[];
DECLARE_PER_CPU(struct desc_struct *, compat_gdt_table);

extern void set_intr_gate(unsigned int irq, void *addr);
extern void load_TR(void);

#endif /* !__ASSEMBLY__ */

#endif /* __ARCH_DESC_H */