#ifndef __ASM_X86_MM_H__
#define __ASM_X86_MM_H__

#include <xen/config.h>
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/desc.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <hypervisor-ifs/hypervisor-if.h>

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 *  1. 'struct pfn_info' contains a 'struct list_head list'.
 *  2. A PFN_ORDER() macro is provided for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

struct pfn_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            struct domain *domain;
            /* Type reference count and various PGT_xxx flags and fields. */
            u32 type_info;
        } inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Mask of possibly-tainted TLBs. */
            unsigned long cpu_mask;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;
};
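
/*
 * Illustrative sketch (not part of this interface): how the TLB clock
 * reduces safety flushes. Assume a hypothetical per-CPU record of when
 * each CPU last flushed, cpu_last_flush_time[]; the real machinery lives
 * in asm/flushtlb.h. A frame stamped at time T can hold a stale mapping
 * only in a TLB that was last flushed before T:
 *
 *     if ( page->tlbflush_timestamp >= cpu_last_flush_time[cpu] )
 *         flush_tlb_cpu(cpu);          (hypothetical flush primitive)
 *
 * Otherwise any stale mapping has already been flushed out and no
 * further action is needed.
 */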

 /* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4<<29) /* using this page as an L4 page table? */
#define PGT_gdt_page        (5<<29) /* using this page in a GDT? */
#define PGT_ldt_page        (6<<29) /* using this page in an LDT? */
#define PGT_writable_page   (7<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7<<29) /* Bits 29-31. */
 /* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1U<<_PGT_validated)
 /* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         27
#define PGT_pinned          (1U<<_PGT_pinned)
 /* The 10 most significant bits of the virtual address, if a page table. */
#define PGT_va_shift        17
#define PGT_va_mask         (((1U<<10)-1)<<PGT_va_shift)
 /* Is the back pointer still mutable (i.e. not fixed yet)? */
#define PGT_va_mutable      (((1U<<10)-1)<<PGT_va_shift)
 /* Is the back pointer unknown (e.g., p.t. is mapped at multiple VAs)? */
#define PGT_va_unknown      (((1U<<10)-2)<<PGT_va_shift)
 /* 17-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1U<<17)-1)
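
/*
 * Hence the type_info word is laid out as follows:
 *
 *   31 ... 29 |    28     |   27   | 26 ... 17  | 16 ... 0
 *   PGT type  | validated | pinned | VA backptr | type reference count
 */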

 /* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1U<<_PGC_allocated)
 /* 31-bit count of references to this frame. */
#define PGC_count_mask      ((1U<<31)-1)
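
/*
 * Hence the count_info word is laid out as follows:
 *
 *      31     | 30 ... 0
 *   allocated | general reference count
 */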

/* We trust the slab allocator in slab.c, and our use of it. */
#define PageSlab(page)      (1)
#define PageSetSlab(page)   ((void)0)
#define PageClearSlab(page) ((void)0)

#define IS_XEN_HEAP_FRAME(_pfn) (page_to_phys(_pfn) < xenheap_phys_end)

#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                   \
    do {                                                                    \
        (_pfn)->u.inuse.domain = (_dom);                                    \
        /* The incremented type count is intended to pin to 'writable'. */  \
        (_pfn)->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;  \
        wmb(); /* install valid domain ptr before updating refcnt. */       \
        spin_lock(&(_dom)->page_alloc_lock);                                \
        /* _dom holds an allocation reference */                            \
        ASSERT((_pfn)->count_info == 0);                                    \
        (_pfn)->count_info |= PGC_allocated | 1;                            \
        if ( unlikely((_dom)->xenheap_pages++ == 0) )                       \
            get_knownalive_domain(_dom);                                    \
        list_add_tail(&(_pfn)->list, &(_dom)->xenpage_list);                \
        spin_unlock(&(_dom)->page_alloc_lock);                              \
    } while ( 0 )
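
/*
 * Illustrative use (a sketch, not a definitive call site): handing a
 * Xen-heap frame, such as a domain's shared_info page, to its owning
 * domain. virt_to_page() is assumed to map a Xen-heap virtual address
 * to its 'struct pfn_info':
 *
 *     SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
 */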

#define INVALID_P2M_ENTRY (~0UL)

extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern unsigned long max_page;
void init_frametable(void *frametable_vstart, unsigned long nr_pages);

int alloc_page_type(struct pfn_info *page, unsigned int type);
void free_page_type(struct pfn_info *page, unsigned int type);

static inline void put_page(struct pfn_info *page)
{
    u32 nx, x, y = page->count_info;

    /* Atomically decrement the general reference count. */
    do {
        x  = y;
        nx = x - 1;
    }
    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );

    /* Dropped the last reference: return the frame to the domain heap. */
    if ( unlikely((nx & PGC_count_mask) == 0) )
        free_domheap_page(page);
}


static inline int get_page(struct pfn_info *page,
                           struct domain *domain)
{
    u32 x, nx, y = page->count_info;
    struct domain *d, *nd = page->u.inuse.domain;

    do {
        x  = y;
        nx = x + 1;
        d  = nd;
        if ( unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
             unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
             unlikely(d != domain) )                 /* Wrong owner? */
        {
            DPRINTK("Error pfn %08lx: ed=%p, sd=%p, caf=%08x, taf=%08x\n",
                    page_to_pfn(page), domain, d,
                    x, page->u.inuse.type_info);
            return 0;
        }
        /*
         * Atomically bump the reference count *and* recheck the owner.
         * cmpxchg8b operates on the adjacent 64-bit pair formed by
         * count_info (low word, compared via EAX, replaced from EBX) and
         * u.inuse.domain (high word, compared via EDX, replaced from ECX):
         * the count is incremented only if the owner remains unchanged.
         */
        __asm__ __volatile__(
            LOCK_PREFIX "cmpxchg8b %3"
            : "=d" (nd), "=a" (y), "=c" (d),
              "=m" (*(volatile u64 *)(&page->count_info))
            : "0" (d), "1" (x), "c" (d), "b" (nx) );
    }
    while ( unlikely(nd != d) || unlikely(y != x) );

    return 1;
}

void put_page_type(struct pfn_info *page);
int  get_page_type(struct pfn_info *page, u32 type);

static inline void put_page_and_type(struct pfn_info *page)
{
    put_page_type(page);
    put_page(page);
}


static inline int get_page_and_type(struct pfn_info *page,
                                    struct domain *domain,
                                    u32 type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
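
/*
 * Typical usage sketch (hypothetical caller): take both a general and a
 * type reference before touching a frame owned by domain 'd', and drop
 * both afterwards:
 *
 *     if ( get_page_and_type(page, d, PGT_writable_page) )
 *     {
 *         ... safely write to the frame ...
 *         put_page_and_type(page);
 *     }
 */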

/* Wrapped in do-while so each macro expands safely as a single statement. */
#define ASSERT_PAGE_IS_TYPE(_p, _t)                                \
    do {                                                           \
        ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
        ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0);   \
    } while ( 0 )
#define ASSERT_PAGE_IS_DOMAIN(_p, _d)                              \
    do {                                                           \
        ASSERT(((_p)->count_info & PGC_count_mask) != 0);          \
        ASSERT((_p)->u.inuse.domain == (_d));                      \
    } while ( 0 )

int check_descriptor(unsigned long *d);

/*
 * Use the currently-executing domain's page tables on the specified CPUs;
 * i.e., stop borrowing someone else's tables if you are the idle domain.
 */
void synchronise_pagetables(unsigned long cpu_mask);

/*
 * The MPT (machine->physical mapping table) is an array of word-sized
 * values, indexed by machine frame number. Guest OSes are expected to use
 * it to store a "physical" frame number, giving the appearance of
 * contiguous (or near-contiguous) physical memory.
 */
#undef  machine_to_phys_mapping
#ifdef __x86_64__
extern unsigned long *machine_to_phys_mapping;
#else
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
#endif
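
/*
 * Illustrative sketch (hypothetical caller): when machine frame 'mfn' is
 * given to a guest as its pseudo-physical frame 'pfn', the corresponding
 * MPT entry is updated so that machine-to-physical translation works:
 *
 *     machine_to_phys_mapping[mfn] = pfn;
 */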

/* Part of the domain API. */
int do_mmu_update(mmu_update_t *updates, int count, int *success_count);
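
/*
 * Illustrative sketch of a single-entry batch as a guest might submit it
 * via the mmu_update hypercall. This assumes the mmu_update_t layout in
 * hypervisor-if.h: 'ptr' carries the machine address of the target PTE,
 * with the request type encoded in its low two bits, and 'val' the new
 * entry contents:
 *
 *     mmu_update_t req;
 *     req.ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE;
 *     req.val = new_pte_contents;
 *     HYPERVISOR_mmu_update(&req, 1, NULL);   (guest-side wrapper)
 */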

#define DEFAULT_GDT_ENTRIES     (LAST_RESERVED_GDT_ENTRY+1)
#define DEFAULT_GDT_ADDRESS     ((unsigned long)gdt_table)

#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
int memguard_is_guarded(void *p);
#else
#define memguard_init(_s)              (_s)
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)
#define memguard_is_guarded(_p)        (0)
#endif


typedef struct {
    void (*enable)(struct domain *);
    void (*disable)(struct domain *);
} vm_assist_info_t;
extern vm_assist_info_t vm_assist_info[];


/* Writable Pagetables */
typedef struct {
    /* Linear address where the guest is updating the p.t. page. */
    unsigned long l1va;
    /* Copy of the p.t. page, taken before guest is given write access. */
    l1_pgentry_t *page;
    /* A temporary Xen mapping of the actual p.t. page. */
    l1_pgentry_t *pl1e;
    /* Index in L2 page table where this L1 p.t. is always hooked. */
    unsigned int l2_idx; /* NB. Only used for PTWR_PT_ACTIVE. */
} ptwr_ptinfo_t;

typedef struct {
    ptwr_ptinfo_t ptinfo[2];
} __cacheline_aligned ptwr_info_t;

extern ptwr_info_t ptwr_info[];

#define PTWR_PT_ACTIVE 0
#define PTWR_PT_INACTIVE 1

#define PTWR_CLEANUP_ACTIVE 1
#define PTWR_CLEANUP_INACTIVE 2

void ptwr_flush(const int);
int ptwr_do_page_fault(unsigned long);
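
/*
 * Illustrative sketch (hypothetical code) of the revalidation performed
 * by ptwr_flush(): compare the snapshot taken before write access was
 * granted ('page' above) against the live frame ('pl1e' above), and
 * validate only the entries the guest actually modified:
 *
 *     for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
 *         if ( l1_pgentry_val(snapshot[i]) != l1_pgentry_val(pl1e[i]) )
 *             ... validate and install the modified entry ...
 */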

#define __cleanup_writable_pagetable(_what)                                 \
    do {                                                                    \
        int cpu = smp_processor_id();                                       \
        if ( ((_what) & PTWR_CLEANUP_ACTIVE) &&                             \
             (ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va != 0) )            \
            ptwr_flush(PTWR_PT_ACTIVE);                                     \
        if ( ((_what) & PTWR_CLEANUP_INACTIVE) &&                           \
             (ptwr_info[cpu].ptinfo[PTWR_PT_INACTIVE].l1va != 0) )          \
            ptwr_flush(PTWR_PT_INACTIVE);                                   \
    } while ( 0 )

#define cleanup_writable_pagetable(_d, _w)                                \
    do {                                                                  \
        if ( unlikely(VM_ASSIST((_d), VMASST_TYPE_writable_pagetables)) ) \
            __cleanup_writable_pagetable(_w);                             \
    } while ( 0 )

#ifndef NDEBUG
void audit_domain(struct domain *d);
void audit_domains(void);
#else
#define audit_domain(_d) ((void)0)
#define audit_domains()  ((void)0)
#endif

#endif /* __ASM_X86_MM_H__ */