/******************************************************************************
 * arch/x86/mm/shadow/types.h
 * 
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifndef _XEN_SHADOW_TYPES_H
#define _XEN_SHADOW_TYPES_H

// Map a shadow page
static inline void *
map_shadow_page(mfn_t smfn)
{
    // XXX -- Possible optimization/measurement question for 32-bit and PAE
    //        hypervisors:
    //        How often is this smfn already available in the shadow linear
    //        table?  Might it be worth checking that table first,
    //        presumably using the reverse map hint in the page_info of this
    //        smfn, rather than calling map_domain_page()?
    //
    return sh_map_domain_page(smfn);
}

// matching unmap for map_shadow_page()
static inline void
unmap_shadow_page(void *p)
{
    sh_unmap_domain_page(p);
}
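
/* Typical pairing (an illustrative sketch, not code from this file):
 *
 *     void *sp = map_shadow_page(smfn);
 *     ... read or update shadow entries through sp ...
 *     unmap_shadow_page(sp);
 */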

/* 
 * Define various types for handling pagetables, based on these options:
 * SHADOW_PAGING_LEVELS : Number of levels of shadow pagetables
 * GUEST_PAGING_LEVELS  : Number of levels of guest pagetables
 */

#if (CONFIG_PAGING_LEVELS < SHADOW_PAGING_LEVELS) 
#error Cannot have more levels of shadow pagetables than host pagetables
#endif

#if (SHADOW_PAGING_LEVELS < GUEST_PAGING_LEVELS) 
#error Cannot have more levels of guest pagetables than shadow pagetables
#endif

#if SHADOW_PAGING_LEVELS == 2
#define SHADOW_L1_PAGETABLE_ENTRIES    1024
#define SHADOW_L2_PAGETABLE_ENTRIES    1024
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        22
#endif

#if SHADOW_PAGING_LEVELS == 3
#define SHADOW_L1_PAGETABLE_ENTRIES     512
#define SHADOW_L2_PAGETABLE_ENTRIES     512
#define SHADOW_L3_PAGETABLE_ENTRIES       4
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        21
#define SHADOW_L3_PAGETABLE_SHIFT        30
#endif

#if SHADOW_PAGING_LEVELS == 4
#define SHADOW_L1_PAGETABLE_ENTRIES     512
#define SHADOW_L2_PAGETABLE_ENTRIES     512
#define SHADOW_L3_PAGETABLE_ENTRIES     512
#define SHADOW_L4_PAGETABLE_ENTRIES     512
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        21
#define SHADOW_L3_PAGETABLE_SHIFT        30
#define SHADOW_L4_PAGETABLE_SHIFT        39
#endif

/* Types of the shadow page tables */
typedef l1_pgentry_t shadow_l1e_t;
typedef l2_pgentry_t shadow_l2e_t;
#if SHADOW_PAGING_LEVELS >= 3
typedef l3_pgentry_t shadow_l3e_t;
#if SHADOW_PAGING_LEVELS >= 4
typedef l4_pgentry_t shadow_l4e_t;
#endif
#endif

/* Access functions for them */
static inline paddr_t shadow_l1e_get_paddr(shadow_l1e_t sl1e)
{ return l1e_get_paddr(sl1e); }
static inline paddr_t shadow_l2e_get_paddr(shadow_l2e_t sl2e)
{ return l2e_get_paddr(sl2e); }
#if SHADOW_PAGING_LEVELS >= 3
static inline paddr_t shadow_l3e_get_paddr(shadow_l3e_t sl3e)
{ return l3e_get_paddr(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline paddr_t shadow_l4e_get_paddr(shadow_l4e_t sl4e)
{ return l4e_get_paddr(sl4e); }
#endif
#endif

static inline mfn_t shadow_l1e_get_mfn(shadow_l1e_t sl1e)
{ return _mfn(l1e_get_pfn(sl1e)); }
static inline mfn_t shadow_l2e_get_mfn(shadow_l2e_t sl2e)
{ return _mfn(l2e_get_pfn(sl2e)); }
#if SHADOW_PAGING_LEVELS >= 3
static inline mfn_t shadow_l3e_get_mfn(shadow_l3e_t sl3e)
{ return _mfn(l3e_get_pfn(sl3e)); }
#if SHADOW_PAGING_LEVELS >= 4
static inline mfn_t shadow_l4e_get_mfn(shadow_l4e_t sl4e)
{ return _mfn(l4e_get_pfn(sl4e)); }
#endif
#endif

static inline u32 shadow_l1e_get_flags(shadow_l1e_t sl1e)
{ return l1e_get_flags(sl1e); }
static inline u32 shadow_l2e_get_flags(shadow_l2e_t sl2e)
{ return l2e_get_flags(sl2e); }
#if SHADOW_PAGING_LEVELS >= 3
static inline u32 shadow_l3e_get_flags(shadow_l3e_t sl3e)
{ return l3e_get_flags(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline u32 shadow_l4e_get_flags(shadow_l4e_t sl4e)
{ return l4e_get_flags(sl4e); }
#endif
#endif

static inline shadow_l1e_t
shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
{ l1e_remove_flags(sl1e, flags); return sl1e; }

static inline shadow_l1e_t shadow_l1e_empty(void) 
{ return l1e_empty(); }
static inline shadow_l2e_t shadow_l2e_empty(void) 
{ return l2e_empty(); }
#if SHADOW_PAGING_LEVELS >= 3
static inline shadow_l3e_t shadow_l3e_empty(void) 
{ return l3e_empty(); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_empty(void) 
{ return l4e_empty(); }
#endif
#endif

static inline shadow_l1e_t shadow_l1e_from_mfn(mfn_t mfn, u32 flags)
{ return l1e_from_pfn(mfn_x(mfn), flags); }
static inline shadow_l2e_t shadow_l2e_from_mfn(mfn_t mfn, u32 flags)
{ return l2e_from_pfn(mfn_x(mfn), flags); }
#if SHADOW_PAGING_LEVELS >= 3
static inline shadow_l3e_t shadow_l3e_from_mfn(mfn_t mfn, u32 flags)
{ return l3e_from_pfn(mfn_x(mfn), flags); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
{ return l4e_from_pfn(mfn_x(mfn), flags); }
#endif
#endif

#define shadow_l1_table_offset(a) l1_table_offset(a)
#define shadow_l2_table_offset(a) l2_table_offset(a)
#define shadow_l3_table_offset(a) l3_table_offset(a)
#define shadow_l4_table_offset(a) l4_table_offset(a)

/**************************************************************************/
/* Access to the linear mapping of shadow page tables. */

/* Offsets into each level of the linear mapping for a virtual address. */
#define shadow_l1_linear_offset(_a)                                           \
        (((_a) & VADDR_MASK) >> SHADOW_L1_PAGETABLE_SHIFT)
#define shadow_l2_linear_offset(_a)                                           \
        (((_a) & VADDR_MASK) >> SHADOW_L2_PAGETABLE_SHIFT)
#define shadow_l3_linear_offset(_a)                                           \
        (((_a) & VADDR_MASK) >> SHADOW_L3_PAGETABLE_SHIFT)
#define shadow_l4_linear_offset(_a)                                           \
        (((_a) & VADDR_MASK) >> SHADOW_L4_PAGETABLE_SHIFT)

/* Where to find each level of the linear mapping.  For PV guests, we use 
 * the shadow linear-map self-entry as many times as we need.  For HVM 
 * guests, the shadow doesn't have a linear-map self-entry so we must use 
 * the monitor-table's linear-map entry N-1 times and then the shadow-map 
 * entry once. */
#define __sh_linear_l1_table ((shadow_l1e_t *)(SH_LINEAR_PT_VIRT_START))
#define __sh_linear_l2_table ((shadow_l2e_t *)                               \
    (__sh_linear_l1_table + shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)))

// shadow linear L3 and L4 tables only exist in 4-level paging...
#if SHADOW_PAGING_LEVELS == 4
#define __sh_linear_l3_table ((shadow_l3e_t *)                               \
    (__sh_linear_l2_table + shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)))
#define __sh_linear_l4_table ((shadow_l4e_t *)                               \
    (__sh_linear_l3_table + shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)))
#endif

#define sh_linear_l1_table(v) ({ \
    ASSERT(current == (v)); \
    __sh_linear_l1_table; \
})

// XXX -- these should not be conditional on is_hvm_vcpu(v), but rather on
//        shadow_mode_external(d)...
//
#define sh_linear_l2_table(v) ({ \
    ASSERT(current == (v)); \
    ((shadow_l2e_t *) \
     (is_hvm_vcpu(v) ? __linear_l1_table : __sh_linear_l1_table) + \
     shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})

#if SHADOW_PAGING_LEVELS >= 4
#define sh_linear_l3_table(v) ({ \
    ASSERT(current == (v)); \
    ((shadow_l3e_t *) \
     (is_hvm_vcpu(v) ? __linear_l2_table : __sh_linear_l2_table) + \
      shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})

// we use l4_pgentry_t instead of shadow_l4e_t below because shadow_l4e_t is
// not defined when xen_levels==4 & shadow_levels==3...
#define sh_linear_l4_table(v) ({ \
    ASSERT(current == (v)); \
    ((l4_pgentry_t *) \
     (is_hvm_vcpu(v) ? __linear_l3_table : __sh_linear_l3_table) + \
      shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})
#endif
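
/* Illustrative sketch (not part of the original interface): looking up the
 * shadow L1 entry that maps a given virtual address through the linear
 * mapping described above.  Only valid for the currently-running vcpu,
 * as the ASSERT in sh_linear_l1_table() enforces. */
static inline shadow_l1e_t
sh_linear_l1e_lookup_example(struct vcpu *v, unsigned long va)
{
    /* The linear L1 table is indexed by the VA's position within the
     * whole L1-mapped address space. */
    return sh_linear_l1_table(v)[shadow_l1_linear_offset(va)];
}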

#if GUEST_PAGING_LEVELS == 2

#include "../page-guest32.h"

#define GUEST_L1_PAGETABLE_ENTRIES     1024
#define GUEST_L2_PAGETABLE_ENTRIES     1024
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         22

/* Type of the guest's frame numbers */
TYPE_SAFE(u32,gfn)
#undef INVALID_GFN
#define INVALID_GFN ((u32)(-1u))
#define SH_PRI_gfn "05x"

/* Types of the guest's page tables */
typedef l1_pgentry_32_t guest_l1e_t;
typedef l2_pgentry_32_t guest_l2e_t;

/* Access functions for them */
static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return l1e_get_paddr_32(gl1e); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return l2e_get_paddr_32(gl2e); }

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr_32(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr_32(gl2e) >> PAGE_SHIFT); }

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags_32(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags_32(gl2e); }

static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
{ l1e_add_flags_32(gl1e, flags); return gl1e; }
static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
{ l2e_add_flags_32(gl2e, flags); return gl2e; }

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn_32(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn_32(gfn_x(gfn), flags); }

#define guest_l1_table_offset(a) l1_table_offset_32(a)
#define guest_l2_table_offset(a) l2_table_offset_32(a)

/* The shadow types needed for the various levels. */
#define SH_type_l1_shadow  SH_type_l1_32_shadow
#define SH_type_l2_shadow  SH_type_l2_32_shadow
#define SH_type_fl1_shadow SH_type_fl1_32_shadow

#else /* GUEST_PAGING_LEVELS != 2 */

#if GUEST_PAGING_LEVELS == 3
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES        4
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#else /* GUEST_PAGING_LEVELS == 4 */
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES      512
#define GUEST_L4_PAGETABLE_ENTRIES      512
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#define GUEST_L4_PAGETABLE_SHIFT         39
#endif

/* Type of the guest's frame numbers */
TYPE_SAFE(unsigned long,gfn)
#undef INVALID_GFN
#define INVALID_GFN ((unsigned long)(-1ul))
#define SH_PRI_gfn "05lx"

/* Types of the guest's page tables */
typedef l1_pgentry_t guest_l1e_t;
typedef l2_pgentry_t guest_l2e_t;
typedef l3_pgentry_t guest_l3e_t;
#if GUEST_PAGING_LEVELS >= 4
typedef l4_pgentry_t guest_l4e_t;
#endif

/* Access functions for them */
static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return l1e_get_paddr(gl1e); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return l2e_get_paddr(gl2e); }
static inline paddr_t guest_l3e_get_paddr(guest_l3e_t gl3e)
{ return l3e_get_paddr(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline paddr_t guest_l4e_get_paddr(guest_l4e_t gl4e)
{ return l4e_get_paddr(gl4e); }
#endif

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); }
static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e)
{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); }
#if GUEST_PAGING_LEVELS >= 4
static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e)
{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); }
#endif

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags(gl2e); }
static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e)
{ return l3e_get_flags(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e)
{ return l4e_get_flags(gl4e); }
#endif

static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
{ l1e_add_flags(gl1e, flags); return gl1e; }
static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
{ l2e_add_flags(gl2e, flags); return gl2e; }
static inline guest_l3e_t guest_l3e_add_flags(guest_l3e_t gl3e, u32 flags)
{ l3e_add_flags(gl3e, flags); return gl3e; }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_add_flags(guest_l4e_t gl4e, u32 flags)
{ l4e_add_flags(gl4e, flags); return gl4e; }
#endif

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags)
{ return l3e_from_pfn(gfn_x(gfn), flags); }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
{ return l4e_from_pfn(gfn_x(gfn), flags); }
#endif

#define guest_l1_table_offset(a) l1_table_offset(a)
#define guest_l2_table_offset(a) l2_table_offset(a)
#define guest_l3_table_offset(a) l3_table_offset(a)
#define guest_l4_table_offset(a) l4_table_offset(a)

/* The shadow types needed for the various levels. */
#if GUEST_PAGING_LEVELS == 3
#define SH_type_l1_shadow  SH_type_l1_pae_shadow
#define SH_type_fl1_shadow SH_type_fl1_pae_shadow
#define SH_type_l2_shadow  SH_type_l2_pae_shadow
#define SH_type_l2h_shadow SH_type_l2h_pae_shadow
#else
#define SH_type_l1_shadow  SH_type_l1_64_shadow
#define SH_type_fl1_shadow SH_type_fl1_64_shadow
#define SH_type_l2_shadow  SH_type_l2_64_shadow
#define SH_type_l2h_shadow SH_type_l2h_64_shadow
#define SH_type_l3_shadow  SH_type_l3_64_shadow
#define SH_type_l4_shadow  SH_type_l4_64_shadow
#endif

#endif /* GUEST_PAGING_LEVELS != 2 */

#define VALID_GFN(m) ((m) != INVALID_GFN)

static inline int
valid_gfn(gfn_t m)
{
    return VALID_GFN(gfn_x(m));
}

/* Translation between mfns and gfns */

// vcpu-specific version of gfn_to_mfn().  This is where we hide the dirty
// little secret that, for hvm guests with paging disabled, nearly all of the
// shadow code actually thinks that the guest is running on *untranslated* page
// tables (which is actually domain->phys_table).
//

static inline mfn_t
vcpu_gfn_to_mfn(struct vcpu *v, gfn_t gfn)
{
    if ( !paging_vcpu_mode_translate(v) )
        return _mfn(gfn_x(gfn));
    return gfn_to_mfn(v->domain, gfn_x(gfn));
}

static inline paddr_t
gfn_to_paddr(gfn_t gfn)
{
    return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
}

/* Type used for recording a walk through guest pagetables.  It is
 * filled in by the pagetable walk function, and also used as a cache
 * for later walks.  
 * Any non-null pointer in this structure represents a mapping of guest
 * memory.  We must always call walk_init() before using a walk_t, and 
 * call walk_unmap() when we're done. 
 * The "Effective l1e" field is used when there isn't an l1e to point to, 
 * but we have fabricated an l1e for propagation to the shadow (e.g., 
 * for splintering guest superpages into many shadow l1 entries).  */
typedef struct shadow_walk_t walk_t;
struct shadow_walk_t 
{
    unsigned long va;           /* Address we were looking for */
#if GUEST_PAGING_LEVELS >= 3
#if GUEST_PAGING_LEVELS >= 4
    guest_l4e_t *l4e;           /* Pointer to guest's level 4 entry */
#endif
    guest_l3e_t *l3e;           /* Pointer to guest's level 3 entry */
#endif
    guest_l2e_t *l2e;           /* Pointer to guest's level 2 entry */
    guest_l1e_t *l1e;           /* Pointer to guest's level 1 entry */
    guest_l1e_t eff_l1e;        /* Effective level 1 entry */
#if GUEST_PAGING_LEVELS >= 4
    mfn_t l4mfn;                /* MFN that the level 4 entry is in */
    mfn_t l3mfn;                /* MFN that the level 3 entry is in */
#endif
    mfn_t l2mfn;                /* MFN that the level 2 entry is in */
    mfn_t l1mfn;                /* MFN that the level 1 entry is in */
};
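
/* Sketch of the intended lifecycle (walk_init()/walk_unmap() are named by
 * the comment above; their exact signatures are assumed here and live in
 * the shadow code proper):
 *
 *     walk_t gw;
 *     walk_init(&gw);              // start with no mappings held
 *     ... the pagetable walk function fills in gw ...
 *     flags = accumulate_guest_flags(v, &gw);   // see below
 *     walk_unmap(&gw);             // drop any guest mappings still held
 */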

/* macros for generating the internal (per-mode) function names of the
 * shadow code's external entry points.
 */
#define INTERNAL_NAME(name) \
    SHADOW_INTERNAL_NAME(name, SHADOW_PAGING_LEVELS, GUEST_PAGING_LEVELS)
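
/* For example, with SHADOW_PAGING_LEVELS == 3 and GUEST_PAGING_LEVELS == 2,
 * INTERNAL_NAME(sh_page_fault) pastes into a per-mode symbol along the
 * lines of sh_page_fault__shadow_3_guest_2 (the exact spelling comes from
 * SHADOW_INTERNAL_NAME, defined elsewhere in the shadow code). */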

/* macros for renaming the primary entry points, so that they are more
 * easily distinguished in a debugger
 */
#define sh_page_fault              INTERNAL_NAME(sh_page_fault)
#define sh_invlpg                  INTERNAL_NAME(sh_invlpg)
#define sh_gva_to_gfn              INTERNAL_NAME(sh_gva_to_gfn)
#define sh_update_cr3              INTERNAL_NAME(sh_update_cr3)
#define sh_rm_write_access_from_l1 INTERNAL_NAME(sh_rm_write_access_from_l1)
#define sh_rm_mappings_from_l1     INTERNAL_NAME(sh_rm_mappings_from_l1)
#define sh_remove_l1_shadow        INTERNAL_NAME(sh_remove_l1_shadow)
#define sh_remove_l2_shadow        INTERNAL_NAME(sh_remove_l2_shadow)
#define sh_remove_l3_shadow        INTERNAL_NAME(sh_remove_l3_shadow)
#define sh_map_and_validate_gl4e   INTERNAL_NAME(sh_map_and_validate_gl4e)
#define sh_map_and_validate_gl3e   INTERNAL_NAME(sh_map_and_validate_gl3e)
#define sh_map_and_validate_gl2e   INTERNAL_NAME(sh_map_and_validate_gl2e)
#define sh_map_and_validate_gl2he  INTERNAL_NAME(sh_map_and_validate_gl2he)
#define sh_map_and_validate_gl1e   INTERNAL_NAME(sh_map_and_validate_gl1e)
#define sh_destroy_l4_shadow       INTERNAL_NAME(sh_destroy_l4_shadow)
#define sh_destroy_l3_shadow       INTERNAL_NAME(sh_destroy_l3_shadow)
#define sh_destroy_l2_shadow       INTERNAL_NAME(sh_destroy_l2_shadow)
#define sh_destroy_l1_shadow       INTERNAL_NAME(sh_destroy_l1_shadow)
#define sh_unhook_32b_mappings     INTERNAL_NAME(sh_unhook_32b_mappings)
#define sh_unhook_pae_mappings     INTERNAL_NAME(sh_unhook_pae_mappings)
#define sh_unhook_64b_mappings     INTERNAL_NAME(sh_unhook_64b_mappings)
#define sh_paging_mode             INTERNAL_NAME(sh_paging_mode)
#define sh_detach_old_tables       INTERNAL_NAME(sh_detach_old_tables)
#define sh_x86_emulate_write       INTERNAL_NAME(sh_x86_emulate_write)
#define sh_x86_emulate_cmpxchg     INTERNAL_NAME(sh_x86_emulate_cmpxchg)
#define sh_x86_emulate_cmpxchg8b   INTERNAL_NAME(sh_x86_emulate_cmpxchg8b)
#define sh_audit_l1_table          INTERNAL_NAME(sh_audit_l1_table)
#define sh_audit_fl1_table         INTERNAL_NAME(sh_audit_fl1_table)
#define sh_audit_l2_table          INTERNAL_NAME(sh_audit_l2_table)
#define sh_audit_l3_table          INTERNAL_NAME(sh_audit_l3_table)
#define sh_audit_l4_table          INTERNAL_NAME(sh_audit_l4_table)
#define sh_guess_wrmap             INTERNAL_NAME(sh_guess_wrmap)
#define sh_clear_shadow_entry      INTERNAL_NAME(sh_clear_shadow_entry)

/* The sh_guest_(map|get)_* functions only depend on the number of config
 * levels
 */
#define sh_guest_map_l1e                                       \
        SHADOW_INTERNAL_NAME(sh_guest_map_l1e,                \
                              CONFIG_PAGING_LEVELS,             \
                              CONFIG_PAGING_LEVELS)
#define sh_guest_get_eff_l1e                                   \
        SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e,            \
                              CONFIG_PAGING_LEVELS,             \
                              CONFIG_PAGING_LEVELS)

/* sh_make_monitor_table only depends on the number of shadow levels */
#define sh_make_monitor_table                                  \
        SHADOW_INTERNAL_NAME(sh_make_monitor_table,           \
                              SHADOW_PAGING_LEVELS,             \
                              SHADOW_PAGING_LEVELS)
#define sh_destroy_monitor_table                               \
        SHADOW_INTERNAL_NAME(sh_destroy_monitor_table,        \
                              SHADOW_PAGING_LEVELS,             \
                              SHADOW_PAGING_LEVELS)


#if SHADOW_PAGING_LEVELS == 3
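/* With 3-level (PAE) shadows, the top-level shadow that an HVM guest's
 * CR3 points at must lie below 4GB: a 20-bit MFN plus the 12-bit page
 * offset is exactly a 32-bit physical address. */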
#define MFN_FITS_IN_HVM_CR3(_MFN) (!(mfn_x(_MFN) >> 20))
#endif

#if SHADOW_PAGING_LEVELS == 2
#define SH_PRI_pte "08x"
#else /* SHADOW_PAGING_LEVELS >= 3 */
#ifndef __x86_64__
#define SH_PRI_pte "016llx"
#else
#define SH_PRI_pte "016lx"
#endif
#endif /* SHADOW_PAGING_LEVELS >= 3 */

#if GUEST_PAGING_LEVELS == 2
#define SH_PRI_gpte "08x"
#else /* GUEST_PAGING_LEVELS >= 3 */
#ifndef __x86_64__
#define SH_PRI_gpte "016llx"
#else
#define SH_PRI_gpte "016lx"
#endif
#endif /* GUEST_PAGING_LEVELS >= 3 */

static inline u32
accumulate_guest_flags(struct vcpu *v, walk_t *gw)
{
    u32 accumulated_flags;

    // We accumulate the permission flags with bitwise ANDing.
    // This works for the PRESENT bit, RW bit, and USER bit.
    // For the NX bit, however, the polarity is wrong, so we accumulate the
    // inverse of the NX bit.
    //
    accumulated_flags =  guest_l1e_get_flags(gw->eff_l1e) ^ _PAGE_NX_BIT;
    accumulated_flags &= guest_l2e_get_flags(*gw->l2e) ^ _PAGE_NX_BIT;

    // Note that PAE guests do not have USER or RW or NX bits in their L3s.
    //
#if GUEST_PAGING_LEVELS == 3
    accumulated_flags &=
        ~_PAGE_PRESENT | (guest_l3e_get_flags(*gw->l3e) & _PAGE_PRESENT);
#elif GUEST_PAGING_LEVELS >= 4
    accumulated_flags &= guest_l3e_get_flags(*gw->l3e) ^ _PAGE_NX_BIT;
    accumulated_flags &= guest_l4e_get_flags(*gw->l4e) ^ _PAGE_NX_BIT;
#endif

    // Revert the NX bit back to its original polarity
    accumulated_flags ^= _PAGE_NX_BIT;

    // In 64-bit PV guests, the _PAGE_USER bit is implied in all guest
    // entries (since even the guest kernel runs in ring 3).
    //
    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_vcpu(v) )
        accumulated_flags |= _PAGE_USER;

    return accumulated_flags;
}
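
/* Worked example of the NX accumulation above (illustrative): suppose the
 * L1 has NX clear (executable) and the L2 has NX set.  XORing each level
 * with _PAGE_NX_BIT maps "NX set" to 0 and "NX clear" to 1, so the AND
 * yields 1 only if *every* level permits execution; the final XOR restores
 * the architectural polarity:
 *
 *     l1: NX=0 --xor--> 1
 *     l2: NX=1 --xor--> 0       AND = 0 --final xor--> NX=1 (no execute)
 */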


#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
/******************************************************************************
 * We implement a "fast path" for two special cases: faults that require
 * MMIO emulation, and faults where the guest PTE is not present.  We
 * record these as shadow l1 entries that have reserved bits set in
 * them, so we can spot them immediately in the fault handler and handle
 * them without needing to hold the shadow lock or walk the guest
 * pagetables.
 *
 * This is only feasible for PAE and 64-bit Xen: 32-bit non-PAE PTEs don't
 * have reserved bits that we can use for this.
 */

#define SH_L1E_MAGIC 0xffffffff00000001ULL
static inline int sh_l1e_is_magic(shadow_l1e_t sl1e)
{
    return ((sl1e.l1 & SH_L1E_MAGIC) == SH_L1E_MAGIC);
}

/* Guest not present: a single magic value */
static inline shadow_l1e_t sh_l1e_gnp(void) 
{
    return (shadow_l1e_t){ -1ULL };
}

static inline int sh_l1e_is_gnp(shadow_l1e_t sl1e) 
{
    return (sl1e.l1 == sh_l1e_gnp().l1);
}

/* MMIO: an invalid PTE that contains the GFN of the equivalent guest l1e.
 * We store 28 bits of GFN in bits 4:31 of the entry.
 * The present bit is set, and the U/S and R/W bits are taken from the guest.
 * Bit 3 is always 0, to differentiate from gnp above.  */
#define SH_L1E_MMIO_MAGIC       0xffffffff00000001ULL
#define SH_L1E_MMIO_MAGIC_MASK  0xffffffff00000009ULL
#define SH_L1E_MMIO_GFN_MASK    0x00000000fffffff0ULL
#define SH_L1E_MMIO_GFN_SHIFT   4

static inline shadow_l1e_t sh_l1e_mmio(gfn_t gfn, u32 gflags) 
{
    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC 
                             | (gfn_x(gfn) << SH_L1E_MMIO_GFN_SHIFT) 
                             | (gflags & (_PAGE_USER|_PAGE_RW))) };
}

static inline int sh_l1e_is_mmio(shadow_l1e_t sl1e) 
{
    return ((sl1e.l1 & SH_L1E_MMIO_MAGIC_MASK) == SH_L1E_MMIO_MAGIC);
}

static inline gfn_t sh_l1e_mmio_get_gfn(shadow_l1e_t sl1e) 
{
    return _gfn((sl1e.l1 & SH_L1E_MMIO_GFN_MASK) >> SH_L1E_MMIO_GFN_SHIFT);
}

static inline u32 sh_l1e_mmio_get_flags(shadow_l1e_t sl1e) 
{
    return (u32)((sl1e.l1 & (_PAGE_USER|_PAGE_RW)));
}
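
/* Illustrative round trip (a sketch, not part of the original interface):
 * encoding a GFN and guest flags into a magic MMIO entry and decoding them
 * back is lossless for GFNs that fit in the 28-bit field. */
static inline int sh_l1e_mmio_roundtrip_example(gfn_t gfn, u32 gflags)
{
    shadow_l1e_t sl1e = sh_l1e_mmio(gfn, gflags);
    return (sh_l1e_is_mmio(sl1e)
            && gfn_x(sh_l1e_mmio_get_gfn(sl1e)) == gfn_x(gfn)
            && sh_l1e_mmio_get_flags(sl1e) == (gflags & (_PAGE_USER|_PAGE_RW)));
}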

#else

#define sh_l1e_gnp() shadow_l1e_empty()
#define sh_l1e_mmio(_gfn, _flags) shadow_l1e_empty()
#define sh_l1e_is_magic(_e) (0)

#endif /* SHOPT_FAST_FAULT_PATH */


#endif /* _XEN_SHADOW_TYPES_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */