author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-11-22 18:44:08 +0100
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-11-22 18:44:08 +0100
commit     869e86d8e6a9f2d3665f6467d25bacad624b8d1e
tree       909327fd4c29d504badc1f9728e39e2c030ba84f
parent     49e566a9aaec934a8a7a530826ded0c033044799
Add -Wdeclaration-after-statement to Xen and tools build.
Fix the compile errors that result from this.

Signed-off-by: Keir Fraser <keir@xensource.com>
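For context: -Wdeclaration-after-statement makes gcc warn ("ISO C90 forbids mixed declarations and code") wherever a declaration follows a statement inside a block. The hunks below apply two recurring fixes: hoist the declaration to the top of the enclosing block, or open a new brace-delimited scope around it. A minimal hypothetical illustration, not code from this patch:

    /* Warns under -Wdeclaration-after-statement: */
    int count_masked(const unsigned int *tbl, int n)
    {
        if ( n <= 0 )
            return 0;
        int count = tbl[0] & 0xff;   /* declaration after a statement */
        return count + n;
    }

    /* Fix 1: hoist the declaration. */
    int count_masked_hoisted(const unsigned int *tbl, int n)
    {
        int count;

        if ( n <= 0 )
            return 0;
        count = tbl[0] & 0xff;
        return count + n;
    }

    /* Fix 2: wrap the late declaration in its own block scope,
       as the NDEBUG-only hunks below do. */
    int count_masked_scoped(const unsigned int *tbl, int n)
    {
        if ( n <= 0 )
            return 0;
        {
            int count = tbl[0] & 0xff;
            return count + n;
        }
    }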
-rw-r--r--  Config.mk                          3
-rw-r--r--  tools/libxc/xc_private.h           5
-rw-r--r--  tools/libxc/xg_private.h           2
-rw-r--r--  tools/xenstore/xenstore_client.c   4
-rw-r--r--  xen/arch/x86/audit.c              21
-rw-r--r--  xen/arch/x86/dm/vmx_vioapic.c      2
-rw-r--r--  xen/arch/x86/mm.c                 17
-rw-r--r--  xen/arch/x86/shadow.c             74
-rw-r--r--  xen/arch/x86/shadow32.c           94
-rw-r--r--  xen/arch/x86/shadow_public.c      35
-rw-r--r--  xen/arch/x86/vmx.c                51
-rw-r--r--  xen/arch/x86/x86_32/traps.c        2
-rw-r--r--  xen/common/sched_sedf.c           10
-rw-r--r--  xen/include/asm-x86/shadow.h       3
14 files changed, 171 insertions(+), 152 deletions(-)
diff --git a/Config.mk b/Config.mk
index 3b24b07abe..b537fa5d4b 100644
--- a/Config.mk
+++ b/Config.mk
@@ -8,6 +8,7 @@ XEN_TARGET_X86_PAE ?= n
# Tools to run on system hosting the build
HOSTCC = gcc
HOSTCFLAGS = -Wall -Werror -Wstrict-prototypes -O2 -fomit-frame-pointer
+HOSTCFLAGS += -Wdeclaration-after-statement
AS = $(CROSS_COMPILE)as
LD = $(CROSS_COMPILE)ld
@@ -38,6 +39,8 @@ EXTRA_INCLUDES += $(EXTRA_PREFIX)/include
EXTRA_LIB += $(EXTRA_PREFIX)/$(LIBDIR)
endif
+CFLAGS += -Wdeclaration-after-statement
+
LDFLAGS += $(foreach i, $(EXTRA_LIB), -L$(i))
CFLAGS += $(foreach i, $(EXTRA_INCLUDES), -I$(i))
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index 2a3164c1ac..8c19ecd218 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -21,9 +21,8 @@
reason, we must zero the privcmd_hypercall_t or dom0_op_t instance before a
call, if using valgrind. */
#ifdef VALGRIND
-#define DECLARE_HYPERCALL privcmd_hypercall_t hypercall; \
- memset(&hypercall, 0, sizeof(hypercall))
-#define DECLARE_DOM0_OP dom0_op_t op; memset(&op, 0, sizeof(op))
+#define DECLARE_HYPERCALL privcmd_hypercall_t hypercall = { 0 }
+#define DECLARE_DOM0_OP dom0_op_t op = { 0 }
#else
#define DECLARE_HYPERCALL privcmd_hypercall_t hypercall
#define DECLARE_DOM0_OP dom0_op_t op
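The memset() here could not simply move: DECLARE_HYPERCALL expands among the caller's own declarations, so a macro ending in a statement would itself trip the new warning. Switching to a { 0 } initializer zeroes every member as part of the declaration. A sketch of the equivalence, using a made-up struct rather than the real privcmd_hypercall_t:

    #include <string.h>

    /* Illustrative stand-in; not the real privcmd_hypercall_t layout. */
    typedef struct { unsigned long op; unsigned long arg[5]; } fake_hypercall_t;

    void old_style(void)
    {
        fake_hypercall_t hc;
        memset(&hc, 0, sizeof(hc));   /* a statement: any declaration
                                         following it would now warn */
    }

    void new_style(void)
    {
        fake_hypercall_t hc = { 0 };  /* all members zeroed within the
                                         declaration itself */
        (void)hc;
    }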
diff --git a/tools/libxc/xg_private.h b/tools/libxc/xg_private.h
index a2dac8b0f3..f96c187d95 100644
--- a/tools/libxc/xg_private.h
+++ b/tools/libxc/xg_private.h
@@ -20,7 +20,7 @@
reason, we must zero the dom0_op_t instance before a call, if using
valgrind. */
#ifdef VALGRIND
-#define DECLARE_DOM0_OP dom0_op_t op; memset(&op, 0, sizeof(op))
+#define DECLARE_DOM0_OP dom0_op_t op = { 0 }
#else
#define DECLARE_DOM0_OP dom0_op_t op
#endif
diff --git a/tools/xenstore/xenstore_client.c b/tools/xenstore/xenstore_client.c
index 2d36520653..f8e1f5c893 100644
--- a/tools/xenstore/xenstore_client.c
+++ b/tools/xenstore/xenstore_client.c
@@ -109,7 +109,7 @@ perform(int optind, int argc, char **argv, struct xs_handle *xsh,
necessary.
*/
- char *path = argv[optind];
+ char *slash, *path = argv[optind];
if (tidy) {
/* Copy path, because we can't modify argv because we will need it
@@ -123,7 +123,7 @@ perform(int optind, int argc, char **argv, struct xs_handle *xsh,
return 1;
}
- char *slash = strrchr(p, '/');
+ slash = strrchr(p, '/');
if (slash) {
char *val;
*slash = '\0';
diff --git a/xen/arch/x86/audit.c b/xen/arch/x86/audit.c
index 1eec298354..f63c7905aa 100644
--- a/xen/arch/x86/audit.c
+++ b/xen/arch/x86/audit.c
@@ -55,10 +55,11 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
void _adjust(struct pfn_info *page, int adjtype ADJUST_EXTRA_ARGS)
{
+ int count;
+
if ( adjtype )
{
- // adjust the type count
- //
+ /* adjust the type count */
int tcount = page->u.inuse.type_info & PGT_count_mask;
tcount += dir;
ttot++;
@@ -92,10 +93,8 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
page->u.inuse.type_info += dir;
}
- // adjust the general count
- //
- int count = page->count_info & PGC_count_mask;
- count += dir;
+ /* adjust the general count */
+ count = (page->count_info & PGC_count_mask) + dir;
ctot++;
if ( count < 0 )
@@ -124,6 +123,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
unsigned long *pt = map_domain_page(mfn);
int i;
+ u32 page_type;
for ( i = 0; i < l2limit; i++ )
{
@@ -147,8 +147,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
continue;
}
- u32 page_type = l1page->u.inuse.type_info & PGT_type_mask;
-
+ page_type = l1page->u.inuse.type_info & PGT_type_mask;
if ( page_type != PGT_l1_shadow )
{
printk("Audit %d: [Shadow L2 mfn=%lx i=%x] "
@@ -174,8 +173,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
continue;
}
- u32 page_type = l1page->u.inuse.type_info & PGT_type_mask;
-
+ page_type = l1page->u.inuse.type_info & PGT_type_mask;
if ( page_type == PGT_l2_page_table )
{
printk("Audit %d: [%x] Found %s Linear PT "
@@ -741,6 +739,7 @@ void _audit_domain(struct domain *d, int flags)
while ( list_ent != &d->page_list )
{
u32 page_type;
+ unsigned long pfn;
page = list_entry(list_ent, struct pfn_info, list);
mfn = page_to_pfn(page);
@@ -797,7 +796,7 @@ void _audit_domain(struct domain *d, int flags)
printk("out of sync page mfn=%lx is not a page table\n", mfn);
errors++;
}
- unsigned long pfn = __mfn_to_gpfn(d, mfn);
+ pfn = __mfn_to_gpfn(d, mfn);
if ( !__shadow_status(d, pfn, PGT_snapshot) )
{
printk("out of sync page mfn=%lx doesn't have a snapshot\n",
diff --git a/xen/arch/x86/dm/vmx_vioapic.c b/xen/arch/x86/dm/vmx_vioapic.c
index a54d9417fb..b955575e5e 100644
--- a/xen/arch/x86/dm/vmx_vioapic.c
+++ b/xen/arch/x86/dm/vmx_vioapic.c
@@ -52,8 +52,6 @@ static void ioapic_enable(vmx_vioapic_t *s, uint8_t enable)
static void ioapic_dump_redir(vmx_vioapic_t *s, uint8_t entry)
{
- ASSERT(s);
-
RedirStatus redir = s->redirtbl[entry];
VMX_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_dump_redir "
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e1083c8b48..ae46581953 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -521,10 +521,10 @@ get_page_from_l3e(
l3_pgentry_t l3e, unsigned long pfn,
struct domain *d, unsigned long vaddr)
{
- ASSERT( !shadow_mode_refcounts(d) );
-
int rc;
+ ASSERT(!shadow_mode_refcounts(d));
+
if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
return 1;
@@ -1880,19 +1880,18 @@ int do_mmuext_op(
case MMUEXT_SET_LDT:
{
+ unsigned long ptr = op.arg1.linear_addr;
+ unsigned long ents = op.arg2.nr_ents;
+
if ( shadow_mode_external(d) )
{
MEM_LOG("ignoring SET_LDT hypercall from external "
"domain %u", d->domain_id);
okay = 0;
- break;
}
-
- unsigned long ptr = op.arg1.linear_addr;
- unsigned long ents = op.arg2.nr_ents;
- if ( ((ptr & (PAGE_SIZE-1)) != 0) ||
- (ents > 8192) ||
- !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
+ else if ( ((ptr & (PAGE_SIZE-1)) != 0) ||
+ (ents > 8192) ||
+ !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
{
okay = 0;
MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index 14ed7ad4ed..daad0b68d9 100644
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -207,6 +207,7 @@ alloc_shadow_page(struct domain *d,
struct pfn_info *page;
unsigned long smfn;
int pin = 0;
+ void *l1, *lp;
// Currently, we only keep pre-zero'ed pages around for use as L1's...
// This will change. Soon.
@@ -232,19 +233,19 @@ alloc_shadow_page(struct domain *d,
if (!page)
goto no_shadow_page;
- void *l1_0 = map_domain_page(page_to_pfn(page));
- memset(l1_0, 0, PAGE_SIZE);
- unmap_domain_page(l1_0);
+ l1 = map_domain_page(page_to_pfn(page));
+ memset(l1, 0, PAGE_SIZE);
+ unmap_domain_page(l1);
- void *l1_1 = map_domain_page(page_to_pfn(page+1));
- memset(l1_1, 0, PAGE_SIZE);
- unmap_domain_page(l1_1);
+ l1 = map_domain_page(page_to_pfn(page+1));
+ memset(l1, 0, PAGE_SIZE);
+ unmap_domain_page(l1);
#else
page = alloc_domheap_page(NULL);
if (!page)
goto no_shadow_page;
- void *l1 = map_domain_page(page_to_pfn(page));
+ l1 = map_domain_page(page_to_pfn(page));
memset(l1, 0, PAGE_SIZE);
unmap_domain_page(l1);
#endif
@@ -255,7 +256,7 @@ alloc_shadow_page(struct domain *d,
if (!page)
goto no_shadow_page;
- void *l1 = map_domain_page(page_to_pfn(page));
+ l1 = map_domain_page(page_to_pfn(page));
memset(l1, 0, PAGE_SIZE);
unmap_domain_page(l1);
}
@@ -279,7 +280,7 @@ alloc_shadow_page(struct domain *d,
if (!page)
goto no_shadow_page;
- void *lp = map_domain_page(page_to_pfn(page));
+ lp = map_domain_page(page_to_pfn(page));
memset(lp, 0, PAGE_SIZE);
unmap_domain_page(lp);
}
@@ -588,9 +589,11 @@ static void shadow_map_l1_into_current_l2(unsigned long va)
}
#ifndef NDEBUG
- l2_pgentry_t old_sl2e;
- __shadow_get_l2e(v, va, &old_sl2e);
- ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
+ {
+ l2_pgentry_t old_sl2e;
+ __shadow_get_l2e(v, va, &old_sl2e);
+ ASSERT(!(l2e_get_flags(old_sl2e) & _PAGE_PRESENT));
+ }
#endif
#if CONFIG_PAGING_LEVELS >=3
@@ -952,14 +955,16 @@ __mark_mfn_out_of_sync(struct vcpu *v, unsigned long gpfn,
ASSERT(pfn_valid(mfn));
#ifndef NDEBUG
- u32 type = page->u.inuse.type_info & PGT_type_mask;
- if ( shadow_mode_refcounts(d) )
- {
- ASSERT(type == PGT_writable_page);
- }
- else
{
- ASSERT(type && (type < PGT_l4_page_table));
+ u32 type = page->u.inuse.type_info & PGT_type_mask;
+ if ( shadow_mode_refcounts(d) )
+ {
+ ASSERT(type == PGT_writable_page);
+ }
+ else
+ {
+ ASSERT(type && (type < PGT_l4_page_table));
+ }
}
#endif
@@ -1438,6 +1443,8 @@ static int resync_all(struct domain *d, u32 stype)
int need_flush = 0, external = shadow_mode_external(d);
int unshadow;
int changed;
+ u32 min_max_shadow, min_max_snapshot;
+ int min_shadow, max_shadow, min_snapshot, max_snapshot;
ASSERT(shadow_lock_is_acquired(d));
@@ -1466,7 +1473,7 @@ static int resync_all(struct domain *d, u32 stype)
continue;
}
- FSH_LOG("resyncing t=%08x gpfn=%lx gmfn=%lx smfn=%lx snapshot_mfn=%lx",
+ FSH_LOG("resyncing t=%08x gpfn=%lx gmfn=%lx smfn=%lx snapshot_mfn=%lx",
stype, entry->gpfn, entry->gmfn, smfn, entry->snapshot_mfn);
// Compare guest's new contents to its snapshot, validating
@@ -1482,16 +1489,16 @@ static int resync_all(struct domain *d, u32 stype)
unshadow = 0;
- u32 min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
- int min_shadow = SHADOW_MIN(min_max_shadow);
- int max_shadow = SHADOW_MAX(min_max_shadow);
+ min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
+ min_shadow = SHADOW_MIN(min_max_shadow);
+ max_shadow = SHADOW_MAX(min_max_shadow);
- u32 min_max_snapshot =
- pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
- int min_snapshot = SHADOW_MIN(min_max_snapshot);
- int max_snapshot = SHADOW_MAX(min_max_snapshot);
+ min_max_snapshot= pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
+ min_snapshot = SHADOW_MIN(min_max_snapshot);
+ max_snapshot = SHADOW_MAX(min_max_snapshot);
- switch ( stype ) {
+ switch ( stype )
+ {
case PGT_l1_shadow:
{
guest_l1_pgentry_t *guest1 = guest;
@@ -1680,9 +1687,9 @@ static int resync_all(struct domain *d, u32 stype)
changed = 0;
for ( i = 0; i < GUEST_ROOT_PAGETABLE_ENTRIES; i++ )
{
+ guest_root_pgentry_t new_root_e = guest_root[i];
if ( !is_guest_l4_slot(i) && !external )
continue;
- guest_root_pgentry_t new_root_e = guest_root[i];
if ( root_entry_has_changed(
new_root_e, snapshot_root[i], PAGE_FLAG_MASK))
{
@@ -1749,6 +1756,7 @@ static void sync_all(struct domain *d)
{
struct out_of_sync_entry *entry;
int need_flush = 0;
+ l1_pgentry_t *ppte, opte, npte;
perfc_incrc(shadow_sync_all);
@@ -1764,11 +1772,10 @@ static void sync_all(struct domain *d)
if ( entry->writable_pl1e & (sizeof(l1_pgentry_t)-1) )
continue;
- l1_pgentry_t *ppte = (l1_pgentry_t *)(
+ ppte = (l1_pgentry_t *)(
(char *)map_domain_page(entry->writable_pl1e >> PAGE_SHIFT) +
(entry->writable_pl1e & ~PAGE_MASK));
- l1_pgentry_t opte = *ppte;
- l1_pgentry_t npte = opte;
+ opte = npte = *ppte;
l1e_remove_flags(npte, _PAGE_RW);
if ( (l1e_get_flags(npte) & _PAGE_PRESENT) &&
@@ -2821,6 +2828,7 @@ static inline unsigned long init_bl2(l4_pgentry_t *spl4e, unsigned long smfn)
unsigned int count;
unsigned long sl2mfn;
struct pfn_info *page;
+ void *l2;
memset(spl4e, 0, PAGE_SIZE);
@@ -2835,7 +2843,7 @@ static inline unsigned long init_bl2(l4_pgentry_t *spl4e, unsigned long smfn)
for (count = 0; count < PDP_ENTRIES; count++)
{
sl2mfn = page_to_pfn(page+count);
- void *l2 = map_domain_page(sl2mfn);
+ l2 = map_domain_page(sl2mfn);
memset(l2, 0, PAGE_SIZE);
unmap_domain_page(l2);
spl4e[count] = l4e_from_pfn(sl2mfn, _PAGE_PRESENT);
diff --git a/xen/arch/x86/shadow32.c b/xen/arch/x86/shadow32.c
index e340ddf628..bcbbdd5cc2 100644
--- a/xen/arch/x86/shadow32.c
+++ b/xen/arch/x86/shadow32.c
@@ -208,6 +208,7 @@ alloc_shadow_page(struct domain *d,
struct pfn_info *page;
unsigned long smfn;
int pin = 0;
+ void *l1;
// Currently, we only keep pre-zero'ed pages around for use as L1's...
// This will change. Soon.
@@ -224,7 +225,7 @@ alloc_shadow_page(struct domain *d,
else
{
page = alloc_domheap_page(NULL);
- void *l1 = map_domain_page(page_to_pfn(page));
+ l1 = map_domain_page(page_to_pfn(page));
memset(l1, 0, PAGE_SIZE);
unmap_domain_page(l1);
}
@@ -558,6 +559,7 @@ static void free_shadow_pages(struct domain *d)
int i;
struct shadow_status *x;
struct vcpu *v;
+ struct list_head *list_ent, *tmp;
/*
* WARNING! The shadow page table must not currently be in use!
@@ -697,15 +699,14 @@ static void free_shadow_pages(struct domain *d)
xfree(mfn_list);
}
- // Now free the pre-zero'ed pages from the domain
- //
- struct list_head *list_ent, *tmp;
+ /* Now free the pre-zero'ed pages from the domain */
list_for_each_safe(list_ent, tmp, &d->arch.free_shadow_frames)
{
+ struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
+
list_del(list_ent);
perfc_decr(free_l1_pages);
- struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
free_domheap_page(page);
}
@@ -1218,6 +1219,11 @@ static void free_out_of_sync_entries(struct domain *d)
void __shadow_mode_disable(struct domain *d)
{
+ struct vcpu *v;
+#ifndef NDEBUG
+ int i;
+#endif
+
if ( unlikely(!shadow_mode_enabled(d)) )
return;
@@ -1225,7 +1231,6 @@ void __shadow_mode_disable(struct domain *d)
free_writable_pte_predictions(d);
#ifndef NDEBUG
- int i;
for ( i = 0; i < shadow_ht_buckets; i++ )
{
if ( d->arch.shadow_ht[i].gpfn_and_flags != 0 )
@@ -1242,11 +1247,8 @@ void __shadow_mode_disable(struct domain *d)
free_shadow_ht_entries(d);
free_out_of_sync_entries(d);
- struct vcpu *v;
for_each_vcpu(d, v)
- {
update_pagetables(v);
- }
}
static int shadow_mode_table_op(
@@ -1423,14 +1425,18 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
unsigned long
gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
- ASSERT( shadow_mode_translate(d) );
+ unsigned long va, tabpfn;
+ l1_pgentry_t *l1, l1e;
+ l2_pgentry_t *l2, l2e;
+
+ ASSERT(shadow_mode_translate(d));
perfc_incrc(gpfn_to_mfn_foreign);
- unsigned long va = gpfn << PAGE_SHIFT;
- unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
- l2_pgentry_t *l2 = map_domain_page(tabpfn);
- l2_pgentry_t l2e = l2[l2_table_offset(va)];
+ va = gpfn << PAGE_SHIFT;
+ tabpfn = pagetable_get_pfn(d->arch.phys_table);
+ l2 = map_domain_page(tabpfn);
+ l2e = l2[l2_table_offset(va)];
unmap_domain_page(l2);
if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
{
@@ -1438,8 +1444,8 @@ gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
d->domain_id, gpfn, l2e_get_intpte(l2e));
return INVALID_MFN;
}
- l1_pgentry_t *l1 = map_domain_page(l2e_get_pfn(l2e));
- l1_pgentry_t l1e = l1[l1_table_offset(va)];
+ l1 = map_domain_page(l2e_get_pfn(l2e));
+ l1e = l1[l1_table_offset(va)];
unmap_domain_page(l1);
#if 0
@@ -1634,9 +1640,11 @@ void shadow_map_l1_into_current_l2(unsigned long va)
}
#ifndef NDEBUG
- l2_pgentry_t old_sl2e;
- __shadow_get_l2e(v, va, &old_sl2e);
- ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
+ {
+ l2_pgentry_t old_sl2e;
+ __shadow_get_l2e(v, va, &old_sl2e);
+ ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
+ }
#endif
if ( !get_shadow_ref(sl1mfn) )
@@ -1840,14 +1848,16 @@ __shadow_mark_mfn_out_of_sync(struct vcpu *v, unsigned long gpfn,
ASSERT(pfn_valid(mfn));
#ifndef NDEBUG
- u32 type = page->u.inuse.type_info & PGT_type_mask;
- if ( shadow_mode_refcounts(d) )
- {
- ASSERT(type == PGT_writable_page);
- }
- else
{
- ASSERT(type && (type < PGT_l4_page_table));
+ u32 type = page->u.inuse.type_info & PGT_type_mask;
+ if ( shadow_mode_refcounts(d) )
+ {
+ ASSERT(type == PGT_writable_page);
+ }
+ else
+ {
+ ASSERT(type && (type < PGT_l4_page_table));
+ }
}
#endif
@@ -2329,6 +2339,8 @@ static int resync_all(struct domain *d, u32 stype)
int need_flush = 0, external = shadow_mode_external(d);
int unshadow;
int changed;
+ u32 min_max_shadow, min_max_snapshot;
+ int min_shadow, max_shadow, min_snapshot, max_snapshot;
ASSERT(shadow_lock_is_acquired(d));
@@ -2388,14 +2400,14 @@ static int resync_all(struct domain *d, u32 stype)
if ( !smfn )
break;
- u32 min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
- int min_shadow = SHADOW_MIN(min_max_shadow);
- int max_shadow = SHADOW_MAX(min_max_shadow);
+ min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
+ min_shadow = SHADOW_MIN(min_max_shadow);
+ max_shadow = SHADOW_MAX(min_max_shadow);
- u32 min_max_snapshot =
+ min_max_snapshot =
pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
- int min_snapshot = SHADOW_MIN(min_max_snapshot);
- int max_snapshot = SHADOW_MAX(min_max_snapshot);
+ min_snapshot = SHADOW_MIN(min_max_snapshot);
+ max_snapshot = SHADOW_MAX(min_max_snapshot);
changed = 0;
@@ -2454,13 +2466,11 @@ static int resync_all(struct domain *d, u32 stype)
changed = 0;
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
{
-#if CONFIG_X86_PAE
- BUG(); /* FIXME: need type_info */
-#endif
+ l2_pgentry_t new_pde = guest2[i];
+
if ( !is_guest_l2_slot(0,i) && !external )
continue;
- l2_pgentry_t new_pde = guest2[i];
if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK))
{
need_flush |= validate_pde_change(d, new_pde, &shadow2[i]);
@@ -2500,13 +2510,11 @@ static int resync_all(struct domain *d, u32 stype)
changed = 0;
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
{
-#if CONFIG_X86_PAE
- BUG(); /* FIXME: need type_info */
-#endif
+ l2_pgentry_t new_pde = guest2[i];
+
if ( !is_guest_l2_slot(0, i) && !external )
continue;
- l2_pgentry_t new_pde = guest2[i];
if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK) )
{
need_flush |= validate_hl2e_change(d, new_pde, &shadow2[i]);
@@ -2554,6 +2562,7 @@ void __shadow_sync_all(struct domain *d)
{
struct out_of_sync_entry *entry;
int need_flush = 0;
+ l1_pgentry_t *ppte, opte, npte;
perfc_incrc(shadow_sync_all);
@@ -2569,11 +2578,10 @@ void __shadow_sync_all(struct domain *d)
if ( entry->writable_pl1e & (sizeof(l1_pgentry_t)-1) )
continue;
- l1_pgentry_t *ppte = (l1_pgentry_t *)(
+ ppte = (l1_pgentry_t *)(
(char *)map_domain_page(entry->writable_pl1e >> PAGE_SHIFT) +
(entry->writable_pl1e & ~PAGE_MASK));
- l1_pgentry_t opte = *ppte;
- l1_pgentry_t npte = opte;
+ opte = npte = *ppte;
l1e_remove_flags(npte, _PAGE_RW);
if ( (l1e_get_flags(npte) & _PAGE_PRESENT) &&
diff --git a/xen/arch/x86/shadow_public.c b/xen/arch/x86/shadow_public.c
index 06d5bddb4f..36454f9e51 100644
--- a/xen/arch/x86/shadow_public.c
+++ b/xen/arch/x86/shadow_public.c
@@ -786,6 +786,7 @@ void free_shadow_pages(struct domain *d)
int i;
struct shadow_status *x;
struct vcpu *v;
+ struct list_head *list_ent, *tmp;
/*
* WARNING! The shadow page table must not currently be in use!
@@ -884,15 +885,14 @@ void free_shadow_pages(struct domain *d)
xfree(mfn_list);
}
- // Now free the pre-zero'ed pages from the domain
- //
- struct list_head *list_ent, *tmp;
+ /* Now free the pre-zero'ed pages from the domain. */
list_for_each_safe(list_ent, tmp, &d->arch.free_shadow_frames)
{
+ struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
+
list_del(list_ent);
perfc_decr(free_l1_pages);
- struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
if (d->arch.ops->guest_paging_levels == PAGING_L2)
{
#if CONFIG_PAGING_LEVELS >=4
@@ -912,6 +912,11 @@ void free_shadow_pages(struct domain *d)
void __shadow_mode_disable(struct domain *d)
{
+ struct vcpu *v;
+#ifndef NDEBUG
+ int i;
+#endif
+
if ( unlikely(!shadow_mode_enabled(d)) )
return;
@@ -919,7 +924,6 @@ void __shadow_mode_disable(struct domain *d)
free_writable_pte_predictions(d);
#ifndef NDEBUG
- int i;
for ( i = 0; i < shadow_ht_buckets; i++ )
{
if ( d->arch.shadow_ht[i].gpfn_and_flags != 0 )
@@ -936,11 +940,8 @@ void __shadow_mode_disable(struct domain *d)
free_shadow_ht_entries(d);
free_out_of_sync_entries(d);
- struct vcpu *v;
for_each_vcpu(d, v)
- {
update_pagetables(v);
- }
}
@@ -1608,14 +1609,18 @@ remove_shadow(struct domain *d, unsigned long gpfn, u32 stype)
unsigned long
gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
- ASSERT( shadow_mode_translate(d) );
+ unsigned long va, tabpfn;
+ l1_pgentry_t *l1, l1e;
+ l2_pgentry_t *l2, l2e;
+
+ ASSERT(shadow_mode_translate(d));
perfc_incrc(gpfn_to_mfn_foreign);
- unsigned long va = gpfn << PAGE_SHIFT;
- unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
- l2_pgentry_t *l2 = map_domain_page(tabpfn);
- l2_pgentry_t l2e = l2[l2_table_offset(va)];
+ va = gpfn << PAGE_SHIFT;
+ tabpfn = pagetable_get_pfn(d->arch.phys_table);
+ l2 = map_domain_page(tabpfn);
+ l2e = l2[l2_table_offset(va)];
unmap_domain_page(l2);
if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
{
@@ -1623,8 +1628,8 @@ gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
d->domain_id, gpfn, l2e_get_intpte(l2e));
return INVALID_MFN;
}
- l1_pgentry_t *l1 = map_domain_page(l2e_get_pfn(l2e));
- l1_pgentry_t l1e = l1[l1_table_offset(va)];
+ l1 = map_domain_page(l2e_get_pfn(l2e));
+ l1e = l1[l1_table_offset(va)];
unmap_domain_page(l1);
#if 0
diff --git a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
index ca29e0836e..a61eb42fb1 100644
--- a/xen/arch/x86/vmx.c
+++ b/xen/arch/x86/vmx.c
@@ -129,15 +129,14 @@ static u32 msr_data_index[VMX_MSR_COUNT] =
*/
void vmx_load_msrs(struct vcpu *n)
{
- struct msr_state *host_state;
- host_state = &percpu_msr[smp_processor_id()];
+ struct msr_state *host_state = &percpu_msr[smp_processor_id()];
+ int i;
if ( !vmx_switch_on )
return;
- while (host_state->flags){
- int i;
-
+ while ( host_state->flags )
+ {
i = find_first_set_bit(host_state->flags);
wrmsrl(msr_data_index[i], host_state->msr_items[i]);
clear_bit(i, &host_state->flags);
@@ -146,11 +145,10 @@ void vmx_load_msrs(struct vcpu *n)
static void vmx_save_init_msrs(void)
{
- struct msr_state *host_state;
- host_state = &percpu_msr[smp_processor_id()];
+ struct msr_state *host_state = &percpu_msr[smp_processor_id()];
int i;
- for (i = 0; i < VMX_MSR_COUNT; i++)
+ for ( i = 0; i < VMX_MSR_COUNT; i++ )
rdmsrl(msr_data_index[i], host_state->msr_items[i]);
}
@@ -516,23 +514,20 @@ static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs)
cpuid(input, &eax, &ebx, &ecx, &edx);
- if (input == 1) {
+ if ( input == 1 )
+ {
if ( vmx_apic_support(v->domain) &&
- !vlapic_global_enabled((VLAPIC(v))) )
+ !vlapic_global_enabled((VLAPIC(v))) )
clear_bit(X86_FEATURE_APIC, &edx);
-#ifdef __i386__
- clear_bit(X86_FEATURE_PSE, &edx);
- clear_bit(X86_FEATURE_PAE, &edx);
- clear_bit(X86_FEATURE_PSE36, &edx);
-#else
- struct vcpu *v = current;
- if (v->domain->arch.ops->guest_paging_levels == PAGING_L2)
+
+#ifdef __x86_64__
+ if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
+#endif
{
clear_bit(X86_FEATURE_PSE, &edx);
clear_bit(X86_FEATURE_PAE, &edx);
clear_bit(X86_FEATURE_PSE36, &edx);
}
-#endif
/* Unsupportable for virtualised CPUs. */
clear_bit(X86_FEATURE_VMXE & 31, &ecx);
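Beyond the declaration fix, the cpuid hunk above folds the duplicated __i386__/__x86_64__ branches into one block and drops a shadowed struct vcpu *v: with the #ifdef guarding only the if, i386 builds always clear the PSE/PAE/PSE36 bits while x86_64 builds clear them just for 2-level guests. A hedged sketch of that preprocessor idiom (names are illustrative):

    static unsigned int mask_features(unsigned int edx, int guest_levels)
    {
    #ifdef __x86_64__
        if ( guest_levels == 2 )   /* x86_64: condition applies */
    #endif
        {                          /* i386: block always executes */
            edx &= ~(1u << 3);     /* clear one illustrative feature bit */
        }
        return edx;
    }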
@@ -1084,6 +1079,7 @@ static int vmx_set_cr0(unsigned long value)
unsigned long eip;
int paging_enabled;
unsigned long vm_entry_value;
+
/*
* CR0: We don't want to lose PE and PG.
*/
@@ -1140,14 +1136,17 @@ static int vmx_set_cr0(unsigned long value)
#endif
}
- unsigned long crn;
- /* update CR4's PAE if needed */
- __vmread(GUEST_CR4, &crn);
- if ( (!(crn & X86_CR4_PAE)) &&
- test_bit(VMX_CPU_STATE_PAE_ENABLED,
- &v->arch.arch_vmx.cpu_state)){
- VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
- __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
+ {
+ unsigned long crn;
+ /* update CR4's PAE if needed */
+ __vmread(GUEST_CR4, &crn);
+ if ( (!(crn & X86_CR4_PAE)) &&
+ test_bit(VMX_CPU_STATE_PAE_ENABLED,
+ &v->arch.arch_vmx.cpu_state) )
+ {
+ VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
+ __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
+ }
}
#endif
/*
diff --git a/xen/arch/x86/x86_32/traps.c b/xen/arch/x86/x86_32/traps.c
index 8579066f30..9dbe94178e 100644
--- a/xen/arch/x86/x86_32/traps.c
+++ b/xen/arch/x86/x86_32/traps.c
@@ -167,6 +167,7 @@ asmlinkage void smp_deferred_nmi(struct cpu_user_regs regs)
void __init percpu_traps_init(void)
{
+ struct tss_struct *tss = &doublefault_tss;
asmlinkage int hypercall(void);
if ( smp_processor_id() != 0 )
@@ -184,7 +185,6 @@ void __init percpu_traps_init(void)
* Make a separate task for double faults. This will get us debug output if
* we blow the kernel stack.
*/
- struct tss_struct *tss = &doublefault_tss;
memset(tss, 0, sizeof(*tss));
tss->ds = __HYPERVISOR_DS;
tss->es = __HYPERVISOR_DS;
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index b94143a4a5..9ae98f129e 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -704,11 +704,12 @@ static struct task_slice sedf_do_schedule(s_time_t now)
struct list_head *waitq = WAITQ(cpu);
#if (EXTRA > EXTRA_OFF)
struct sedf_vcpu_info *inf = EDOM_INFO(current);
- struct list_head *extraq[] = {EXTRAQ(cpu, EXTRA_PEN_Q),
- EXTRAQ(cpu, EXTRA_UTIL_Q)};
+ struct list_head *extraq[] = {
+ EXTRAQ(cpu, EXTRA_PEN_Q), EXTRAQ(cpu, EXTRA_UTIL_Q)};
#endif
- struct task_slice ret;
- /*int i = 0;*/
+ struct sedf_vcpu_info *runinf, *waitinf;
+ struct task_slice ret;
+
/*idle tasks don't need any of the following stuf*/
if (is_idle_task(current->domain))
goto check_waitq;
@@ -737,7 +738,6 @@ static struct task_slice sedf_do_schedule(s_time_t now)
/*now simply pick the first domain from the runqueue, which has the
earliest deadline, because the list is sorted*/
- struct sedf_vcpu_info *runinf, *waitinf;
if (!list_empty(runq)) {
runinf = list_entry(runq->next,struct sedf_vcpu_info,list);
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 2845ceb0b7..05544a8bcd 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -173,11 +173,12 @@ extern void vmx_shadow_clear_state(struct domain *);
static inline int page_is_page_table(struct pfn_info *page)
{
struct domain *owner = page_get_owner(page);
+ u32 type_info;
if ( owner && shadow_mode_refcounts(owner) )
return page->count_info & PGC_page_table;
- u32 type_info = page->u.inuse.type_info & PGT_type_mask;
+ type_info = page->u.inuse.type_info & PGT_type_mask;
return type_info && (type_info <= PGT_l4_page_table);
}