From 33c8483360bba3536dc678a94e7f70cb75828066 Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Sun, 16 Mar 2008 14:11:34 +0000
Subject: x86: Allow bitop functions to be applied only to fields of at least
 4 bytes.

Otherwise the 'longword' processor instructions used will overlap with
adjacent fields with unpredictable consequences.

This change requires some code fixup and just a few casts (mainly when
operating on guest-shared fields which cannot be changed, and which by
observation are clearly safe).

Based on ideas from Jan Beulich
Signed-off-by: Keir Fraser
---
 xen/arch/x86/domain.c               |   4 +-
 xen/arch/x86/hvm/hvm.c              |   4 +-
 xen/arch/x86/hvm/svm/vmcb.c         |  14 +-
 xen/arch/x86/hvm/vlapic.c           |  26 ++--
 xen/arch/x86/hvm/vmx/vmcs.c         |  12 +-
 xen/arch/x86/hvm/vmx/vpmu_core2.c   |  16 +-
 xen/arch/x86/irq.c                  |   9 +-
 xen/arch/x86/mm/paging.c            |  11 +-
 xen/arch/x86/mm/shadow/private.h    |   2 +-
 xen/common/domain.c                 |   2 +-
 xen/common/event_channel.c          |  14 +-
 xen/common/keyhandler.c             |   6 +-
 xen/common/schedule.c               |   2 +-
 xen/drivers/passthrough/vtd/iommu.c |   7 +-
 xen/drivers/video/vesa.c            |   2 +-
 xen/include/asm-x86/bitops.h        | 283 ++++++++++++++++++++++--------------
 xen/include/asm-x86/event.h         |   5 +-
 xen/include/asm-x86/grant_table.h   |   2 +-
 xen/include/asm-x86/hvm/support.h   |   2 +-
 xen/include/asm-x86/hvm/svm/vmcb.h  |   2 +-
 xen/include/asm-x86/hvm/vmx/vmcs.h  |   2 +-
 xen/include/asm-x86/shared.h        |   8 +-
 xen/include/xen/shared.h            |  36 ++---
 23 files changed, 266 insertions(+), 205 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 39e6d8efd2..d177e814f9 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -830,7 +830,7 @@ unmap_vcpu_info(struct vcpu *v)
     mfn = v->arch.vcpu_info_mfn;
     unmap_domain_page_global(v->vcpu_info);
 
-    v->vcpu_info = shared_info_addr(d, vcpu_info[v->vcpu_id]);
+    v->vcpu_info = (void *)&shared_info(d, vcpu_info[v->vcpu_id]);
     v->arch.vcpu_info_mfn = INVALID_MFN;
 
     put_page_and_type(mfn_to_page(mfn));
@@ -888,7 +888,7 @@ map_vcpu_info(struct vcpu *v, unsigned long mfn, unsigned offset)
      */
     vcpu_info(v, evtchn_upcall_pending) = 1;
     for ( i = 0; i < BITS_PER_GUEST_LONG(d); i++ )
-        set_bit(i, vcpu_info_addr(v, evtchn_pending_sel));
+        set_bit(i, &vcpu_info(v, evtchn_pending_sel));
 
     /*
      * Only bother to update time for the current vcpu. If we're
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 14edc025cb..f1aa4fed34 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -59,8 +59,8 @@ integer_param("hvm_debug", opt_hvm_debug_level);
 struct hvm_function_table hvm_funcs __read_mostly;
 
 /* I/O permission bitmap is globally shared by all HVM guests. */
-char __attribute__ ((__section__ (".bss.page_aligned")))
-    hvm_io_bitmap[3*PAGE_SIZE];
+unsigned long __attribute__ ((__section__ (".bss.page_aligned")))
+    hvm_io_bitmap[3*PAGE_SIZE/BYTES_PER_LONG];
 
 void hvm_enable(struct hvm_function_table *fns)
 {
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 790e8fc800..0cf8347d85 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -80,27 +80,27 @@ struct host_save_area *alloc_host_save_area(void)
 
 void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr)
 {
-    char *msr_bitmap = v->arch.hvm_svm.msrpm;
+    unsigned long *msr_bitmap = v->arch.hvm_svm.msrpm;
 
     /*
      * See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
      */
     if ( msr <= 0x1fff )
     {
-        __clear_bit(msr*2, msr_bitmap + 0x000);
-        __clear_bit(msr*2+1, msr_bitmap + 0x000);
+        __clear_bit(msr*2, msr_bitmap + 0x000/BYTES_PER_LONG);
+        __clear_bit(msr*2+1, msr_bitmap + 0x000/BYTES_PER_LONG);
     }
     else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
     {
         msr &= 0x1fff;
-        __clear_bit(msr*2, msr_bitmap + 0x800);
-        __clear_bit(msr*2+1, msr_bitmap + 0x800);
+        __clear_bit(msr*2, msr_bitmap + 0x800/BYTES_PER_LONG);
+        __clear_bit(msr*2+1, msr_bitmap + 0x800/BYTES_PER_LONG);
     }
     else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )
     {
         msr &= 0x1fff;
-        __clear_bit(msr*2, msr_bitmap + 0x1000);
-        __clear_bit(msr*2+1, msr_bitmap + 0x1000);
+        __clear_bit(msr*2, msr_bitmap + 0x1000/BYTES_PER_LONG);
+        __clear_bit(msr*2+1, msr_bitmap + 0x1000/BYTES_PER_LONG);
     }
 }
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index a52a44f628..071f9b3c50 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -83,15 +83,17 @@ static unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
  */
 
 #define VEC_POS(v) ((v)%32)
-#define REG_POS(v) (((v)/32)* 0x10)
-#define vlapic_test_and_set_vector(vec, bitmap)                 \
-    test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
-#define vlapic_test_and_clear_vector(vec, bitmap)               \
-    test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
-#define vlapic_set_vector(vec, bitmap)                          \
-    set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
-#define vlapic_clear_vector(vec, bitmap)                        \
-    clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
+#define REG_POS(v) (((v)/32) * 0x10)
+#define vlapic_test_and_set_vector(vec, bitmap)                 \
+    test_and_set_bit(VEC_POS(vec),                              \
+                     (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_test_and_clear_vector(vec, bitmap)               \
+    test_and_clear_bit(VEC_POS(vec),                            \
+                       (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_set_vector(vec, bitmap)                          \
+    set_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
+#define vlapic_clear_vector(vec, bitmap)                        \
+    clear_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
 
 static int vlapic_find_highest_vector(void *bitmap)
 {
@@ -112,12 +114,14 @@ static int vlapic_find_highest_vector(void *bitmap)
 
 static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic)
 {
-    return vlapic_test_and_set_vector(vector, &vlapic->regs->data[APIC_IRR]);
+    return vlapic_test_and_set_vector(
+        vector, (unsigned long *)&vlapic->regs->data[APIC_IRR]);
 }
 
 static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
 {
-    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
+    vlapic_clear_vector(
+        vector, (unsigned long *)&vlapic->regs->data[APIC_IRR]);
 }
 
 static int vlapic_find_highest_irr(struct vlapic *vlapic)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index cd0c844887..3650abed64 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -413,7 +413,7 @@ static void vmx_set_host_env(struct vcpu *v)
 
 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr)
 {
-    char *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
+    unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
 
     /* VMX MSR bitmap supported?
      */
     if ( msr_bitmap == NULL )
@@ -426,14 +426,14 @@ void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr)
      */
     if ( msr <= 0x1fff )
     {
-        __clear_bit(msr, msr_bitmap + 0x000); /* read-low */
-        __clear_bit(msr, msr_bitmap + 0x800); /* write-low */
+        __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */
+        __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */
     }
     else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
     {
         msr &= 0x1fff;
-        __clear_bit(msr, msr_bitmap + 0x400); /* read-high */
-        __clear_bit(msr, msr_bitmap + 0xc00); /* write-high */
+        __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */
+        __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */
     }
 }
 
@@ -456,7 +456,7 @@ static int construct_vmcs(struct vcpu *v)
     /* MSR access bitmap. */
     if ( cpu_has_vmx_msr_bitmap )
     {
-        char *msr_bitmap = alloc_xenheap_page();
+        unsigned long *msr_bitmap = alloc_xenheap_page();
 
         if ( msr_bitmap == NULL )
             return -ENOMEM;
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 015b9e6ed1..5539874774 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -101,7 +101,7 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
     return 0;
 }
 
-static void core2_vpmu_set_msr_bitmap(char *msr_bitmap)
+static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap)
 {
     int i;
 
@@ -109,12 +109,14 @@ static void core2_vpmu_set_msr_bitmap(char *msr_bitmap)
     for ( i = 0; i < core2_counters.num; i++ )
     {
         clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
-        clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap+0x800);
+        clear_bit(msraddr_to_bitpos(core2_counters.msr[i]),
+                  msr_bitmap + 0x800/BYTES_PER_LONG);
     }
     for ( i = 0; i < core2_get_pmc_count(); i++ )
     {
         clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
-        clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap+0x800);
+        clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
+                  msr_bitmap + 0x800/BYTES_PER_LONG);
     }
 
     /* Allow Read PMU Non-global Controls Directly.
      */
@@ -124,19 +126,21 @@ static void core2_vpmu_set_msr_bitmap(char *msr_bitmap)
         clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
 }
 
-static void core2_vpmu_unset_msr_bitmap(char *msr_bitmap)
+static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
 {
     int i;
 
     for ( i = 0; i < core2_counters.num; i++ )
     {
         set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
-        set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap+0x800);
+        set_bit(msraddr_to_bitpos(core2_counters.msr[i]),
+                msr_bitmap + 0x800/BYTES_PER_LONG);
     }
     for ( i = 0; i < core2_get_pmc_count(); i++ )
     {
         set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
-        set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap+0x800);
+        set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i),
+                msr_bitmap + 0x800/BYTES_PER_LONG);
     }
     for ( i = 0; i < core2_ctrls.num; i++ )
         set_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 77983a6f0a..b5087f6633 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -362,13 +362,12 @@ int pirq_guest_eoi(struct domain *d, int irq)
 int pirq_guest_unmask(struct domain *d)
 {
     unsigned int   irq;
-    shared_info_t *s = d->shared_info;
 
     for ( irq = find_first_bit(d->pirq_mask, NR_IRQS);
           irq < NR_IRQS;
           irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
     {
-        if ( !test_bit(d->pirq_to_evtchn[irq], __shared_info_addr(d, s, evtchn_mask)) )
+        if ( !test_bit(d->pirq_to_evtchn[irq], &shared_info(d, evtchn_mask)) )
             __pirq_guest_eoi(d, irq);
     }
 
@@ -660,13 +659,13 @@ static void dump_irqs(unsigned char key)
             printk("%u(%c%c%c%c)",
                    d->domain_id,
                    (test_bit(d->pirq_to_evtchn[irq],
-                             shared_info_addr(d, evtchn_pending)) ?
+                             &shared_info(d, evtchn_pending)) ?
                     'P' : '-'),
                    (test_bit(d->pirq_to_evtchn[irq]/BITS_PER_GUEST_LONG(d),
-                             vcpu_info_addr(d->vcpu[0], evtchn_pending_sel)) ?
+                             &vcpu_info(d->vcpu[0], evtchn_pending_sel)) ?
                     'S' : '-'),
                    (test_bit(d->pirq_to_evtchn[irq],
-                             shared_info_addr(d, evtchn_mask)) ?
+                             &shared_info(d, evtchn_mask)) ?
                     'M' : '-'),
                    (test_bit(irq, d->pirq_mask) ?
                     'M' : '-'));
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index c22818e755..e6c3cbb9e6 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -114,7 +114,8 @@ static mfn_t paging_new_log_dirty_page(struct domain *d, void **mapping_p)
     return mfn;
 }
 
-static mfn_t paging_new_log_dirty_leaf(struct domain *d, uint8_t **leaf_p)
+static mfn_t paging_new_log_dirty_leaf(
+    struct domain *d, unsigned long **leaf_p)
 {
     mfn_t mfn = paging_new_log_dirty_page(d, (void **)leaf_p);
     if ( mfn_valid(mfn) )
@@ -264,7 +265,7 @@ void paging_mark_dirty(struct domain *d, unsigned long guest_mfn)
     mfn_t gmfn;
     int changed;
     mfn_t mfn, *l4, *l3, *l2;
-    uint8_t *l1;
+    unsigned long *l1;
     int i1, i2, i3, i4;
 
     gmfn = _mfn(guest_mfn);
@@ -341,7 +342,7 @@ int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
     int rv = 0, clean = 0, peek = 1;
     unsigned long pages = 0;
     mfn_t *l4, *l3, *l2;
-    uint8_t *l1;
+    unsigned long *l1;
     int i4, i3, i2;
 
     domain_pause(d);
@@ -399,7 +400,7 @@ int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
                   (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
                   i2++ )
             {
-                static uint8_t zeroes[PAGE_SIZE];
+                static unsigned long zeroes[PAGE_SIZE/BYTES_PER_LONG];
                 unsigned int bytes = PAGE_SIZE;
                 l1 = ((l2 && mfn_valid(l2[i2])) ?
                       map_domain_page(mfn_x(l2[i2])) : zeroes);
@@ -408,7 +409,7 @@ int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
                 if ( likely(peek) )
                 {
                     if ( copy_to_guest_offset(sc->dirty_bitmap, pages >> 3,
-                                              l1, bytes) != 0 )
+                                              (uint8_t *)l1, bytes) != 0 )
                     {
                         rv = -EFAULT;
                         goto out;
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 0c9edf63a2..68d0c8e0ab 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -483,7 +483,7 @@ sh_mfn_is_dirty(struct domain *d, mfn_t gmfn)
 {
     unsigned long pfn;
     mfn_t mfn, *l4, *l3, *l2;
-    uint8_t *l1;
+    unsigned long *l1;
     int rv;
 
     ASSERT(shadow_mode_log_dirty(d));
diff --git a/xen/common/domain.c b/xen/common/domain.c
index e891c3893c..806468c052 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -154,7 +154,7 @@ struct vcpu *alloc_vcpu(
     if ( !is_idle_domain(d) )
     {
         set_bit(_VPF_down, &v->pause_flags);
-        v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]);
+        v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
     }
 
     if ( sched_init_vcpu(v, cpu_id) != 0 )
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 7777822fb6..365adf4652 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -539,7 +539,6 @@ out:
 void evtchn_set_pending(struct vcpu *v, int port)
 {
     struct domain *d = v->domain;
-    shared_info_t *s = d->shared_info;
 
     /*
      * The following bit operations must happen in strict order.
@@ -548,12 +547,12 @@ void evtchn_set_pending(struct vcpu *v, int port)
      * others may require explicit memory barriers.
      */
 
-    if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) )
+    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
         return;
 
-    if ( !test_bit        (port, __shared_info_addr(d, s, evtchn_mask)) &&
+    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
-                          vcpu_info_addr(v, evtchn_pending_sel)) )
+                          &vcpu_info(v, evtchn_pending_sel)) )
     {
         vcpu_mark_events_pending(v);
     }
@@ -750,7 +749,6 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
 static long evtchn_unmask(evtchn_unmask_t *unmask)
 {
     struct domain *d = current->domain;
-    shared_info_t *s = d->shared_info;
     int            port = unmask->port;
     struct vcpu   *v;
 
@@ -768,10 +766,10 @@ static long evtchn_unmask(evtchn_unmask_t *unmask)
     /*
      * These operations must happen in strict order. Based on
      * include/xen/event.h:evtchn_set_pending().
      */
-    if ( test_and_clear_bit(port, __shared_info_addr(d, s, evtchn_mask)) &&
-         test_bit          (port, __shared_info_addr(d, s, evtchn_pending)) &&
+    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
+         test_bit          (port, &shared_info(d, evtchn_pending)) &&
          !test_and_set_bit (port / BITS_PER_GUEST_LONG(d),
-                            vcpu_info_addr(v, evtchn_pending_sel)) )
+                            &vcpu_info(v, evtchn_pending_sel)) )
     {
         vcpu_mark_events_pending(v);
     }
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 14ed246703..b414452562 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -201,12 +201,12 @@ static void dump_domains(unsigned char key)
             printk("Notifying guest (virq %d, port %d, stat %d/%d/%d)\n",
                    VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG],
                    test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
-                            shared_info_addr(d, evtchn_pending)),
+                            &shared_info(d, evtchn_pending)),
                    test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
-                            shared_info_addr(d, evtchn_mask)),
+                            &shared_info(d, evtchn_mask)),
                    test_bit(v->virq_to_evtchn[VIRQ_DEBUG] /
                             BITS_PER_GUEST_LONG(d),
-                            vcpu_info_addr(v, evtchn_pending_sel)));
+                            &vcpu_info(v, evtchn_pending_sel)));
             send_guest_vcpu_virq(v, VIRQ_DEBUG);
         }
     }
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 3e80e4b3eb..4248ae9ea8 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -365,7 +365,7 @@ static long do_poll(struct sched_poll *sched_poll)
             goto out;
 
         rc = 0;
-        if ( test_bit(port, shared_info_addr(d, evtchn_pending)) )
+        if ( test_bit(port, &shared_info(d, evtchn_pending)) )
             goto out;
     }
 
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index bb93a06102..c569101f9e 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -39,8 +39,8 @@
 #define domain_iommu_domid(d) ((d)->arch.hvm_domain.hvm_iommu.iommu_domid)
 
 static spinlock_t domid_bitmap_lock;    /* protect domain id bitmap */
-static int domid_bitmap_size;           /* domain id bitmap size in bit */
-static void *domid_bitmap;              /* iommu domain id bitmap */
+static int domid_bitmap_size;           /* domain id bitmap size in bits */
+static unsigned long *domid_bitmap;     /* iommu domain id bitmap */
 
 #define DID_FIELD_WIDTH 16
 #define DID_HIGH_OFFSET 8
@@ -1885,7 +1885,8 @@ int iommu_setup(void)
 
     /* Allocate domain id bitmap, and set bit 0 as reserved */
     domid_bitmap_size = cap_ndoms(iommu->cap);
-    domid_bitmap = xmalloc_bytes(domid_bitmap_size / 8);
+    domid_bitmap = xmalloc_array(unsigned long,
+                                 BITS_TO_LONGS(domid_bitmap_size));
     if ( domid_bitmap == NULL )
         goto error;
     memset(domid_bitmap, 0, domid_bitmap_size / 8);
diff --git a/xen/drivers/video/vesa.c b/xen/drivers/video/vesa.c
index d18476421c..e36e358789 100644
--- a/xen/drivers/video/vesa.c
+++ b/xen/drivers/video/vesa.c
@@ -219,7 +219,7 @@ static void vesa_show_line(
                  ((font->width + 7) >> 3));
         for ( b = font->width; b--; )
         {
-            pixel = test_bit(b, bits) ? pixel_on : 0;
+            pixel = (*bits & (1u<<b)) ? pixel_on : 0;
diff --git a/xen/include/asm-x86/bitops.h b/xen/include/asm-x86/bitops.h
[... earlier bitops.h hunks missing from this copy ...]
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+static inline int constant_test_bit(int nr, const volatile void *addr)
 {
-    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+    return ((1U << (nr & 31)) &
+            (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
 }
 
-static __inline__ int variable_test_bit(int nr, const volatile void * addr)
+static inline int variable_test_bit(int nr, const volatile void *addr)
 {
-    int oldbit;
+    int oldbit;
 
-    __asm__ __volatile__(
-        "btl %2,%1\n\tsbbl %0,%0"
-        :"=r" (oldbit)
-        :"m" (CONST_ADDR),"dIr" (nr));
-    return oldbit;
+    asm volatile (
+        "btl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit)
+        : "m" (CONST_ADDR), "Ir" (nr) : "memory" );
+    return oldbit;
 }
 
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
+#define test_bit(nr, addr) ({                           \
+    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
+    (__builtin_constant_p(nr) ?                         \
+     constant_test_bit((nr),(addr)) :                   \
+     variable_test_bit((nr),(addr)));                   \
+})
 
 extern unsigned int __find_first_bit(
     const unsigned long *addr, unsigned int size);
@@ -275,8 +334,8 @@ extern unsigned int __find_next_zero_bit(
 /* return index of first bit set in val or BITS_PER_LONG when no bit is set */
 static inline unsigned int __scanbit(unsigned long val)
 {
-    __asm__ ( "bsf %1,%0" : "=r" (val) : "r" (val), "0" (BITS_PER_LONG) );
-    return (unsigned int)val;
+    asm ( "bsf %1,%0" : "=r" (val) : "r" (val), "0" (BITS_PER_LONG) );
+    return (unsigned int)val;
 }
 
 /**
@@ -335,10 +394,10 @@ static inline unsigned int __scanbit(unsigned long val)
  * Returns the bit-number of the first set bit. If no bits are set then the
  * result is undefined.
  */
-static __inline__ unsigned int find_first_set_bit(unsigned long word)
+static inline unsigned int find_first_set_bit(unsigned long word)
 {
-    __asm__ ( "bsf %1,%0" : "=r" (word) : "r" (word) );
-    return (unsigned int)word;
+    asm ( "bsf %1,%0" : "=r" (word) : "r" (word) );
+    return (unsigned int)word;
 }
 
 /**
@@ -349,10 +408,10 @@ static inline unsigned int find_first_set_bit(unsigned long word)
  */
 static inline unsigned long ffz(unsigned long word)
 {
-    __asm__("bsf %1,%0"
-            :"=r" (word)
-            :"r" (~word));
-    return word;
+    asm ( "bsf %1,%0"
+          :"=r" (word)
+          :"r" (~word));
+    return word;
 }
 
 /**
@@ -365,13 +424,13 @@ static inline unsigned long ffz(unsigned long word)
  */
 static inline int ffs(unsigned long x)
 {
-    long r;
+    long r;
 
-    __asm__("bsf %1,%0\n\t"
-            "jnz 1f\n\t"
-            "mov $-1,%0\n"
-            "1:" : "=r" (r) : "rm" (x));
-    return (int)r+1;
+    asm ( "bsf %1,%0\n\t"
+          "jnz 1f\n\t"
+          "mov $-1,%0\n"
+          "1:" : "=r" (r) : "rm" (x));
+    return (int)r+1;
 }
 
 /**
@@ -382,13 +441,13 @@ static inline int ffs(unsigned long x)
  */
 static inline int fls(unsigned long x)
 {
-    long r;
+    long r;
 
-    __asm__("bsr %1,%0\n\t"
-            "jnz 1f\n\t"
-            "mov $-1,%0\n"
-            "1:" : "=r" (r) : "rm" (x));
-    return (int)r+1;
+    asm ( "bsr %1,%0\n\t"
+          "jnz 1f\n\t"
+          "mov $-1,%0\n"
+          "1:" : "=r" (r) : "rm" (x));
+    return (int)r+1;
 }
 
 /**
diff --git a/xen/include/asm-x86/event.h b/xen/include/asm-x86/event.h
index 6b1bf6a52d..b1323089b1 100644
--- a/xen/include/asm-x86/event.h
+++ b/xen/include/asm-x86/event.h
@@ -30,7 +30,10 @@ static inline void vcpu_kick(struct vcpu *v)
 
 static inline void vcpu_mark_events_pending(struct vcpu *v)
 {
-    if ( test_and_set_bit(0, &vcpu_info(v, evtchn_upcall_pending)) )
+    int already_pending = test_and_set_bit(
+        0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
+
+    if ( already_pending )
         return;
 
     if ( is_hvm_vcpu(v) )
diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-x86/grant_table.h
index d7e3c3754f..3a7fb2ab74 100644
--- a/xen/include/asm-x86/grant_table.h
+++ b/xen/include/asm-x86/grant_table.h
@@ -35,7 +35,7 @@ int replace_grant_host_mapping(
 
 static inline void gnttab_clear_flag(unsigned long nr, uint16_t *addr)
 {
-    clear_bit(nr, addr);
+    clear_bit(nr, (unsigned long *)addr);
 }
 
 /* Foreign mappings of HVM-guest pages do not modify the type count. */
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index d2da0e87c9..cbdea537fc 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -78,7 +78,7 @@ extern unsigned int opt_hvm_debug_level;
 #define HVM_DBG_LOG(level, _f, _a...)
 #endif
 
-extern char hvm_io_bitmap[];
+extern unsigned long hvm_io_bitmap[];
 
 void hvm_enable(struct hvm_function_table *);
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index 6dfe05da29..04fd5e0f12 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -448,7 +448,7 @@ struct arch_svm_struct {
     struct vmcb_struct *vmcb;
     u64    vmcb_pa;
     u64    asid_generation; /* ASID tracking, moved here for cache locality. */
-    char *msrpm;
+    unsigned long *msrpm;
     int    launch_core;
     bool_t vmcb_in_sync;    /* VMCB sync'ed with VMSAVE? */
 };
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index db69542a35..9ce2d2a38b 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -81,7 +81,7 @@ struct arch_vmx_struct {
     unsigned long        cstar;
 #endif
 
-    char                *msr_bitmap;
+    unsigned long       *msr_bitmap;
     unsigned int         msr_count;
     struct vmx_msr_entry *msr_area;
     unsigned int         host_msr_count;
diff --git a/xen/include/asm-x86/shared.h b/xen/include/asm-x86/shared.h
index 565aaddaaa..ceca564d62 100644
--- a/xen/include/asm-x86/shared.h
+++ b/xen/include/asm-x86/shared.h
@@ -3,9 +3,9 @@
 
 #ifdef CONFIG_COMPAT
 
-#define nmi_reason(d) (!has_32bit_shinfo(d) ?                              \
-                       (void *)&(d)->shared_info->native.arch.nmi_reason : \
-                       (void *)&(d)->shared_info->compat.arch.nmi_reason)
+#define nmi_reason(d) (!has_32bit_shinfo(d) ?                              \
+                       (u32 *)&(d)->shared_info->native.arch.nmi_reason :  \
+                       (u32 *)&(d)->shared_info->compat.arch.nmi_reason)
 
 #define GET_SET_SHARED(type, field)                             \
 static inline type arch_get_##field(const struct domain *d)     \
@@ -41,7 +41,7 @@ static inline void arch_set_##field(struct vcpu *v,             \
 
 #else
 
-#define nmi_reason(d) ((void *)&(d)->shared_info->arch.nmi_reason)
+#define nmi_reason(d) ((u32 *)&(d)->shared_info->arch.nmi_reason)
 
 #define GET_SET_SHARED(type, field)                             \
 static inline type arch_get_##field(const struct domain *d)     \
diff --git a/xen/include/xen/shared.h b/xen/include/xen/shared.h
index 216d72116d..9738a49621 100644
--- a/xen/include/xen/shared.h
+++ b/xen/include/xen/shared.h
@@ -12,44 +12,36 @@ typedef union {
     struct compat_shared_info compat;
 } shared_info_t;
 
-#define __shared_info(d, s, field)      (*(!has_32bit_shinfo(d) ?      \
-                                           &(s)->native.field :        \
-                                           &(s)->compat.field))
-#define __shared_info_addr(d, s, field) (!has_32bit_shinfo(d) ?        \
-                                         (void *)&(s)->native.field :  \
-                                         (void *)&(s)->compat.field)
-
+/*
+ * Compat field is never larger than native field, so cast to that as it
+ * is the largest memory range it is safe for the caller to modify without
+ * further discrimination between compat and native cases.
+ */
+#define __shared_info(d, s, field)                                      \
+    (*(!has_32bit_shinfo(d) ?                                           \
+       (typeof(&(s)->compat.field))&(s)->native.field :                 \
+       (typeof(&(s)->compat.field))&(s)->compat.field))
 #define shared_info(d, field)                   \
     __shared_info(d, (d)->shared_info, field)
-#define shared_info_addr(d, field)                      \
-    __shared_info_addr(d, (d)->shared_info, field)
 
 typedef union {
     struct vcpu_info native;
     struct compat_vcpu_info compat;
 } vcpu_info_t;
 
-#define vcpu_info(v, field) (*(!has_32bit_shinfo((v)->domain) ?        \
-                               &(v)->vcpu_info->native.field :         \
-                               &(v)->vcpu_info->compat.field))
-#define vcpu_info_addr(v, field) (!has_32bit_shinfo((v)->domain) ?         \
-                                  (void *)&(v)->vcpu_info->native.field :  \
-                                  (void *)&(v)->vcpu_info->compat.field)
+/* As above, cast to compat field type. */
+#define vcpu_info(v, field)                                                 \
+    (*(!has_32bit_shinfo((v)->domain) ?                                     \
+       (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->native.field : \
+       (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->compat.field))
 
 #else
 
 typedef struct shared_info shared_info_t;
-
-#define __shared_info(d, s, field)      ((s)->field)
-#define __shared_info_addr(d, s, field) ((void *)&(s)->field)
-
 #define shared_info(d, field)           ((d)->shared_info->field)
-#define shared_info_addr(d, field)      ((void *)&(d)->shared_info->field)
 
 typedef struct vcpu_info vcpu_info_t;
-
 #define vcpu_info(v, field)             ((v)->vcpu_info->field)
-#define vcpu_info_addr(v, field)        ((void *)&(v)->vcpu_info->field)
 
 #endif
-- 
cgit v1.2.3
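
A note on the hazard the commit message describes, for readers who want to
see it in isolation. The sketch below is not part of the patch -- the struct
and function names are hypothetical -- but on little-endian x86 it shows how
a 32-bit read-modify-write aimed at a 1-byte field silently rewrites the
bytes that follow it, which is exactly why the patch forbids bitops on
fields narrower than 4 bytes:

#include <stdint.h>
#include <stdio.h>

struct example {
    uint8_t  flags;      /* intended bitop target: only 1 byte wide */
    uint8_t  neighbour;  /* shares the 32-bit word starting at &flags */
    uint16_t more;
};

/* What a "longword" set_bit effectively does: a 4-byte access spanning
 * whatever sits after the field. (Aliasing rules are bent here for
 * illustration, much as kernel-style bitops do.) */
static void longword_set_bit(int nr, void *addr)
{
    *(uint32_t *)addr |= (uint32_t)1 << nr;
}

int main(void)
{
    struct example e = { 0, 0, 0 };

    /* "Bit 9 of flags" does not exist; the store lands in e.neighbour. */
    longword_set_bit(9, &e.flags);
    printf("flags=%#x neighbour=%#x\n",          /* flags=0 neighbour=0x2 */
           (unsigned)e.flags, (unsigned)e.neighbour);
    return 0;
}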
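
The new test_bit() wrapper visible above relies on bitop_bad_size() and
__bitop_bad_size(), whose definitions fall in the portion of the bitops.h
diff missing from this copy. Below is a minimal reconstruction of the idiom,
under the assumption (not confirmed by the surviving hunks) that
bitop_bad_size(addr) is simply sizeof(*(addr)) < 4; the my_set_bit names are
placeholders rather than Xen's own:

/* If the operand is narrower than 4 bytes, the statically-taken branch
 * keeps a call to a function that is deliberately never defined, so the
 * build fails at link time; otherwise the compiler folds the dead branch
 * away and no reference to __bitop_bad_size survives. */
extern void __bitop_bad_size(void);   /* intentionally left undefined */
#define bitop_bad_size(addr) (sizeof(*(addr)) < 4)

static inline void my_set_bit(int nr, volatile void *addr)
{
    /* Stand-in for the real locked "btsl" implementation. */
    ((volatile unsigned int *)addr)[nr >> 5] |= 1u << (nr & 31);
}

/* The function-like macro shadows the function; its self-reference is not
 * re-expanded by the preprocessor, so it calls the function above. */
#define my_set_bit(nr, addr) ({                         \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    my_set_bit(nr, addr);                               \
})

void ok(unsigned long *l)      { my_set_bit(3, l); }  /* compiles and links */
/* void bad(unsigned short *w) { my_set_bit(3, w); }     -- fails to link   */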
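
Several hunks above turn "msr_bitmap + 0x800" into
"msr_bitmap + 0x800/BYTES_PER_LONG". The 0x400/0x800/0xc00/0x1000 byte
offsets come from the hardware-defined bitmap layout; once the pointer type
widens from char * to unsigned long *, pointer arithmetic counts longwords,
so the byte offsets must be rescaled. A self-contained check of the
equivalence (assuming BYTES_PER_LONG is sizeof(unsigned long); all the
offsets used are multiples of it on both 32- and 64-bit builds, so the
division is exact):

#include <assert.h>

#define BYTES_PER_LONG sizeof(unsigned long)

int main(void)
{
    unsigned long bitmap[0x1000 / sizeof(unsigned long)] = { 0 };
    unsigned char *bytes = (unsigned char *)bitmap;

    /* The longword-scaled offset must name the same address the old
     * char-pointer arithmetic reached. */
    assert((void *)(bitmap + 0x800/BYTES_PER_LONG) == (void *)(bytes + 0x800));
    assert((void *)(bitmap + 0x400/BYTES_PER_LONG) == (void *)(bytes + 0x400));
    return 0;
}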
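
Lastly, the xen/shared.h comment about casting to the compat field's type
can be demonstrated standalone. The two layouts below are invented stand-ins
for the real native/compat shared-info structures: because the compat field
is never wider than the native one, a pointer of the compat field's type can
only ever touch bytes present in both layouts, whichever one the guest
actually uses.

#include <stdint.h>
#include <stdio.h>

struct native_info { uint64_t evtchn_pending_sel; };  /* 64-bit guest */
struct compat_info { uint32_t evtchn_pending_sel; };  /* 32-bit guest */
typedef union {
    struct native_info native;
    struct compat_info compat;
} info_t;

/* Mirrors the patched __shared_info()/vcpu_info(): always hand back a
 * pointer of the (smaller) compat field's type. */
#define info_field(p, is_native, field)                     \
    (*(is_native                                            \
       ? (typeof(&(p)->compat.field))&(p)->native.field     \
       : (typeof(&(p)->compat.field))&(p)->compat.field))

int main(void)
{
    info_t i = { .native = { 0 } };

    info_field(&i, 1, evtchn_pending_sel) = 0xff;   /* 32-bit store only */
    /* Little-endian: writes the low half of the native 64-bit field. */
    printf("%#llx\n", (unsigned long long)i.native.evtchn_pending_sel);
    return 0;
}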