author    kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2006-04-21 17:35:15 +0100
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2006-04-21 17:35:15 +0100
commit    ea608cc36d26fb5b100630b98cbe28555dad8434 (patch)
tree      61f68ff13710ba157d8216da7739bc3a1a387b57
parent    dadf394db4b56349c0aea6f72e406e5d3e2e80ac (diff)
Pull the Linux percpu interface into Xen. Implemented for x86 and used
to eliminate the percpu_ctxt struct from arch/x86/domain.c.

Signed-off-by: Keir Fraser <keir@xensource.com>
-rw-r--r--  xen/arch/x86/domain.c          | 38
-rw-r--r--  xen/arch/x86/setup.c           | 39
-rw-r--r--  xen/arch/x86/smpboot.c         |  2
-rw-r--r--  xen/arch/x86/x86_32/xen.lds.S  | 10
-rw-r--r--  xen/arch/x86/x86_64/xen.lds.S  | 10
-rw-r--r--  xen/include/asm-x86/current.h  | 18
-rw-r--r--  xen/include/asm-x86/percpu.h   | 20
-rw-r--r--  xen/include/xen/compiler.h     | 13
-rw-r--r--  xen/include/xen/percpu.h       | 15
9 files changed, 132 insertions(+), 33 deletions(-)
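
The message above names the interface but does not show it, so here is a
minimal usage sketch. The macros (DEFINE_PER_CPU, per_cpu, this_cpu) are the
ones this patch introduces; the variable and function are hypothetical, for
illustration only.

    #include <xen/percpu.h>

    /* 'irq_count' and 'example_usage' are hypothetical examples. */
    static DEFINE_PER_CPU(unsigned long, irq_count);  /* one copy per CPU */

    static void example_usage(void)
    {
        this_cpu(irq_count)++;         /* the executing CPU's copy */
        per_cpu(irq_count, 3) = 0;     /* CPU 3's copy, from any CPU */
    }
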
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index a657fba2eb..e590abf347 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -21,6 +21,12 @@
#include <xen/softirq.h>
#include <xen/grant_table.h>
#include <xen/iocap.h>
+#include <xen/kernel.h>
+#include <xen/multicall.h>
+#include <xen/irq.h>
+#include <xen/event.h>
+#include <xen/console.h>
+#include <xen/percpu.h>
#include <asm/regs.h>
#include <asm/mc146818rtc.h>
#include <asm/system.h>
@@ -30,22 +36,12 @@
#include <asm/i387.h>
#include <asm/mpspec.h>
#include <asm/ldt.h>
-#include <xen/irq.h>
-#include <xen/event.h>
#include <asm/shadow.h>
-#include <xen/console.h>
-#include <xen/elf.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/msr.h>
-#include <xen/kernel.h>
-#include <xen/multicall.h>
-struct percpu_ctxt {
- struct vcpu *curr_vcpu;
- unsigned int dirty_segment_mask;
-} __cacheline_aligned;
-static struct percpu_ctxt percpu_ctxt[NR_CPUS];
+DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
static void paravirt_ctxt_switch_from(struct vcpu *v);
static void paravirt_ctxt_switch_to(struct vcpu *v);
@@ -123,11 +119,6 @@ void dump_pageframe_info(struct domain *d)
}
}
-void set_current_execstate(struct vcpu *v)
-{
- percpu_ctxt[smp_processor_id()].curr_vcpu = v;
-}
-
struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
{
struct vcpu *v;
@@ -459,6 +450,7 @@ void new_thread(struct vcpu *d,
* allowing load_segments() to avoid some expensive segment loads and
* MSR writes.
*/
+static DEFINE_PER_CPU(unsigned int, dirty_segment_mask);
#define DIRTY_DS 0x01
#define DIRTY_ES 0x02
#define DIRTY_FS 0x04
@@ -473,8 +465,8 @@ static void load_segments(struct vcpu *n)
unsigned int dirty_segment_mask, cpu = smp_processor_id();
/* Load and clear the dirty segment mask. */
- dirty_segment_mask = percpu_ctxt[cpu].dirty_segment_mask;
- percpu_ctxt[cpu].dirty_segment_mask = 0;
+ dirty_segment_mask = per_cpu(dirty_segment_mask, cpu);
+ per_cpu(dirty_segment_mask, cpu) = 0;
/* Either selector != 0 ==> reload. */
if ( unlikely((dirty_segment_mask & DIRTY_DS) | nctxt->user_regs.ds) )
@@ -601,7 +593,7 @@ static void save_segments(struct vcpu *v)
dirty_segment_mask |= DIRTY_GS_BASE_USER;
}
- percpu_ctxt[smp_processor_id()].dirty_segment_mask = dirty_segment_mask;
+ this_cpu(dirty_segment_mask) = dirty_segment_mask;
}
#define switch_kernel_stack(v) ((void)0)
@@ -638,7 +630,7 @@ static void __context_switch(void)
{
struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
unsigned int cpu = smp_processor_id();
- struct vcpu *p = percpu_ctxt[cpu].curr_vcpu;
+ struct vcpu *p = per_cpu(curr_vcpu, cpu);
struct vcpu *n = current;
ASSERT(p != n);
@@ -692,7 +684,7 @@ static void __context_switch(void)
cpu_clear(cpu, p->domain->domain_dirty_cpumask);
cpu_clear(cpu, p->vcpu_dirty_cpumask);
- percpu_ctxt[cpu].curr_vcpu = n;
+ per_cpu(curr_vcpu, cpu) = n;
}
@@ -716,7 +708,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
set_current(next);
- if ( (percpu_ctxt[cpu].curr_vcpu == next) || is_idle_vcpu(next) )
+ if ( (per_cpu(curr_vcpu, cpu) == next) || is_idle_vcpu(next) )
{
local_irq_enable();
}
@@ -758,7 +750,7 @@ int __sync_lazy_execstate(void)
local_irq_save(flags);
- switch_required = (percpu_ctxt[smp_processor_id()].curr_vcpu != current);
+ switch_required = (this_cpu(curr_vcpu) != current);
if ( switch_required )
__context_switch();
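
Every hunk in domain.c follows the same conversion: the NR_CPUS-indexed
percpu_ctxt array is split into individual per-CPU variables, and indexed
accesses become per_cpu()/this_cpu() lookups. A hedged before/after sketch:

    /* Before (removed above): one array slot per CPU, both fields
     * sharing a cacheline-aligned struct. */
    /*   percpu_ctxt[smp_processor_id()].curr_vcpu = v;        */

    /* After: one per-CPU variable per field; each CPU's copies live
     * in that CPU's own slot, so no sharing between CPUs. */
    DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
    static DEFINE_PER_CPU(unsigned int, dirty_segment_mask);
    /*   this_cpu(curr_vcpu) = v;                              */
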
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 721b6781d8..70f2822524 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -14,6 +14,7 @@
#include <xen/domain_page.h>
#include <xen/compile.h>
#include <xen/gdbstub.h>
+#include <xen/percpu.h>
#include <public/version.h>
#include <asm/bitops.h>
#include <asm/smp.h>
@@ -159,6 +160,38 @@ void discard_initial_images(void)
init_domheap_pages(initial_images_start, initial_images_end);
}
+extern char __per_cpu_start[], __per_cpu_data_end[], __per_cpu_end[];
+
+static void percpu_init_areas(void)
+{
+ unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;
+
+ BUG_ON(data_size > PERCPU_SIZE);
+
+ for ( i = 1; i < NR_CPUS; i++ )
+ memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
+ __per_cpu_start,
+ data_size);
+}
+
+static void percpu_free_unused_areas(void)
+{
+ unsigned int i, first_unused;
+
+ /* Find first unused CPU number. */
+ for ( i = 0; i < NR_CPUS; i++ )
+ if ( !cpu_online(i) )
+ break;
+ first_unused = i;
+
+ /* Check that there are no holes in cpu_online_map. */
+ for ( ; i < NR_CPUS; i++ )
+ BUG_ON(cpu_online(i));
+
+ init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT),
+ __pa(__per_cpu_end));
+}
+
void __init __start_xen(multiboot_info_t *mbi)
{
char *cmdline;
@@ -209,6 +242,8 @@ void __init __start_xen(multiboot_info_t *mbi)
EARLY_FAIL();
}
+ percpu_init_areas();
+
xenheap_phys_end = opt_xenheap_megabytes << 20;
if ( mbi->flags & MBI_MEMMAP )
@@ -405,7 +440,7 @@ void __init __start_xen(multiboot_info_t *mbi)
BUG_ON(idle_domain == NULL);
set_current(idle_domain->vcpu[0]);
- set_current_execstate(idle_domain->vcpu[0]);
+ this_cpu(curr_vcpu) = idle_domain->vcpu[0];
idle_vcpu[0] = current;
paging_init();
@@ -482,6 +517,8 @@ void __init __start_xen(multiboot_info_t *mbi)
printk("Brought up %ld CPUs\n", (long)num_online_cpus());
smp_cpus_done(max_cpus);
+ percpu_free_unused_areas();
+
initialise_gdb(); /* could be moved earlier */
do_initcalls();
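
percpu_init_areas() depends on a fixed stride between copies: CPU n's data
begins n << PERCPU_SHIFT bytes past __per_cpu_start. A sketch of that
arithmetic (the linker symbols are real; percpu_area_of() is illustrative,
not in the patch):

    extern char __per_cpu_start[];

    /* Address of CPU n's per-CPU area. CPU 0 uses the master copy
     * built into the image; percpu_init_areas() memcpy()s it into
     * slots 1..NR_CPUS-1, and percpu_free_unused_areas() returns
     * the slots of CPUs that never came online to the xenheap. */
    static inline char *percpu_area_of(unsigned int cpu)
    {
        return __per_cpu_start + ((unsigned long)cpu << PERCPU_SHIFT);
    }
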
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 81faef1f2a..d199041d30 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -531,7 +531,7 @@ void __devinit start_secondary(void *unused)
set_processor_id(cpu);
set_current(idle_vcpu[cpu]);
- set_current_execstate(idle_vcpu[cpu]);
+ this_cpu(curr_vcpu) = idle_vcpu[cpu];
percpu_traps_init();
diff --git a/xen/arch/x86/x86_32/xen.lds.S b/xen/arch/x86/x86_32/xen.lds.S
index fb76cbc73d..f58bd191f5 100644
--- a/xen/arch/x86/x86_32/xen.lds.S
+++ b/xen/arch/x86/x86_32/xen.lds.S
@@ -5,6 +5,7 @@
#include <xen/config.h>
#include <asm/page.h>
+#include <asm/percpu.h>
#undef ENTRY
#undef ALIGN
@@ -56,9 +57,16 @@ SECTIONS
__initcall_start = .;
.initcall.init : { *(.initcall.init) } :text
__initcall_end = .;
- . = ALIGN(STACK_SIZE);
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
+ __per_cpu_start = .;
+ .data.percpu : { *(.data.percpu) } :text
+ __per_cpu_data_end = .;
+ . = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
+ . = ALIGN(STACK_SIZE);
+ __per_cpu_end = .;
+
__bss_start = .; /* BSS */
.bss : {
*(.bss.stack_aligned)
diff --git a/xen/arch/x86/x86_64/xen.lds.S b/xen/arch/x86/x86_64/xen.lds.S
index d8685201ab..92d395d5f9 100644
--- a/xen/arch/x86/x86_64/xen.lds.S
+++ b/xen/arch/x86/x86_64/xen.lds.S
@@ -3,6 +3,7 @@
#include <xen/config.h>
#include <asm/page.h>
+#include <asm/percpu.h>
#undef ENTRY
#undef ALIGN
@@ -54,9 +55,16 @@ SECTIONS
__initcall_start = .;
.initcall.init : { *(.initcall.init) } :text
__initcall_end = .;
- . = ALIGN(STACK_SIZE);
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
+ __per_cpu_start = .;
+ .data.percpu : { *(.data.percpu) } :text
+ __per_cpu_data_end = .;
+ . = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
+ . = ALIGN(STACK_SIZE);
+ __per_cpu_end = .;
+
__bss_start = .; /* BSS */
.bss : {
*(.bss.stack_aligned)
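
Both linker scripts produce the same layout: the initialized per-CPU data is
emitted once, then the location counter jumps forward to reserve a slot for
every possible CPU. Sketched as a memory map (not part of the patch):

    /*
     * __per_cpu_start                         CPU 0's data (master copy)
     * __per_cpu_data_end                      end of initialized data
     * __per_cpu_start + (1 << PERCPU_SHIFT)   CPU 1's slot, filled at boot
     *   ...
     * __per_cpu_start + (NR_CPUS << PERCPU_SHIFT), ALIGNed to STACK_SIZE
     *                                       = __per_cpu_end
     */
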
diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index 5803141f2a..9b0b6e5245 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -16,7 +16,7 @@ struct vcpu;
struct cpu_info {
struct cpu_user_regs guest_cpu_user_regs;
unsigned int processor_id;
- struct vcpu *current_ed;
+ struct vcpu *current_vcpu;
};
static inline struct cpu_info *get_cpu_info(void)
@@ -29,12 +29,12 @@ static inline struct cpu_info *get_cpu_info(void)
return cpu_info;
}
-#define get_current() (get_cpu_info()->current_ed)
-#define set_current(_ed) (get_cpu_info()->current_ed = (_ed))
+#define get_current() (get_cpu_info()->current_vcpu)
+#define set_current(vcpu) (get_cpu_info()->current_vcpu = (vcpu))
#define current (get_current())
#define get_processor_id() (get_cpu_info()->processor_id)
-#define set_processor_id(_id) (get_cpu_info()->processor_id = (_id))
+#define set_processor_id(id) (get_cpu_info()->processor_id = (id))
#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
@@ -51,8 +51,14 @@ static inline struct cpu_info *get_cpu_info(void)
"mov %0,%%"__OP"sp; jmp "STR(__fn) \
: : "r" (guest_cpu_user_regs()) : "memory" )
-#define schedule_tail(_ed) (((_ed)->arch.schedule_tail)(_ed))
+#define schedule_tail(vcpu) (((vcpu)->arch.schedule_tail)(vcpu))
-extern void set_current_execstate(struct vcpu *v);
+#include <xen/percpu.h>
+/*
+ * Which VCPU's state is currently running on each CPU?
+ * This is not necessarily the same as 'current', as a CPU may be
+ * executing a lazy state switch.
+ */
+DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
#endif /* __X86_CURRENT_H__ */
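
The comment just added distinguishes the per-CPU curr_vcpu (whose register
state is actually loaded) from current (what the scheduler chose). A sketch
of how __sync_lazy_execstate() in this patch reconciles the two:

    /* Modelled on __sync_lazy_execstate() from the domain.c hunk. */
    static int sync_lazy_execstate_sketch(void)
    {
        /* The two diverge when, e.g., an idle vCPU runs without
         * flushing the previous guest's register state. */
        if ( this_cpu(curr_vcpu) == current )
            return 0;               /* state already coherent */
        __context_switch();         /* load 'current', retire curr_vcpu */
        return 1;
    }
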
diff --git a/xen/include/asm-x86/percpu.h b/xen/include/asm-x86/percpu.h
new file mode 100644
index 0000000000..d76206587f
--- /dev/null
+++ b/xen/include/asm-x86/percpu.h
@@ -0,0 +1,20 @@
+#ifndef __X86_PERCPU_H__
+#define __X86_PERCPU_H__
+
+#define PERCPU_SHIFT 12
+#define PERCPU_SIZE (1UL << PERCPU_SHIFT)
+
+/* Separate out the type, so (int[3], foo) works. */
+#define DEFINE_PER_CPU(type, name) \
+ __attribute__((__section__(".data.percpu"))) \
+ __typeof__(type) per_cpu__##name
+
+/* var is in discarded region: offset to particular copy we want */
+#define per_cpu(var, cpu) \
+ (*RELOC_HIDE(&per_cpu__##var, ((unsigned int)(cpu))<<PERCPU_SHIFT))
+#define __get_cpu_var(var) \
+ (per_cpu(var, smp_processor_id()))
+
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+
+#endif /* __X86_PERCPU_H__ */
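
How the three macros pair up in practice (a sketch; 'foo' is hypothetical):

    /* In a header, for users of the variable: */
    DECLARE_PER_CPU(int, foo);   /* expands to: extern int per_cpu__foo; */

    /* In exactly one .c file: */
    DEFINE_PER_CPU(int, foo);    /* emits per_cpu__foo into .data.percpu */

    /* per_cpu(foo, 2) then expands (roughly) to
     *   *RELOC_HIDE(&per_cpu__foo, 2u << PERCPU_SHIFT)
     * i.e. the master copy's address plus two slots, landing in the
     * area that percpu_init_areas() populated for CPU 2. */
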
diff --git a/xen/include/xen/compiler.h b/xen/include/xen/compiler.h
index 29acdc59e8..d4e5a43b2e 100644
--- a/xen/include/xen/compiler.h
+++ b/xen/include/xen/compiler.h
@@ -25,4 +25,17 @@
#define __must_check
#endif
+/* This macro obfuscates arithmetic on a variable address so that gcc
+ shouldn't recognize the original var, and make assumptions about it */
+/*
+ * Versions of the ppc64 compiler before 4.1 had a bug where use of
+ * RELOC_HIDE could trash r30. The bug can be worked around by changing
+ * the inline assembly constraint from =g to =r, in this particular
+ * case either is valid.
+ */
+#define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
+ (typeof(ptr)) (__ptr + (off)); })
+
#endif /* __LINUX_COMPILER_H */
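
The asm trick matters because plain pointer arithmetic lets gcc track
provenance: a pointer visibly derived from &per_cpu__foo may be assumed to
stay inside that one object, and an access pages away is undefined behaviour
the optimizer can exploit. Laundering the address through a register breaks
that chain. A compilable sketch ('per_cpu__foo' is declared here only for
illustration):

    extern int per_cpu__foo;     /* hypothetical master copy */

    int read_cpu_copy(unsigned long off)
    {
        /* RELOC_HIDE's "=r"(__ptr) : "0"(ptr) constraints force the
         * address through a register gcc cannot see into, so no
         * object-bounds assumption survives the cast inside it. */
        return *RELOC_HIDE(&per_cpu__foo, off);
    }
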
diff --git a/xen/include/xen/percpu.h b/xen/include/xen/percpu.h
new file mode 100644
index 0000000000..88fba30e8a
--- /dev/null
+++ b/xen/include/xen/percpu.h
@@ -0,0 +1,15 @@
+#ifndef __XEN_PERCPU_H__
+#define __XEN_PERCPU_H__
+
+#include <xen/config.h>
+#include <xen/smp.h>
+#include <asm/percpu.h>
+
+/* Preferred on Xen. Also see arch-defined per_cpu(). */
+#define this_cpu(var) __get_cpu_var(var)
+
+/* Linux compatibility. */
+#define get_cpu_var(var) this_cpu(var)
+#define put_cpu_var(var)
+
+#endif /* __XEN_PERCPU_H__ */
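
A final note on the compatibility shims: in Linux, get_cpu_var()/put_cpu_var()
bracket a region with preemption disabled, but hypervisor context in Xen is
never preempted, so the bracket collapses to a plain this_cpu() access and
put_cpu_var() expands to nothing. A usage sketch ('pkt_count' is
hypothetical):

    static DEFINE_PER_CPU(unsigned long, pkt_count);

    static void compat_usage(void)
    {
        get_cpu_var(pkt_count)++;    /* same as this_cpu(pkt_count)++ */
        put_cpu_var(pkt_count);      /* no-op: expands to nothing */
    }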