path: root/xen/include/asm-x86/smp.h
author    kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>    2005-05-09 17:50:11 +0000
committer    kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>    2005-05-09 17:50:11 +0000
commit    24edf4b8989a60f108982ee44590df856a8fc9b2 (patch)
tree    a89eadc9e53a8000808cd3b56183a860fb7c51d4 /xen/include/asm-x86/smp.h
parent    09e259436510fe39c0e55a4195f5368a1a386dba (diff)
bitkeeper revision 1.1389.10.1 (427fa2d3ZV92f_ErvLuIzWbV1f67QA)
Phase 1 of upgrading platform code to be derived from Linux 2.6.11 rather than 2.4.x.

Signed-off-by: Keir Fraser <keir@xensource.com>
Diffstat (limited to 'xen/include/asm-x86/smp.h')
-rw-r--r--    xen/include/asm-x86/smp.h  |  92
1 file changed, 44 insertions(+), 48 deletions(-)
diff --git a/xen/include/asm-x86/smp.h b/xen/include/asm-x86/smp.h
index e1e9443035..f8edfd1fca 100644
--- a/xen/include/asm-x86/smp.h
+++ b/xen/include/asm-x86/smp.h
@@ -1,14 +1,28 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
#ifndef __ASSEMBLY__
#include <xen/config.h>
+#include <xen/kernel.h>
+#include <xen/cpumask.h>
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#ifndef __ASSEMBLY__
#include <asm/fixmap.h>
+#include <asm/bitops.h>
#include <asm/mpspec.h>
+#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
+#endif
#include <asm/apic.h>
#endif
+#endif
+#define BAD_APICID 0xFFu
#ifdef CONFIG_SMP
#ifndef __ASSEMBLY__
@@ -17,79 +31,61 @@
*/
extern void smp_alloc_memory(void);
-extern unsigned long phys_cpu_present_map;
-extern unsigned long cpu_online_map;
-extern volatile unsigned long smp_invalidate_needed;
extern int pic_mode;
extern int smp_num_siblings;
-extern int cpu_sibling_map[];
+extern cpumask_t cpu_sibling_map[];
-/*
- * On x86 all CPUs are mapped 1:1 to the APIC space.
- * This simplifies scheduling and IPI sending and
- * compresses data structures.
- */
-static inline int cpu_logical_map(int cpu)
-{
- return cpu;
-}
-static inline int cpu_number_map(int cpu)
-{
- return cpu;
-}
+extern void smp_flush_tlb(void);
+extern void smp_invalidate_rcv(void); /* Process an NMI */
+extern void (*mtrr_hook) (void);
+extern void zap_low_mappings (void);
-/*
- * Some lowlevel functions might want to know about
- * the real APIC ID <-> CPU # mapping.
- */
#define MAX_APICID 256
-extern volatile int cpu_to_physical_apicid[NR_CPUS];
-extern volatile int physical_apicid_to_cpu[MAX_APICID];
-extern volatile int cpu_to_logical_apicid[NR_CPUS];
-extern volatile int logical_apicid_to_cpu[MAX_APICID];
-
-/*
- * General functions that each host system must provide.
- */
-
-/*extern void smp_boot_cpus(void);*/
-extern void smp_store_cpu_info(int id); /* Store per CPU info (like the initial udelay numbers */
+extern u8 x86_cpu_to_apicid[];
/*
* This function is needed by all SMP systems. It must _always_ be valid
* from the initial startup. We map APIC_BASE very early in page_setup(),
* so this is correct in the x86 case.
*/
+#define __smp_processor_id() (current->processor)
-#define smp_processor_id() (current->processor)
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_callin_map;
+#define cpu_possible_map cpu_callout_map
-static __inline int hard_smp_processor_id(void)
+/* We don't mark CPUs online until __cpu_up(), so we need another measure */
+static inline int num_booting_cpus(void)
+{
+ return cpus_weight(cpu_callout_map);
+}
+
+extern void map_cpu_to_logical_apicid(void);
+extern void unmap_cpu_to_logical_apicid(int cpu);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#ifdef APIC_DEFINITION
+extern int hard_smp_processor_id(void);
+#else
+#include <mach_apicdef.h>
+static inline int hard_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_ID(*(unsigned *)(APIC_BASE+APIC_ID));
+ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
}
+#endif
static __inline int logical_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_LOGICAL_ID(*(unsigned *)(APIC_BASE+APIC_LDR));
+ return GET_APIC_LOGICAL_ID(*(unsigned int *)(APIC_BASE+APIC_LDR));
}
+#endif
#endif /* !__ASSEMBLY__ */
#define NO_PROC_ID 0xFF /* No processor magic marker */
-/*
- * This magic constant controls our willingness to transfer
- * a process across CPUs. Such a transfer incurs misses on the L1
- * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
- * gut feeling is this will vary by board in value. For a board
- * with separate L2 cache it probably depends also on the RSS, and
- * for a board with shared L2 cache it ought to decay fast as other
- * processes are run.
- */
-
-#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
-
#endif
#endif
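
Note: the patch above replaces plain unsigned long CPU bitmaps (phys_cpu_present_map, cpu_online_map) with the Linux 2.6-style cpumask_t API, e.g. cpus_weight() over cpu_callout_map in num_booting_cpus(). As a rough, standalone illustration of that idea only (this is not Xen code; NR_CPUS here and the names my_cpumask_t, my_cpu_set() and my_cpus_weight() are hypothetical stand-ins for the real cpumask interface), a fixed-size CPU mask and a population-count helper might look like this:

/* Illustrative sketch only: a minimal fixed-size CPU mask modelled on the
 * Linux 2.6-style cpumask API adopted by the header above.  The type and
 * helper names are hypothetical stand-ins, not the real Xen/Linux ones. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 8
#define BITS_PER_LONG (8 * sizeof(unsigned long))

typedef struct {
    unsigned long bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
} my_cpumask_t;

/* Mark a CPU present in the mask (cf. cpu_set()). */
static void my_cpu_set(int cpu, my_cpumask_t *mask)
{
    mask->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

/* Count the CPUs set in the mask (cf. cpus_weight()). */
static int my_cpus_weight(const my_cpumask_t *mask)
{
    int cpu, count = 0;
    for (cpu = 0; cpu < NR_CPUS; cpu++)
        if (mask->bits[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG)))
            count++;
    return count;
}

int main(void)
{
    my_cpumask_t callout_map;

    memset(&callout_map, 0, sizeof(callout_map));

    /* Pretend CPUs 0-2 have been called out during boot. */
    my_cpu_set(0, &callout_map);
    my_cpu_set(1, &callout_map);
    my_cpu_set(2, &callout_map);

    /* Analogous to num_booting_cpus() in the header above. */
    printf("booting cpus: %d\n", my_cpus_weight(&callout_map));
    return 0;
}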