author    Keir Fraser <keir.fraser@citrix.com>  2010-03-17 08:34:16 +0000
committer Keir Fraser <keir.fraser@citrix.com>  2010-03-17 08:34:16 +0000
commit    35d60c59af284262ae011c7e52bd9927788fb47a
tree      c552fc85f6cecaab468953a9c8fc5727a75328b9
parent    a7e7bb6199d2d9e81b0097dcbd3edac1fb886d24
Increase default console ring allocation size and reduce default verbosity
In order to have a better chance that relevant messages fit into the ring buffer, allocate a dynamic (larger) one in more cases, and make the default allocation size depend on both the number of CPUs and the log level. Also free the static buffer if a dynamic one was obtained.

In order for "xm dmesg" to retrieve larger buffers, eliminate pyxc_readconsolering()'s 32k limitation resulting from the use of a statically allocated buffer.

Finally, suppress by default most per-CPU boot time messages on x86 (most of them can be re-enabled with the new command line option "cpuinfo"; some others are now printed more than once only when there are inconsistencies between CPUs). This reduces both boot time (namely when a graphical console is in use) and pressure on the console ring and serial transmit buffers.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
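The new default sizing added to console_init_postirq() scales with the CPU count and the log threshold and is then rounded down to a power of two. Below is a minimal standalone sketch of that computation, not part of the patch: num_cpus and log_thresh stand in for num_present_cpus() and xenlog_lower_thresh in the hypervisor, and the example values are illustrative only, not Xen defaults.

/*
 * Illustrative sketch only -- mirrors the default console ring sizing:
 * 512 bytes per present CPU, shifted further by the log threshold,
 * then rounded down to a power of two.
 */
#include <stdio.h>

static unsigned int default_conring_size(unsigned int num_cpus,
                                         unsigned int log_thresh)
{
    unsigned int size = num_cpus << (9 + log_thresh);

    /* Round size down to a power of two (same loop as in the patch). */
    while ( size & (size - 1) )
        size &= size - 1;

    return size;
}

int main(void)
{
    /* e.g. 48 CPUs with a threshold of 2: 96 KiB, rounded down to 64 KiB. */
    printf("%u KiB\n", default_conring_size(48, 2) >> 10);
    return 0;
}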
-rw-r--r--  tools/python/xen/lowlevel/xc/xc.c      33
-rw-r--r--  xen/arch/ia64/xen/xen.lds.S             7
-rw-r--r--  xen/arch/x86/cpu/amd.c                  5
-rw-r--r--  xen/arch/x86/cpu/common.c              36
-rw-r--r--  xen/arch/x86/cpu/intel_cacheinfo.c     32
-rw-r--r--  xen/arch/x86/cpu/mcheck/amd_f10.c      11
-rw-r--r--  xen/arch/x86/cpu/mcheck/amd_k8.c        9
-rw-r--r--  xen/arch/x86/cpu/mcheck/k7.c            8
-rw-r--r--  xen/arch/x86/cpu/mcheck/mce.c          42
-rw-r--r--  xen/arch/x86/cpu/mcheck/mce.h          16
-rw-r--r--  xen/arch/x86/cpu/mcheck/mce_intel.c    14
-rw-r--r--  xen/arch/x86/hvm/asid.c                 9
-rw-r--r--  xen/arch/x86/setup.c                    4
-rw-r--r--  xen/arch/x86/smpboot.c                 10
-rw-r--r--  xen/drivers/char/console.c             11
-rw-r--r--  xen/include/asm-x86/processor.h         3
16 files changed, 171 insertions, 79 deletions
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 8ed1053575..09bb260800 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -1062,14 +1062,16 @@ static PyObject *pyxc_readconsolering(XcObject *self,
PyObject *kwds)
{
unsigned int clear = 0, index = 0, incremental = 0;
- char _str[32768], *str = _str;
- unsigned int count = 32768;
+ unsigned int count = 16384 + 1, size = count;
+ char *str = malloc(size), *ptr;
+ PyObject *obj;
int ret;
static char *kwd_list[] = { "clear", "index", "incremental", NULL };
if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iii", kwd_list,
- &clear, &index, &incremental) )
+ &clear, &index, &incremental) ||
+ !str )
return NULL;
ret = xc_readconsolering(self->xc_handle, &str, &count, clear,
@@ -1077,7 +1079,30 @@ static PyObject *pyxc_readconsolering(XcObject *self,
if ( ret < 0 )
return pyxc_error_to_exception();
- return PyString_FromStringAndSize(str, count);
+ while ( !incremental && count == size )
+ {
+ size += count - 1;
+ if ( size < count )
+ break;
+
+ ptr = realloc(str, size);
+ if ( !ptr )
+ break;
+
+ str = ptr + count;
+ count = size - count;
+ ret = xc_readconsolering(self->xc_handle, &str, &count, clear,
+ 1, &index);
+ if ( ret < 0 )
+ break;
+
+ count += str - ptr;
+ str = ptr;
+ }
+
+ obj = PyString_FromStringAndSize(str, count);
+ free(str);
+ return obj;
}
diff --git a/xen/arch/ia64/xen/xen.lds.S b/xen/arch/ia64/xen/xen.lds.S
index 2fe1905f25..547790f800 100644
--- a/xen/arch/ia64/xen/xen.lds.S
+++ b/xen/arch/ia64/xen/xen.lds.S
@@ -223,7 +223,12 @@ SECTIONS
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
{ *(.sbss) *(.scommon) }
.bss : AT(ADDR(.bss) - LOAD_OFFSET)
- { *(.bss) *(COMMON) }
+ {
+ . = ALIGN(PAGE_SIZE);
+ *(.bss.page_aligned)
+ *(.bss)
+ *(COMMON)
+ }
_end = .;
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index aabfd4c9ba..2f959c05d6 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -493,8 +493,9 @@ static void __devinit init_amd(struct cpuinfo_x86 *c)
}
cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
phys_proc_id[cpu] >>= bits;
- printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
- cpu, c->x86_max_cores, cpu_core_id[cpu]);
+ if (opt_cpu_info)
+ printk("CPU %d(%d) -> Core %d\n",
+ cpu, c->x86_max_cores, cpu_core_id[cpu]);
}
#endif
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index bc6e051daf..87d1a50c64 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -59,6 +59,9 @@ static struct cpu_dev * this_cpu = &default_cpu;
integer_param("cachesize", cachesize_override);
+int __cpuinitdata opt_cpu_info;
+boolean_param("cpuinfo", opt_cpu_info);
+
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
unsigned int *v;
@@ -97,8 +100,10 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
if (n >= 0x80000005) {
cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
- printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
- edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+ if (opt_cpu_info)
+ printk("CPU: L1 I cache %dK (%d bytes/line),"
+ " D cache %dK (%d bytes/line)\n",
+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
c->x86_cache_size=(ecx>>24)+(edx>>24);
}
@@ -121,8 +126,9 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
c->x86_cache_size = l2size;
- printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
- l2size, ecx & 0xFF);
+ if (opt_cpu_info)
+ printk("CPU: L2 Cache: %dK (%d bytes/line)\n",
+ l2size, ecx & 0xFF);
}
/* Naming convention should be: <Name> [(<Codename>)] */
@@ -495,8 +501,9 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
index_msb = get_count_order(c->x86_num_siblings);
phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- phys_proc_id[cpu]);
+ if (opt_cpu_info)
+ printk("CPU: Physical Processor ID: %d\n",
+ phys_proc_id[cpu]);
c->x86_num_siblings = c->x86_num_siblings / c->x86_max_cores;
@@ -507,16 +514,22 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
((1 << core_bits) - 1);
- if (c->x86_max_cores > 1)
- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+ if (opt_cpu_info && c->x86_max_cores > 1)
+ printk("CPU: Processor Core ID: %d\n",
cpu_core_id[cpu]);
}
}
#endif
-void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+void __cpuinit print_cpu_info(unsigned int cpu)
{
- char *vendor = NULL;
+ const struct cpuinfo_x86 *c = cpu_data + cpu;
+ const char *vendor = NULL;
+
+ if (!opt_cpu_info)
+ return;
+
+ printk("CPU%u: ", cpu);
if (c->x86_vendor < X86_VENDOR_NUM)
vendor = this_cpu->c_vendor;
@@ -578,7 +591,8 @@ void __cpuinit cpu_init(void)
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
for (;;) local_irq_enable();
}
- printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+ if (opt_cpu_info)
+ printk("Initializing CPU#%d\n", cpu);
if (cpu_has_pat)
wrmsrl(MSR_IA32_CR_PAT, host_pat);
diff --git a/xen/arch/x86/cpu/intel_cacheinfo.c b/xen/arch/x86/cpu/intel_cacheinfo.c
index 3750efdfa8..d7ee15d539 100644
--- a/xen/arch/x86/cpu/intel_cacheinfo.c
+++ b/xen/arch/x86/cpu/intel_cacheinfo.c
@@ -415,21 +415,23 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
l3 = new_l3;
}
- if (trace)
- printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
- else if ( l1i )
- printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
-
- if (l1d)
- printk(", L1 D cache: %dK\n", l1d);
- else
- printk("\n");
-
- if (l2)
- printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
-
- if (l3)
- printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+ if (opt_cpu_info) {
+ if (trace)
+ printk("CPU: Trace cache: %dK uops", trace);
+ else if ( l1i )
+ printk("CPU: L1 I cache: %dK", l1i);
+
+ if (l1d)
+ printk(", L1 D cache: %dK\n", l1d);
+ else
+ printk("\n");
+
+ if (l2)
+ printk("CPU: L2 cache: %dK\n", l2);
+
+ if (l3)
+ printk("CPU: L3 cache: %dK\n", l3);
+ }
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
diff --git a/xen/arch/x86/cpu/mcheck/amd_f10.c b/xen/arch/x86/cpu/mcheck/amd_f10.c
index 68cbdabb36..18e6c071e5 100644
--- a/xen/arch/x86/cpu/mcheck/amd_f10.c
+++ b/xen/arch/x86/cpu/mcheck/amd_f10.c
@@ -83,15 +83,12 @@ amd_f10_handler(struct mc_info *mi, uint16_t bank, uint64_t status)
}
/* AMD Family10 machine check */
-int amd_f10_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type amd_f10_mcheck_init(struct cpuinfo_x86 *c)
{
- if (!amd_k8_mcheck_init(c))
- return 0;
+ if (amd_k8_mcheck_init(c) == mcheck_none)
+ return mcheck_none;
x86_mce_callback_register(amd_f10_handler);
- printk("CPU%i: AMD Family%xh machine check reporting enabled\n",
- smp_processor_id(), c->x86);
-
- return 1;
+ return mcheck_amd_famXX;
}
diff --git a/xen/arch/x86/cpu/mcheck/amd_k8.c b/xen/arch/x86/cpu/mcheck/amd_k8.c
index 6a5859c13c..13be7cbb28 100644
--- a/xen/arch/x86/cpu/mcheck/amd_k8.c
+++ b/xen/arch/x86/cpu/mcheck/amd_k8.c
@@ -76,14 +76,14 @@ static void k8_machine_check(struct cpu_user_regs *regs, long error_code)
}
/* AMD K8 machine check */
-int amd_k8_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type amd_k8_mcheck_init(struct cpuinfo_x86 *c)
{
uint32_t i;
enum mcequirk_amd_flags quirkflag;
/* Check for PPro style MCA; our caller has confirmed MCE support. */
if (!cpu_has(c, X86_FEATURE_MCA))
- return 0;
+ return mcheck_none;
quirkflag = mcequirk_lookup_amd_quirkdata(c);
@@ -102,9 +102,6 @@ int amd_k8_mcheck_init(struct cpuinfo_x86 *c)
}
set_in_cr4(X86_CR4_MCE);
- if (c->x86 < 0x10 || c->x86 > 0x11)
- printk("CPU%i: AMD K8 machine check reporting enabled\n",
- smp_processor_id());
- return 1;
+ return mcheck_amd_k8;
}
diff --git a/xen/arch/x86/cpu/mcheck/k7.c b/xen/arch/x86/cpu/mcheck/k7.c
index 1a0a0a5fef..be73261b82 100644
--- a/xen/arch/x86/cpu/mcheck/k7.c
+++ b/xen/arch/x86/cpu/mcheck/k7.c
@@ -68,14 +68,14 @@ static fastcall void k7_machine_check(struct cpu_user_regs * regs, long error_co
/* AMD K7 machine check */
-int amd_k7_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type amd_k7_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;
int i;
/* Check for PPro style MCA; our caller has confirmed MCE support. */
if (!cpu_has(c, X86_FEATURE_MCA))
- return 0;
+ return mcheck_none;
x86_mce_vector_register(k7_machine_check);
@@ -93,8 +93,6 @@ int amd_k7_mcheck_init(struct cpuinfo_x86 *c)
}
set_in_cr4 (X86_CR4_MCE);
- printk (KERN_INFO "CPU%d: AMD K7 machine check reporting enabled.\n",
- smp_processor_id());
- return 1;
+ return mcheck_amd_k7;
}
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 955ed7b78f..94ff740267 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -562,9 +562,9 @@ void mcheck_mca_clearbanks(cpu_banks_t bankmask)
}
}
-static int amd_mcheck_init(struct cpuinfo_x86 *ci)
+static enum mcheck_type amd_mcheck_init(struct cpuinfo_x86 *ci)
{
- int rc = 0;
+ enum mcheck_type rc = mcheck_none;
switch (ci->x86) {
case 6:
@@ -628,7 +628,9 @@ int mce_firstbank(struct cpuinfo_x86 *c)
/* This has to be run for each processor */
void mcheck_init(struct cpuinfo_x86 *c)
{
- int inited = 0, i, broadcast;
+ int i, broadcast;
+ enum mcheck_type inited = mcheck_none;
+ static enum mcheck_type g_type = mcheck_unset;
static int broadcast_check;
if (mce_disabled == 1) {
@@ -694,9 +696,37 @@ void mcheck_init(struct cpuinfo_x86 *c)
if (g_mcg_cap & MCG_CTL_P)
rdmsrl(MSR_IA32_MCG_CTL, h_mcg_ctl);
set_poll_bankmask(c);
- if (!inited)
- printk(XENLOG_INFO "CPU%i: No machine check initialization\n",
- smp_processor_id());
+
+ if (inited != g_type) {
+ char prefix[20];
+ static const char *const type_str[] = {
+ [mcheck_amd_famXX] = "AMD",
+ [mcheck_amd_k7] = "AMD K7",
+ [mcheck_amd_k8] = "AMD K8",
+ [mcheck_intel] = "Intel"
+ };
+
+ snprintf(prefix, ARRAY_SIZE(prefix),
+ g_type != mcheck_unset ? XENLOG_WARNING "CPU%i: "
+ : XENLOG_INFO,
+ smp_processor_id());
+ BUG_ON(inited >= ARRAY_SIZE(type_str));
+ switch (inited) {
+ default:
+ printk("%s%s machine check reporting enabled\n",
+ prefix, type_str[inited]);
+ break;
+ case mcheck_amd_famXX:
+ printk("%s%s Fam%xh machine check reporting enabled\n",
+ prefix, type_str[inited], c->x86);
+ break;
+ case mcheck_none:
+ printk("%sNo machine check initialization\n", prefix);
+ break;
+ }
+
+ g_type = inited;
+ }
}
u64 mce_cap_init(void)
diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
index cfc26abf4c..e5eee396ef 100644
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -28,13 +28,21 @@ extern int mce_verbosity;
printk(s, ##a); \
} while (0)
+enum mcheck_type {
+ mcheck_unset = -1,
+ mcheck_none,
+ mcheck_amd_famXX,
+ mcheck_amd_k7,
+ mcheck_amd_k8,
+ mcheck_intel
+};
/* Init functions */
-int amd_k7_mcheck_init(struct cpuinfo_x86 *c);
-int amd_k8_mcheck_init(struct cpuinfo_x86 *c);
-int amd_f10_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type amd_k7_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type amd_k8_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type amd_f10_mcheck_init(struct cpuinfo_x86 *c);
-int intel_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c);
void intel_mcheck_timer(struct cpuinfo_x86 *c);
void mce_intel_feature_init(struct cpuinfo_x86 *c);
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index b8b64921ac..9e56df9ca9 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -141,8 +141,9 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
l = apic_read (APIC_LVTTHMR);
apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
- printk (KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
- cpu, tm2 ? "TM2" : "TM1");
+ if (opt_cpu_info)
+ printk(KERN_INFO "CPU%u: Thermal monitoring enabled (%s)\n",
+ cpu, tm2 ? "TM2" : "TM1");
return;
}
#endif /* CONFIG_X86_MCE_THERMAL */
@@ -946,7 +947,8 @@ static void intel_init_cmci(struct cpuinfo_x86 *c)
int cpu = smp_processor_id();
if (!mce_available(c) || !cmci_support) {
- mce_printk(MCE_QUIET, "CMCI: CPU%d has no CMCI support\n", cpu);
+ if (opt_cpu_info)
+ mce_printk(MCE_QUIET, "CMCI: CPU%d has no CMCI support\n", cpu);
return;
}
@@ -1068,11 +1070,9 @@ static void mce_init(void)
}
/* p4/p6 family have similar MCA initialization process */
-int intel_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c)
{
_mce_cap_init(c);
- mce_printk(MCE_QUIET, "Intel machine check reporting enabled on CPU#%d.\n",
- smp_processor_id());
/* machine check is available */
x86_mce_vector_register(intel_machine_check);
@@ -1085,7 +1085,7 @@ int intel_mcheck_init(struct cpuinfo_x86 *c)
mce_set_owner();
open_softirq(MACHINE_CHECK_SOFTIRQ, mce_softirq);
- return 1;
+ return mcheck_intel;
}
int intel_mce_wrmsr(uint32_t msr, uint64_t val)
diff --git a/xen/arch/x86/hvm/asid.c b/xen/arch/x86/hvm/asid.c
index 69f3f577f8..183a3dcaf4 100644
--- a/xen/arch/x86/hvm/asid.c
+++ b/xen/arch/x86/hvm/asid.c
@@ -59,6 +59,7 @@ static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data);
void hvm_asid_init(int nasids)
{
+ static s8 g_disabled = -1;
struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
/*
@@ -72,8 +73,12 @@ void hvm_asid_init(int nasids)
data->max_asid = nasids - 1;
data->disabled = (nasids <= 1);
- printk("HVM: ASIDs %s \n",
- (data->disabled ? "disabled." : "enabled."));
+ if ( g_disabled != data->disabled )
+ {
+ printk("HVM: ASIDs %sabled.\n", data->disabled ? "dis" : "en");
+ if ( g_disabled < 0 )
+ g_disabled = data->disabled;
+ }
/* Zero indicates 'invalid generation', so we start the count at one. */
data->core_asid_generation = 1;
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 5f5be02bbe..b23db4faaf 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -269,8 +269,8 @@ void __devinit srat_detect_node(int cpu)
node = 0;
numa_set_node(cpu, node);
- if ( acpi_numa > 0 )
- printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
+ if ( opt_cpu_info && acpi_numa > 0 )
+ printk("CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
}
/*
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 7e9473f267..8d5a103b80 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -877,7 +877,9 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
start_eip = setup_trampoline();
/* So we see what's up */
- printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
+ if (opt_cpu_info)
+ printk("Booting processor %d/%d eip %lx\n",
+ cpu, apicid, start_eip);
stack_start.esp = prepare_idle_stack(cpu);
@@ -960,8 +962,7 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
if (cpu_isset(cpu, cpu_callin_map)) {
/* number CPUs logically, starting from 1 (BSP is 0) */
Dprintk("OK.\n");
- printk("CPU%d: ", cpu);
- print_cpu_info(&cpu_data[cpu]);
+ print_cpu_info(cpu);
Dprintk("CPU has booted.\n");
} else {
boot_error = 1;
@@ -1062,8 +1063,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
* Setup boot CPU information
*/
smp_store_cpu_info(0); /* Final full version of the data */
- printk("CPU%d: ", 0);
- print_cpu_info(&cpu_data[0]);
+ print_cpu_info(0);
boot_cpu_physical_apicid = get_apic_id();
x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index 82f74e4745..56500ad681 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -65,7 +65,12 @@ size_param("conring_size", opt_conring_size);
#define _CONRING_SIZE 16384
#define CONRING_IDX_MASK(i) ((i)&(conring_size-1))
-static char _conring[_CONRING_SIZE], *__read_mostly conring = _conring;
+static char
+#if _CONRING_SIZE >= PAGE_SIZE
+ __attribute__((__section__(".bss.page_aligned"), __aligned__(PAGE_SIZE)))
+#endif
+ _conring[_CONRING_SIZE];
+static char *__read_mostly conring = _conring;
static uint32_t __read_mostly conring_size = _CONRING_SIZE;
static uint32_t conringc, conringp;
@@ -595,6 +600,8 @@ void __init console_init_postirq(void)
serial_init_postirq();
+ if ( !opt_conring_size )
+ opt_conring_size = num_present_cpus() << (9 + xenlog_lower_thresh);
/* Round size down to a power of two. */
while ( opt_conring_size & (opt_conring_size - 1) )
opt_conring_size &= opt_conring_size - 1;
@@ -618,6 +625,8 @@ void __init console_init_postirq(void)
spin_unlock_irq(&console_lock);
printk("Allocated console ring of %u KiB.\n", opt_conring_size >> 10);
+
+ init_xenheap_pages(__pa(_conring), __pa(_conring + _CONRING_SIZE));
}
void __init console_endboot(void)
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 62f8aea880..a17eb93c42 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -194,10 +194,11 @@ extern struct cpuinfo_x86 cpu_data[];
extern u64 host_pat;
extern int phys_proc_id[NR_CPUS];
extern int cpu_core_id[NR_CPUS];
+extern int opt_cpu_info;
extern void identify_cpu(struct cpuinfo_x86 *);
extern void setup_clear_cpu_cap(unsigned int);
-extern void print_cpu_info(struct cpuinfo_x86 *);
+extern void print_cpu_info(unsigned int cpu);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void dodgy_tsc(void);