 xen/arch/x86/hvm/hvm.c          |  1
 xen/arch/x86/nmi.c              |  2
 xen/arch/x86/oprofile/nmi_int.c |  5
 xen/arch/x86/setup.c            |  2
 xen/arch/x86/smpboot.c          |  5
 xen/arch/x86/time.c             |  7
 xen/common/domctl.c             |  7
 xen/common/perfc.c              | 26
 xen/common/radix-tree.c         |  5
 xen/common/rcupdate.c           |  2
 xen/common/sched_sedf.c         | 14
 xen/common/schedule.c           | 41
 xen/common/stop_machine.c       | 25
 xen/common/tasklet.c            |  9
 xen/common/timer.c              | 31
 xen/common/tmem.c               |  7
 xen/common/tmem_xen.c           | 90
 xen/include/asm-ia64/perfc.h    |  4
 xen/include/asm-x86/perfc.h     |  4
 xen/include/asm-x86/setup.h     |  1
 xen/include/asm-x86/smp.h       |  1
 xen/include/xen/tmem.h          |  1
 xen/xsm/flask/flask_op.c        |  6
 23 files changed, 201 insertions(+), 95 deletions(-)
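
Every file below follows one theme: per-CPU state that used to be set up for each possible CPU at boot time is now initialised on demand from CPU hotplug notifiers, so only CPUs that actually come online carry state. A minimal sketch of the recurring pattern, using only the notifier API visible in the hunks below (notifier_block, register_cpu_notifier, NOTIFY_DONE); init_state()/free_state() are hypothetical stand-ins for each subsystem's real work:

static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    switch ( action )
    {
    case CPU_UP_PREPARE:
        init_state(cpu);      /* allocate/initialise per-CPU state */
        break;
    case CPU_UP_CANCELED:     /* bring-up failed: undo UP_PREPARE */
    case CPU_DEAD:            /* CPU went offline: tear down */
        free_state(cpu);
        break;
    default:
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback
};

static void __init subsys_init(void)
{
    /* The boot CPU is already online and will never receive an
     * UP_PREPARE event, so invoke the callback for it by hand. */
    cpu_callback(&cpu_nfb, CPU_UP_PREPARE,
                 (void *)(long)smp_processor_id());
    register_cpu_notifier(&cpu_nfb);
}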
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7f6ba29a06..a6f1be9d0a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -85,6 +85,7 @@ static int cpu_callback(
case CPU_DYING:
hvm_cpu_down();
break;
+ case CPU_UP_CANCELED:
case CPU_DEAD:
hvm_funcs.cpu_dead(cpu);
break;
diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c
index 759c0f6c77..f84a8f43a0 100644
--- a/xen/arch/x86/nmi.c
+++ b/xen/arch/x86/nmi.c
@@ -468,7 +468,7 @@ static void do_nmi_stats(unsigned char key)
struct vcpu *v;
printk("CPU\tNMI\n");
- for_each_possible_cpu ( i )
+ for_each_online_cpu ( i )
printk("%3d\t%3d\n", i, nmi_count(i));
if ( ((d = dom0) == NULL) || (d->vcpu == NULL) ||
diff --git a/xen/arch/x86/oprofile/nmi_int.c b/xen/arch/x86/oprofile/nmi_int.c
index 98f66f66c5..0f543a1f1c 100644
--- a/xen/arch/x86/oprofile/nmi_int.c
+++ b/xen/arch/x86/oprofile/nmi_int.c
@@ -150,10 +150,7 @@ static int allocate_msrs(void)
size_t counters_size = sizeof(struct op_msr) * model->num_counters;
int i;
- for (i = 0; i < NR_CPUS; ++i) {
- if (!test_bit(i, &cpu_online_map))
- continue;
-
+ for_each_online_cpu (i) {
cpu_msrs[i].counters = xmalloc_bytes(counters_size);
if (!cpu_msrs[i].counters) {
success = 0;
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 9d6036f300..22cea30cfd 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1158,8 +1158,6 @@ void __init __start_xen(unsigned long mbi_p)
init_trace_bufs();
- init_tmem();
-
console_endboot();
/* Hide UART from DOM0 if we're using it */
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 035e570413..1e648024e7 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -1198,11 +1198,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
* construct cpu_sibling_map, so that we can tell sibling CPUs
* efficiently.
*/
- for_each_possible_cpu(cpu) {
- cpus_clear(per_cpu(cpu_sibling_map, cpu));
- cpus_clear(per_cpu(cpu_core_map, cpu));
- }
-
cpu_set(0, per_cpu(cpu_sibling_map, 0));
cpu_set(0, per_cpu(cpu_core_map, 0));
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 1ba23f9f46..c56cfe88ee 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1401,6 +1401,10 @@ void init_percpu_time(void)
unsigned long flags;
s_time_t now;
+ /* If we have constant-rate TSCs then scale factor can be shared. */
+ if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
+ this_cpu(cpu_time).tsc_scale = per_cpu(cpu_time, 0).tsc_scale;
+
local_irq_save(flags);
rdtscll(t->local_tsc_stamp);
now = read_platform_stime();
@@ -1435,9 +1439,6 @@ int __init init_xen_time(void)
/* If we have constant-rate TSCs then scale factor can be shared. */
if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
{
- int cpu;
- for_each_possible_cpu ( cpu )
- per_cpu(cpu_time, cpu).tsc_scale = per_cpu(cpu_time, 0).tsc_scale;
/* If TSCs are not marked as 'reliable', re-sync during rendezvous. */
if ( !boot_cpu_has(X86_FEATURE_TSC_RELIABLE) )
time_calibration_rendezvous_fn = time_calibration_tsc_rendezvous;
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 259a6b28a5..7bc976eb79 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -155,7 +155,7 @@ static unsigned int default_vcpu0_location(cpumask_t *online)
cpumask_t cpu_exclude_map;
/* Do an initial CPU placement. Pick the least-populated CPU. */
- nr_cpus = last_cpu(cpu_possible_map) + 1;
+ nr_cpus = last_cpu(cpu_online_map) + 1;
cnt = xmalloc_array(unsigned int, nr_cpus);
if ( cnt )
{
@@ -164,8 +164,9 @@ static unsigned int default_vcpu0_location(cpumask_t *online)
rcu_read_lock(&domlist_read_lock);
for_each_domain ( d )
for_each_vcpu ( d, v )
- if ( !test_bit(_VPF_down, &v->pause_flags) )
- cnt[v->processor]++;
+ if ( !test_bit(_VPF_down, &v->pause_flags)
+ && ((cpu = v->processor) < nr_cpus) )
+ cnt[cpu]++;
rcu_read_unlock(&domlist_read_lock);
}
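
cnt[] is now sized from cpu_online_map, but a vcpu that is paused (or was last scheduled before its CPU was offlined) can still carry a v->processor value beyond that range, hence the added bounds check. A hypothetical illustration of the hazard:

/* Hypothetical: CPUs 0-3 online => nr_cpus == 4, yet a sleeping vcpu
 * may still record v->processor == 6 from before CPU 6 went offline.
 * Without the guard, cnt[6] would index past the 4-entry array. */
if ( (cpu = v->processor) < nr_cpus )
    cnt[cpu]++;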
diff --git a/xen/common/perfc.c b/xen/common/perfc.c
index 5e1e196a5f..3d30556ea6 100644
--- a/xen/common/perfc.c
+++ b/xen/common/perfc.c
@@ -114,8 +114,6 @@ void perfc_printall(unsigned char key)
}
printk("\n");
}
-
- arch_perfc_printall();
}
void perfc_reset(unsigned char key)
@@ -136,13 +134,13 @@ void perfc_reset(unsigned char key)
switch ( perfc_info[i].type )
{
case TYPE_SINGLE:
- for_each_possible_cpu ( cpu )
+ for_each_online_cpu ( cpu )
per_cpu(perfcounters, cpu)[j] = 0;
case TYPE_S_SINGLE:
++j;
break;
case TYPE_ARRAY:
- for_each_possible_cpu ( cpu )
+ for_each_online_cpu ( cpu )
memset(per_cpu(perfcounters, cpu) + j, 0,
perfc_info[i].nr_elements * sizeof(perfc_t));
case TYPE_S_ARRAY:
@@ -157,15 +155,22 @@ void perfc_reset(unsigned char key)
static xen_sysctl_perfc_desc_t perfc_d[NR_PERFCTRS];
static xen_sysctl_perfc_val_t *perfc_vals;
static unsigned int perfc_nbr_vals;
-static int perfc_init = 0;
+static cpumask_t perfc_cpumap;
+
static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val)
{
unsigned int i, j, v;
/* We only copy the name and array-size information once. */
- if ( !perfc_init )
+ if ( !cpus_equal(cpu_online_map, perfc_cpumap) )
{
+ unsigned int nr_cpus;
+ perfc_cpumap = cpu_online_map;
+ nr_cpus = cpus_weight(perfc_cpumap);
+
+ perfc_nbr_vals = 0;
+
for ( i = 0; i < NR_PERFCTRS; i++ )
{
safe_strcpy(perfc_d[i].name, perfc_info[i].name);
@@ -174,7 +179,7 @@ static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
{
case TYPE_SINGLE:
case TYPE_S_SINGLE:
- perfc_d[i].nr_vals = num_possible_cpus();
+ perfc_d[i].nr_vals = nr_cpus;
break;
case TYPE_ARRAY:
case TYPE_S_ARRAY:
@@ -183,8 +188,9 @@ static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
}
perfc_nbr_vals += perfc_d[i].nr_vals;
}
+
+ xfree(perfc_vals);
perfc_vals = xmalloc_array(xen_sysctl_perfc_val_t, perfc_nbr_vals);
- perfc_init = 1;
}
if ( guest_handle_is_null(desc) )
@@ -205,14 +211,14 @@ static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
{
case TYPE_SINGLE:
case TYPE_S_SINGLE:
- for_each_possible_cpu ( cpu )
+ for_each_cpu_mask ( cpu, perfc_cpumap )
perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
++j;
break;
case TYPE_ARRAY:
case TYPE_S_ARRAY:
memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
- for_each_possible_cpu ( cpu )
+ for_each_cpu_mask ( cpu, perfc_cpumap )
{
perfc_t *counters = per_cpu(perfcounters, cpu) + j;
unsigned int k;
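
Replacing the one-shot perfc_init flag with a cached perfc_cpumap keeps the descriptor layout and the copied values consistent: nr_vals for a per-CPU counter depends on how many CPUs are iterated, so both passes must use the same snapshot even if cpu_online_map changes between hypercalls. In outline:

/* Snapshot once, then use it for both sizing and copying: */
perfc_cpumap = cpu_online_map;            /* retaken only on change */
nr_cpus = cpus_weight(perfc_cpumap);      /* sizes nr_vals           */
for_each_cpu_mask ( cpu, perfc_cpumap )   /* copies the same CPUs    */
    perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];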
diff --git a/xen/common/radix-tree.c b/xen/common/radix-tree.c
index 414f0cef72..e6e213c0a0 100644
--- a/xen/common/radix-tree.c
+++ b/xen/common/radix-tree.c
@@ -31,6 +31,7 @@
*/
#include <xen/config.h>
+#include <xen/init.h>
#include <xen/lib.h>
#include <xen/types.h>
#include <xen/errno.h>
@@ -429,7 +430,7 @@ void radix_tree_destroy(struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_destroy);
-static /*__init*/ unsigned long __maxindex(unsigned int height)
+static unsigned long __init __maxindex(unsigned int height)
{
unsigned int tmp = height * RADIX_TREE_MAP_SHIFT;
unsigned long index = (~0UL >> (RADIX_TREE_INDEX_BITS - tmp - 1)) >> 1;
@@ -439,7 +440,7 @@ static /*__init*/ unsigned long __maxindex(unsigned int height)
return index;
}
-/*__init*/ void radix_tree_init(void)
+void __init radix_tree_init(void)
{
unsigned int i;
diff --git a/xen/common/rcupdate.c b/xen/common/rcupdate.c
index 31c5f6f39b..827b8bd5ca 100644
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -361,7 +361,7 @@ static struct notifier_block cpu_nfb = {
void __init rcu_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
- cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
register_cpu_notifier(&cpu_nfb);
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 6223fc030d..a9d107b546 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -1333,7 +1333,7 @@ static int sedf_adjust_weights(struct cpupool *c, struct xen_domctl_scheduler_op
{
struct vcpu *p;
struct domain *d;
- unsigned int nr_cpus = last_cpu(cpu_possible_map) + 1;
+ unsigned int cpu, nr_cpus = last_cpu(cpu_online_map) + 1;
int *sumw = xmalloc_array(int, nr_cpus);
s_time_t *sumt = xmalloc_array(s_time_t, nr_cpus);
@@ -1354,9 +1354,12 @@ static int sedf_adjust_weights(struct cpupool *c, struct xen_domctl_scheduler_op
continue;
for_each_vcpu( d, p )
{
+ if ( (cpu = p->processor) >= nr_cpus )
+ continue;
+
if ( EDOM_INFO(p)->weight )
{
- sumw[p->processor] += EDOM_INFO(p)->weight;
+ sumw[cpu] += EDOM_INFO(p)->weight;
}
else
{
@@ -1367,7 +1370,7 @@ static int sedf_adjust_weights(struct cpupool *c, struct xen_domctl_scheduler_op
/*check for overflows*/
ASSERT((WEIGHT_PERIOD < ULONG_MAX)
&& (EDOM_INFO(p)->slice_orig < ULONG_MAX));
- sumt[p->processor] +=
+ sumt[cpu] +=
(WEIGHT_PERIOD * EDOM_INFO(p)->slice_orig) /
EDOM_INFO(p)->period_orig;
}
@@ -1381,6 +1384,8 @@ static int sedf_adjust_weights(struct cpupool *c, struct xen_domctl_scheduler_op
{
for_each_vcpu ( d, p )
{
+ if ( (cpu = p->processor) >= nr_cpus )
+ continue;
if ( EDOM_INFO(p)->weight )
{
EDOM_INFO(p)->period_orig =
@@ -1388,8 +1393,7 @@ static int sedf_adjust_weights(struct cpupool *c, struct xen_domctl_scheduler_op
EDOM_INFO(p)->slice_orig =
EDOM_INFO(p)->slice =
(EDOM_INFO(p)->weight *
- (WEIGHT_PERIOD - WEIGHT_SAFETY - sumt[p->processor])) /
- sumw[p->processor];
+ (WEIGHT_PERIOD - WEIGHT_SAFETY - sumt[cpu])) / sumw[cpu];
}
}
}
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index dffcbf5bda..a718dfa6b5 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -31,6 +31,7 @@
#include <xen/errno.h>
#include <xen/guest_access.h>
#include <xen/multicall.h>
+#include <xen/cpu.h>
#include <public/sched.h>
#include <xsm/xsm.h>
@@ -1089,9 +1090,39 @@ const struct scheduler *scheduler_get_by_id(unsigned int id)
return NULL;
}
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch ( action )
+ {
+ case CPU_UP_PREPARE:
+ per_cpu(scheduler, cpu) = &ops;
+ spin_lock_init(&per_cpu(schedule_data, cpu)._lock);
+ per_cpu(schedule_data, cpu).schedule_lock
+ = &per_cpu(schedule_data, cpu)._lock;
+ init_timer(&per_cpu(schedule_data, cpu).s_timer,
+ s_timer_fn, NULL, cpu);
+ break;
+ case CPU_DEAD:
+ kill_timer(&per_cpu(schedule_data, cpu).s_timer);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
/* Initialise the data structures. */
void __init scheduler_init(void)
{
+ void *hcpu = (void *)(long)smp_processor_id();
int i;
open_softirq(SCHEDULE_SOFTIRQ, schedule);
@@ -1109,14 +1140,8 @@ void __init scheduler_init(void)
ops = *schedulers[0];
}
- for_each_possible_cpu ( i )
- {
- per_cpu(scheduler, i) = &ops;
- spin_lock_init(&per_cpu(schedule_data, i)._lock);
- per_cpu(schedule_data, i).schedule_lock
- = &per_cpu(schedule_data, i)._lock;
- init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
- }
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
+ register_cpu_notifier(&cpu_nfb);
printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
if ( SCHED_OP(&ops, init, 1) )
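
The CPU_DEAD arm matters as much as UP_PREPARE here: kill_timer() removes the dead CPU's s_timer so it cannot linger in a live timer heap. The setup/teardown pair, extracted from the hunk above:

case CPU_UP_PREPARE:
    init_timer(&per_cpu(schedule_data, cpu).s_timer, s_timer_fn, NULL, cpu);
    break;
case CPU_DEAD:
    kill_timer(&per_cpu(schedule_data, cpu).s_timer);  /* matching teardown */
    break;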
diff --git a/xen/common/stop_machine.c b/xen/common/stop_machine.c
index 83f525f09b..31d5c6fff7 100644
--- a/xen/common/stop_machine.c
+++ b/xen/common/stop_machine.c
@@ -155,12 +155,31 @@ static void stopmachine_action(unsigned long cpu)
local_irq_enable();
}
-static int __init cpu_stopmachine_init(void)
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
{
- unsigned int cpu;
- for_each_possible_cpu ( cpu )
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if ( action == CPU_UP_PREPARE )
tasklet_init(&per_cpu(stopmachine_tasklet, cpu),
stopmachine_action, cpu);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
+static int __init cpu_stopmachine_init(void)
+{
+ unsigned int cpu;
+ for_each_online_cpu ( cpu )
+ {
+ void *hcpu = (void *)(long)cpu;
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
+ }
+ register_cpu_notifier(&cpu_nfb);
return 0;
}
__initcall(cpu_stopmachine_init);
diff --git a/xen/common/tasklet.c b/xen/common/tasklet.c
index b898535223..2e23232c41 100644
--- a/xen/common/tasklet.c
+++ b/xen/common/tasklet.c
@@ -19,7 +19,7 @@
#include <xen/cpu.h>
/* Some subsystems call into us before we are initialised. We ignore them. */
-static cpumask_t tasklets_initialised;
+static bool_t tasklets_initialised;
DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);
@@ -44,7 +44,7 @@ void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
spin_lock_irqsave(&tasklet_lock, flags);
- if ( cpu_isset(cpu, tasklets_initialised) && !t->is_dead )
+ if ( tasklets_initialised && !t->is_dead )
{
t->scheduled_on = cpu;
if ( !t->is_running )
@@ -174,9 +174,9 @@ static int cpu_callback(
switch ( action )
{
case CPU_UP_PREPARE:
- if ( !cpu_test_and_set(cpu, tasklets_initialised) )
- INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
+ INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
break;
+ case CPU_UP_CANCELED:
case CPU_DEAD:
migrate_tasklets_from_cpu(cpu);
break;
@@ -196,6 +196,7 @@ void __init tasklet_subsys_init(void)
void *hcpu = (void *)(long)smp_processor_id();
cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
register_cpu_notifier(&cpu_nfb);
+ tasklets_initialised = 1;
}
/*
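
tasklets_initialised shrinks from a cpumask to a single bool_t: per-CPU list setup is now the CPU_UP_PREPARE notifier's job, so the only remaining question is whether the subsystem as a whole is up. Early callers are rejected by the same test as before, just against a global flag:

/* Sketch: callers racing ahead of tasklet_subsys_init() are ignored;
 * once the flag is set, every online CPU's tasklet_list was already
 * created by its CPU_UP_PREPARE notification. */
if ( tasklets_initialised && !t->is_dead )
    t->scheduled_on = cpu;   /* safe to proceed with scheduling */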
diff --git a/xen/common/timer.c b/xen/common/timer.c
index 7a84bac671..0759df818f 100644
--- a/xen/common/timer.c
+++ b/xen/common/timer.c
@@ -18,6 +18,7 @@
#include <xen/timer.h>
#include <xen/keyhandler.h>
#include <xen/percpu.h>
+#include <xen/cpu.h>
#include <asm/system.h>
#include <asm/desc.h>
@@ -514,10 +515,29 @@ static struct keyhandler dump_timerq_keyhandler = {
.desc = "dump timer queues"
};
+static struct timer *dummy_heap;
+
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if ( action == CPU_UP_PREPARE )
+ {
+ spin_lock_init(&per_cpu(timers, cpu).lock);
+ per_cpu(timers, cpu).heap = &dummy_heap;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
void __init timer_init(void)
{
- static struct timer *dummy_heap;
- int i;
+ void *cpu = (void *)(long)smp_processor_id();
open_softirq(TIMER_SOFTIRQ, timer_softirq_action);
@@ -528,11 +548,8 @@ void __init timer_init(void)
SET_HEAP_SIZE(&dummy_heap, 0);
SET_HEAP_LIMIT(&dummy_heap, 0);
- for_each_possible_cpu ( i )
- {
- spin_lock_init(&per_cpu(timers, i).lock);
- per_cpu(timers, i).heap = &dummy_heap;
- }
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
+ register_cpu_notifier(&cpu_nfb);
register_keyhandler('a', &dump_timerq_keyhandler);
}
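
dummy_heap moves from a function-static inside timer_init() to file scope because the notifier also runs for CPUs brought up long after boot: each newly prepared CPU must start with its heap pointer aimed at the shared empty sentinel (a real heap is presumably allocated lazily once timers are added on that CPU). The per-CPU setup is just:

if ( action == CPU_UP_PREPARE )
{
    spin_lock_init(&per_cpu(timers, cpu).lock);
    per_cpu(timers, cpu).heap = &dummy_heap;   /* shared empty sentinel */
}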
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 0dcbcbfffc..c340f59fab 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -2877,11 +2877,11 @@ EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
}
/* called at hypervisor startup */
-EXPORT void init_tmem(void)
+static int __init init_tmem(void)
{
int i;
if ( !tmh_enabled() )
- return;
+ return 0;
radix_tree_init();
if ( tmh_dedup_enabled() )
@@ -2905,7 +2905,10 @@ EXPORT void init_tmem(void)
}
else
printk("tmem: initialization FAILED\n");
+
+ return 0;
}
+__initcall(init_tmem);
/*
* Local variables:
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index ccfd98cc56..d10f49306d 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -11,6 +11,7 @@
#include <xen/lzo.h> /* compression code */
#include <xen/paging.h>
#include <xen/domain_page.h>
+#include <xen/cpu.h>
#define EXPORT /* indicates code other modules are dependent upon */
@@ -277,7 +278,7 @@ static void tmh_mempool_page_put(void *page_va)
tmh_free_page(virt_to_page(page_va));
}
-static int tmh_mempool_init(void)
+static int __init tmh_mempool_init(void)
{
tmh_mempool = xmem_pool_create("tmem", tmh_mempool_page_get,
tmh_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
@@ -347,32 +348,83 @@ EXPORT void tmh_client_destroy(tmh_client_t *tmh)
/****************** XEN-SPECIFIC HOST INITIALIZATION ********************/
-EXPORT int tmh_init(void)
-{
#ifndef __i386__
- int dstmem_order, workmem_order;
- bool_t bad_alloc = 0;
- struct page_info *pi;
- unsigned char *p1, *p2;
- int cpu;
+
+static int dstmem_order, workmem_order;
+
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch ( action )
+ {
+ case CPU_UP_PREPARE: {
+ if ( per_cpu(dstmem, cpu) == NULL )
+ {
+ struct page_info *p = alloc_domheap_pages(0, dstmem_order, 0);
+ per_cpu(dstmem, cpu) = p ? page_to_virt(p) : NULL;
+ }
+ if ( per_cpu(workmem, cpu) == NULL )
+ {
+ struct page_info *p = alloc_domheap_pages(0, workmem_order, 0);
+ per_cpu(workmem, cpu) = p ? page_to_virt(p) : NULL;
+ }
+ break;
+ }
+ case CPU_DEAD:
+ case CPU_UP_CANCELED: {
+ if ( per_cpu(dstmem, cpu) != NULL )
+ {
+ struct page_info *p = virt_to_page(per_cpu(dstmem, cpu));
+ free_domheap_pages(p, dstmem_order);
+ per_cpu(dstmem, cpu) = NULL;
+ }
+ if ( per_cpu(workmem, cpu) != NULL )
+ {
+ struct page_info *p = virt_to_page(per_cpu(workmem, cpu));
+ free_domheap_pages(p, workmem_order);
+ per_cpu(workmem, cpu) = NULL;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
+EXPORT int __init tmh_init(void)
+{
+ unsigned int cpu;
if ( !tmh_mempool_init() )
return 0;
dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
- for_each_possible_cpu ( cpu )
+
+ for_each_online_cpu ( cpu )
{
- pi = alloc_domheap_pages(0,dstmem_order,0);
- per_cpu(dstmem, cpu) = p1 = ((pi == NULL) ? NULL : page_to_virt(pi));
- pi = alloc_domheap_pages(0,workmem_order,0);
- per_cpu(workmem, cpu) = p2 = ((pi == NULL) ? NULL : page_to_virt(pi));
- if ( (p1 == NULL) || (p2 == NULL) )
- bad_alloc++;
+ void *hcpu = (void *)(long)cpu;
+ cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
}
- if ( bad_alloc )
- printk("tmem: can't allocate compression buffers for %d cpus\n",
- bad_alloc);
-#endif
+
+ register_cpu_notifier(&cpu_nfb);
+
return 1;
}
+
+#else
+
+EXPORT int __init tmh_init(void)
+{
+ return 1;
+}
+
+#endif
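
The new CPU_DEAD/CPU_UP_CANCELED arm makes teardown mirror setup, so repeated hotplug cycles neither leak domheap pages nor double-free: pointers are NULLed after freeing and re-checked before allocating. The __i386__ build keeps a stub tmh_init(), matching the pre-existing #ifndef. The free side, condensed from the hunk (workmem is handled identically):

case CPU_DEAD:
case CPU_UP_CANCELED:
    if ( per_cpu(dstmem, cpu) != NULL )
    {
        free_domheap_pages(virt_to_page(per_cpu(dstmem, cpu)), dstmem_order);
        per_cpu(dstmem, cpu) = NULL;   /* allows clean re-allocation */
    }
    break;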
diff --git a/xen/include/asm-ia64/perfc.h b/xen/include/asm-ia64/perfc.h
index 0ca2189622..40cd752098 100644
--- a/xen/include/asm-ia64/perfc.h
+++ b/xen/include/asm-ia64/perfc.h
@@ -4,10 +4,6 @@
#include <asm/vhpt.h>
#include <asm/privop_stat.h>
-static inline void arch_perfc_printall(void)
-{
-}
-
static inline void arch_perfc_reset(void)
{
reset_privop_addrs();
diff --git a/xen/include/asm-x86/perfc.h b/xen/include/asm-x86/perfc.h
index 31bcdfbbd0..a1a591e803 100644
--- a/xen/include/asm-x86/perfc.h
+++ b/xen/include/asm-x86/perfc.h
@@ -1,10 +1,6 @@
#ifndef __ASM_PERFC_H__
#define __ASM_PERFC_H__
-static inline void arch_perfc_printall(void)
-{
-}
-
static inline void arch_perfc_reset(void)
{
}
diff --git a/xen/include/asm-x86/setup.h b/xen/include/asm-x86/setup.h
index c6b6ea5e2a..743793ab34 100644
--- a/xen/include/asm-x86/setup.h
+++ b/xen/include/asm-x86/setup.h
@@ -22,7 +22,6 @@ void arch_init_memory(void);
void subarch_init_memory(void);
void init_IRQ(void);
-void init_tmem(void);
void vesa_init(void);
void vesa_mtrr_init(void);
diff --git a/xen/include/asm-x86/smp.h b/xen/include/asm-x86/smp.h
index 029b25b658..3f14b0a89a 100644
--- a/xen/include/asm-x86/smp.h
+++ b/xen/include/asm-x86/smp.h
@@ -69,7 +69,6 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm);
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_callin_map;
-/* cpu_possible_map declared in <xen/cpumask.h> */
/* We don't mark CPUs online until __cpu_up(), so we need another measure */
static inline int num_booting_cpus(void)
diff --git a/xen/include/xen/tmem.h b/xen/include/xen/tmem.h
index 1a9b772a69..9140b08847 100644
--- a/xen/include/xen/tmem.h
+++ b/xen/include/xen/tmem.h
@@ -9,7 +9,6 @@
#ifndef __XEN_TMEM_H__
#define __XEN_TMEM_H__
-extern void init_tmem(void);
extern void tmem_destroy(void *);
extern void *tmem_relinquish_pages(unsigned int, unsigned int);
extern int opt_tmem;
diff --git a/xen/xsm/flask/flask_op.c b/xen/xsm/flask/flask_op.c
index 9078a23b4d..c8f10a2ade 100644
--- a/xen/xsm/flask/flask_op.c
+++ b/xen/xsm/flask/flask_op.c
@@ -680,7 +680,6 @@ static int flask_security_avc_cachestats(char *buf, uint32_t count)
char *page = NULL;
int len = 0;
int length = 0;
- long long idx = 0;
int cpu;
struct avc_cache_stats *st;
@@ -701,11 +700,8 @@ static int flask_security_avc_cachestats(char *buf, uint32_t count)
length += len;
count -= len;
- for ( cpu = idx; cpu < NR_CPUS; ++cpu )
+ for_each_online_cpu ( cpu )
{
- if ( !cpu_possible(cpu) )
- continue;
- idx = cpu + 1;
st = &per_cpu(avc_cache_stats, cpu);
len = snprintf(page, PAGE_SIZE, "%u %u %u %u %u %u\n", st->lookups,