author	Keir Fraser <keir@xen.org>	2010-12-01 20:12:12 +0000
committer	Keir Fraser <keir@xen.org>	2010-12-01 20:12:12 +0000
commit	7fe20b9f9657a2541a5fdc6ad9f43a20f0daa431 (patch)
tree	5c968254c911ecfbd16d562ef7594aba456c8f69
parent	4f2ae0a845add6bdc8e3b7014b070cbb63ff6c4a (diff)
x86/IRQ: pass CPU masks by reference rather than by value in more places
Additionally simplify operations on them in a few cases.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
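The pattern applied throughout the hunks below: cpumask_t is a bitmap sized by NR_CPUS, so returning or passing one by value copies the whole structure on every call, whereas a const pointer hands out a shared, read-only view. A minimal stand-alone sketch of the before/after shapes follows; it is not part of the patch, and the simplified cpumask_t and helper names are illustrative only, not Xen's own.

#include <stdio.h>

/* Stand-in for Xen's cpumask_t: a fixed-size bitmap (here, up to 64 CPUs). */
typedef struct { unsigned long bits; } cpumask_t;

static cpumask_t cpu_online_map = { 0xffUL };   /* CPUs 0-7 online */

/* Old style: the whole bitmap is copied on return and on each argument. */
static cpumask_t target_cpus_byval(void)
{
    return cpu_online_map;                       /* struct copy */
}

static unsigned int first_cpu_byval(cpumask_t mask)
{
    return __builtin_ctzl(mask.bits);            /* operates on the copy */
}

/* New style: callers and callees share one const-qualified bitmap. */
static const cpumask_t *target_cpus_byref(void)
{
    return &cpu_online_map;                      /* pointer only */
}

static unsigned int first_cpu_byref(const cpumask_t *mask)
{
    return __builtin_ctzl(mask->bits);           /* reads through the pointer */
}

int main(void)
{
    cpumask_t m = target_cpus_byval();
    printf("by value:     first cpu %u\n", first_cpu_byval(m));
    printf("by reference: first cpu %u\n",
           first_cpu_byref(target_cpus_byref()));
    return 0;
}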
-rw-r--r--	xen/arch/x86/genapic/delivery.c	24
-rw-r--r--	xen/arch/x86/genapic/x2apic.c	16
-rw-r--r--	xen/arch/x86/hpet.c	2
-rw-r--r--	xen/arch/x86/io_apic.c	19
-rw-r--r--	xen/arch/x86/irq.c	23
-rw-r--r--	xen/arch/x86/msi.c	5
-rw-r--r--	xen/drivers/passthrough/amd/iommu_init.c	2
-rw-r--r--	xen/drivers/passthrough/vtd/iommu.c	2
-rw-r--r--	xen/include/asm-x86/genapic.h	26
-rw-r--r--	xen/include/asm-x86/irq.h	2
-rw-r--r--	xen/include/xen/irq.h	8
11 files changed, 63 insertions, 66 deletions
diff --git a/xen/arch/x86/genapic/delivery.c b/xen/arch/x86/genapic/delivery.c
index f618d75ba5..54ab6d0c22 100644
--- a/xen/arch/x86/genapic/delivery.c
+++ b/xen/arch/x86/genapic/delivery.c
@@ -26,19 +26,19 @@ void clustered_apic_check_flat(void)
printk("Enabling APIC mode: Flat. Using %d I/O APICs\n", nr_ioapics);
}
-cpumask_t target_cpus_flat(void)
+const cpumask_t *target_cpus_flat(void)
{
- return cpu_online_map;
+ return &cpu_online_map;
}
-cpumask_t vector_allocation_cpumask_flat(int cpu)
+const cpumask_t *vector_allocation_cpumask_flat(int cpu)
{
- return cpu_online_map;
+ return &cpu_online_map;
}
-unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask)
+unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask)
{
- return cpus_addr(cpumask)[0]&0xFF;
+ return cpus_addr(*cpumask)[0]&0xFF;
}
/*
@@ -59,18 +59,18 @@ void clustered_apic_check_phys(void)
printk("Enabling APIC mode: Phys. Using %d I/O APICs\n", nr_ioapics);
}
-cpumask_t target_cpus_phys(void)
+const cpumask_t *target_cpus_phys(void)
{
- return cpu_online_map;
+ return &cpu_online_map;
}
-cpumask_t vector_allocation_cpumask_phys(int cpu)
+const cpumask_t *vector_allocation_cpumask_phys(int cpu)
{
- return cpumask_of_cpu(cpu);
+ return cpumask_of(cpu);
}
-unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask)
+unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask)
{
/* As we are using single CPU as destination, pick only one CPU here */
- return cpu_physical_id(first_cpu(cpumask));
+ return cpu_physical_id(cpumask_first(cpumask));
}
diff --git a/xen/arch/x86/genapic/x2apic.c b/xen/arch/x86/genapic/x2apic.c
index 93b9765c8a..0d36d34948 100644
--- a/xen/arch/x86/genapic/x2apic.c
+++ b/xen/arch/x86/genapic/x2apic.c
@@ -89,24 +89,24 @@ void clustered_apic_check_x2apic(void)
return;
}
-cpumask_t target_cpus_x2apic(void)
+const cpumask_t *target_cpus_x2apic(void)
{
- return cpu_online_map;
+ return &cpu_online_map;
}
-cpumask_t vector_allocation_cpumask_x2apic(int cpu)
+const cpumask_t *vector_allocation_cpumask_x2apic(int cpu)
{
- return cpumask_of_cpu(cpu);
+ return cpumask_of(cpu);
}
-unsigned int cpu_mask_to_apicid_x2apic_phys(cpumask_t cpumask)
+unsigned int cpu_mask_to_apicid_x2apic_phys(const cpumask_t *cpumask)
{
- return cpu_physical_id(first_cpu(cpumask));
+ return cpu_physical_id(cpumask_first(cpumask));
}
-unsigned int cpu_mask_to_apicid_x2apic_cluster(cpumask_t cpumask)
+unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask)
{
- return cpu_2_logical_apicid[first_cpu(cpumask)];
+ return cpu_2_logical_apicid[cpumask_first(cpumask)];
}
static void __send_IPI_mask_x2apic(
diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index b26f677967..b4a077d0b4 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -336,7 +336,7 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
struct irq_desc * desc = irq_to_desc(irq);
struct irq_cfg *cfg= desc->chip_data;
- dest = set_desc_affinity(desc, mask);
+ dest = set_desc_affinity(desc, &mask);
if (dest == BAD_APICID)
return;
diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c
index 916be1049a..401f2ecd3f 100644
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -460,7 +460,7 @@ void irq_complete_move(struct irq_desc **descp)
send_cleanup_vector(cfg);
}
-unsigned int set_desc_affinity(struct irq_desc *desc, cpumask_t mask)
+unsigned int set_desc_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
struct irq_cfg *cfg;
unsigned int irq;
@@ -468,7 +468,7 @@ unsigned int set_desc_affinity(struct irq_desc *desc, cpumask_t mask)
unsigned long flags;
cpumask_t dest_mask;
- if (!cpus_intersects(mask, cpu_online_map))
+ if (!cpus_intersects(*mask, cpu_online_map))
return BAD_APICID;
irq = desc->irq;
@@ -483,15 +483,14 @@ unsigned int set_desc_affinity(struct irq_desc *desc, cpumask_t mask)
if (ret < 0)
return BAD_APICID;
- cpus_copy(desc->affinity, mask);
- cpus_and(dest_mask, desc->affinity, cfg->cpu_mask);
+ cpus_copy(desc->affinity, *mask);
+ cpus_and(dest_mask, *mask, cfg->cpu_mask);
- return cpu_mask_to_apicid(dest_mask);
+ return cpu_mask_to_apicid(&dest_mask);
}
static void
-set_ioapic_affinity_irq_desc(struct irq_desc *desc,
- const struct cpumask mask)
+set_ioapic_affinity_irq_desc(struct irq_desc *desc, const cpumask_t *mask)
{
unsigned long flags;
unsigned int dest;
@@ -536,7 +535,7 @@ set_ioapic_affinity_irq(unsigned int irq, const struct cpumask mask)
desc = irq_to_desc(irq);
- set_ioapic_affinity_irq_desc(desc, mask);
+ set_ioapic_affinity_irq_desc(desc, &mask);
}
#endif /* CONFIG_SMP */
@@ -992,7 +991,7 @@ static void __init setup_IO_APIC_irqs(void)
}
cfg = irq_cfg(irq);
SET_DEST(entry.dest.dest32, entry.dest.logical.logical_dest,
- cpu_mask_to_apicid(cfg->cpu_mask));
+ cpu_mask_to_apicid(&cfg->cpu_mask));
spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
@@ -2447,7 +2446,7 @@ int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
rte.vector = cfg->vector;
SET_DEST(rte.dest.dest32, rte.dest.logical.logical_dest,
- cpu_mask_to_apicid(cfg->cpu_mask));
+ cpu_mask_to_apicid(&cfg->cpu_mask));
io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&rte) + 0));
io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&rte) + 1));
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 3dfe4901e9..08610b7bdf 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -330,7 +330,7 @@ hw_irq_controller no_irq_type = {
atomic_t irq_err_count;
-int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+int __assign_irq_vector(int irq, struct irq_cfg *cfg, const cpumask_t *mask)
{
/*
* NOTE! The local APIC isn't very good at handling
@@ -350,9 +350,8 @@ int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
old_vector = irq_to_vector(irq);
if (old_vector) {
- cpus_and(tmp_mask, mask, cpu_online_map);
- cpus_and(tmp_mask, cfg->cpu_mask, tmp_mask);
- if (!cpus_empty(tmp_mask)) {
+ cpus_and(tmp_mask, *mask, cpu_online_map);
+ if (cpus_intersects(tmp_mask, cfg->cpu_mask)) {
cfg->vector = old_vector;
return 0;
}
@@ -361,16 +360,16 @@ int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
if ((cfg->move_in_progress) || cfg->move_cleanup_count)
return -EAGAIN;
- /* Only try and allocate irqs on cpus that are present */
- cpus_and(mask, mask, cpu_online_map);
-
err = -ENOSPC;
- for_each_cpu_mask(cpu, mask) {
+ for_each_cpu_mask(cpu, *mask) {
int new_cpu;
int vector, offset;
- tmp_mask = vector_allocation_cpumask(cpu);
- cpus_and(tmp_mask, tmp_mask, cpu_online_map);
+ /* Only try and allocate irqs on cpus that are present. */
+ if (!cpu_online(cpu))
+ continue;
+
+ cpus_and(tmp_mask, *vector_allocation_cpumask(cpu), cpu_online_map);
vector = current_vector;
offset = current_offset;
@@ -1747,14 +1746,14 @@ void fixup_irqs(void)
spin_lock(&desc->lock);
affinity = desc->affinity;
- if ( !desc->action || cpus_equal(affinity, cpu_online_map) )
+ if ( !desc->action || cpus_subset(affinity, cpu_online_map) )
{
spin_unlock(&desc->lock);
continue;
}
cpus_and(affinity, affinity, cpu_online_map);
- if ( any_online_cpu(affinity) == NR_CPUS )
+ if ( cpus_empty(affinity) )
{
break_affinity = 1;
affinity = cpu_online_map;
diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c
index 88f3a3707d..81beb290ed 100644
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -134,8 +134,7 @@ void msi_compose_msg(struct pci_dev *pdev, int irq,
}
if ( vector ) {
-
- dest = cpu_mask_to_apicid(domain);
+ dest = cpu_mask_to_apicid(&domain);
msg->address_hi = MSI_ADDR_BASE_HI;
msg->address_lo =
@@ -275,7 +274,7 @@ void set_msi_affinity(unsigned int irq, cpumask_t mask)
struct msi_desc *msi_desc = desc->msi_desc;
struct irq_cfg *cfg = desc->chip_data;
- dest = set_desc_affinity(desc, mask);
+ dest = set_desc_affinity(desc, &mask);
if (dest == BAD_APICID || !msi_desc)
return;
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index 8afd81babd..70d764dc26 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -356,7 +356,7 @@ static void iommu_msi_set_affinity(unsigned int irq, cpumask_t mask)
u8 dev = PCI_SLOT(iommu->bdf & 0xff);
u8 func = PCI_FUNC(iommu->bdf & 0xff);
- dest = set_desc_affinity(desc, mask);
+ dest = set_desc_affinity(desc, &mask);
if (dest == BAD_APICID){
dprintk(XENLOG_ERR, "Set iommu interrupt affinity error!\n");
return;
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 47b6f68075..ee64656635 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1011,7 +1011,7 @@ static void dma_msi_set_affinity(unsigned int irq, cpumask_t mask)
struct irq_cfg *cfg = desc->chip_data;
#ifdef CONFIG_X86
- dest = set_desc_affinity(desc, mask);
+ dest = set_desc_affinity(desc, &mask);
if (dest == BAD_APICID){
dprintk(XENLOG_ERR VTDPREFIX, "Set iommu interrupt affinity error!\n");
return;
diff --git a/xen/include/asm-x86/genapic.h b/xen/include/asm-x86/genapic.h
index bd8debb8d5..2060bab48b 100644
--- a/xen/include/asm-x86/genapic.h
+++ b/xen/include/asm-x86/genapic.h
@@ -33,9 +33,9 @@ struct genapic {
int int_dest_mode;
void (*init_apic_ldr)(void);
void (*clustered_apic_check)(void);
- cpumask_t (*target_cpus)(void);
- cpumask_t (*vector_allocation_cpumask)(int cpu);
- unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+ const cpumask_t *(*target_cpus)(void);
+ const cpumask_t *(*vector_allocation_cpumask)(int cpu);
+ unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
void (*send_IPI_mask)(const cpumask_t *mask, int vector);
void (*send_IPI_self)(int vector);
};
@@ -54,11 +54,11 @@ extern const struct genapic apic_x2apic_cluster;
void init_apic_ldr_flat(void);
void clustered_apic_check_flat(void);
-cpumask_t target_cpus_flat(void);
-unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask);
+const cpumask_t *target_cpus_flat(void);
+unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask);
void send_IPI_mask_flat(const cpumask_t *mask, int vector);
void send_IPI_self_flat(int vector);
-cpumask_t vector_allocation_cpumask_flat(int cpu);
+const cpumask_t *vector_allocation_cpumask_flat(int cpu);
#define GENAPIC_FLAT \
.int_delivery_mode = dest_LowestPrio, \
.int_dest_mode = 1 /* logical delivery */, \
@@ -74,13 +74,13 @@ const struct genapic *apic_x2apic_probe(void);
void init_apic_ldr_x2apic_phys(void);
void init_apic_ldr_x2apic_cluster(void);
void clustered_apic_check_x2apic(void);
-cpumask_t target_cpus_x2apic(void);
-unsigned int cpu_mask_to_apicid_x2apic_phys(cpumask_t cpumask);
-unsigned int cpu_mask_to_apicid_x2apic_cluster(cpumask_t cpumask);
+const cpumask_t *target_cpus_x2apic(void);
+unsigned int cpu_mask_to_apicid_x2apic_phys(const cpumask_t *cpumask);
+unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask);
void send_IPI_mask_x2apic_phys(const cpumask_t *mask, int vector);
void send_IPI_mask_x2apic_cluster(const cpumask_t *mask, int vector);
void send_IPI_self_x2apic(int vector);
-cpumask_t vector_allocation_cpumask_x2apic(int cpu);
+const cpumask_t *vector_allocation_cpumask_x2apic(int cpu);
#define GENAPIC_X2APIC_PHYS \
.int_delivery_mode = dest_Fixed, \
.int_dest_mode = 0 /* physical delivery */, \
@@ -105,11 +105,11 @@ cpumask_t vector_allocation_cpumask_x2apic(int cpu);
void init_apic_ldr_phys(void);
void clustered_apic_check_phys(void);
-cpumask_t target_cpus_phys(void);
-unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask);
+const cpumask_t *target_cpus_phys(void);
+unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask);
void send_IPI_mask_phys(const cpumask_t *mask, int vector);
void send_IPI_self_phys(int vector);
-cpumask_t vector_allocation_cpumask_phys(int cpu);
+const cpumask_t *vector_allocation_cpumask_phys(int cpu);
#define GENAPIC_PHYS \
.int_delivery_mode = dest_Fixed, \
.int_dest_mode = 0 /* physical delivery */, \
diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h
index d8ead87916..601828973d 100644
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -139,7 +139,7 @@ void __setup_vector_irq(int cpu);
void move_native_irq(int irq);
void move_masked_irq(int irq);
-int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
+int __assign_irq_vector(int irq, struct irq_cfg *, const cpumask_t *);
int bind_irq_vector(int irq, int vector, cpumask_t domain);
diff --git a/xen/include/xen/irq.h b/xen/include/xen/irq.h
index 2ae5a43d42..02456e4a80 100644
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -143,16 +143,16 @@ extern void pirq_set_affinity(struct domain *d, int irq, const cpumask_t *);
extern irq_desc_t *domain_spin_lock_irq_desc(
struct domain *d, int irq, unsigned long *pflags);
-static inline void set_native_irq_info(unsigned int irq, cpumask_t mask)
+static inline void set_native_irq_info(unsigned int irq, const cpumask_t *mask)
{
- irq_desc[irq].affinity = mask;
+ irq_desc[irq].affinity = *mask;
}
static inline void set_irq_info(int irq, cpumask_t mask)
{
- set_native_irq_info(irq, mask);
+ set_native_irq_info(irq, &mask);
}
-unsigned int set_desc_affinity(struct irq_desc *desc, cpumask_t mask);
+unsigned int set_desc_affinity(struct irq_desc *, const cpumask_t *);
#endif /* __XEN_IRQ_H__ */