Diffstat (limited to 'target/linux/bcm63xx/patches-4.19/322-MIPS-BCM63XX-switch-to-IRQ_DOMAIN.patch')
-rw-r--r--  target/linux/bcm63xx/patches-4.19/322-MIPS-BCM63XX-switch-to-IRQ_DOMAIN.patch  695
1 file changed, 695 insertions, 0 deletions
diff --git a/target/linux/bcm63xx/patches-4.19/322-MIPS-BCM63XX-switch-to-IRQ_DOMAIN.patch b/target/linux/bcm63xx/patches-4.19/322-MIPS-BCM63XX-switch-to-IRQ_DOMAIN.patch
new file mode 100644
index 0000000000..f0ffc654f7
--- /dev/null
+++ b/target/linux/bcm63xx/patches-4.19/322-MIPS-BCM63XX-switch-to-IRQ_DOMAIN.patch
@@ -0,0 +1,695 @@
+From d2d2489e0a4b740abd980e9d1cad952d15bc2d9e Mon Sep 17 00:00:00 2001
+From: Jonas Gorski <jogo@openwrt.org>
+Date: Sun, 30 Nov 2014 14:55:02 +0100
+Subject: [PATCH] MIPS: BCM63XX: switch to IRQ_DOMAIN
+
+Now that we have working IRQ_DOMAIN drivers for both interrupt controllers,
+switch to using them.
+
+Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+---
+ arch/mips/Kconfig | 3 +
+ arch/mips/bcm63xx/irq.c | 612 +++++++++---------------------------------------
+ 2 files changed, 108 insertions(+), 507 deletions(-)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -283,6 +283,9 @@ config BCM63XX
+ select SYNC_R4K
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
++ select BCM6345_EXT_IRQ
++ select BCM6345_PERIPH_IRQ
++ select IRQ_DOMAIN
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
+--- a/arch/mips/bcm63xx/irq.c
++++ b/arch/mips/bcm63xx/irq.c
+@@ -11,7 +11,9 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/irq.h>
+-#include <linux/spinlock.h>
++#include <linux/irqchip.h>
++#include <linux/irqchip/irq-bcm6345-ext.h>
++#include <linux/irqchip/irq-bcm6345-periph.h>
+ #include <asm/irq_cpu.h>
+ #include <asm/mipsregs.h>
+ #include <bcm63xx_cpu.h>
+@@ -19,544 +21,140 @@
+ #include <bcm63xx_io.h>
+ #include <bcm63xx_irq.h>
+
+-
+-static DEFINE_SPINLOCK(ipic_lock);
+-static DEFINE_SPINLOCK(epic_lock);
+-
+-static u32 irq_stat_addr[2];
+-static u32 irq_mask_addr[2];
+-static void (*dispatch_internal)(int cpu);
+-static int is_ext_irq_cascaded;
+-static unsigned int ext_irq_count;
+-static unsigned int ext_irq_start, ext_irq_end;
+-static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
+-static void (*internal_irq_mask)(struct irq_data *d);
+-static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);
+-
+-
+-static inline u32 get_ext_irq_perf_reg(int irq)
+-{
+- if (irq < 4)
+- return ext_irq_cfg_reg1;
+- return ext_irq_cfg_reg2;
+-}
+-
+-static inline void handle_internal(int intbit)
+-{
+- if (is_ext_irq_cascaded &&
+- intbit >= ext_irq_start && intbit <= ext_irq_end)
+- do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
+- else
+- do_IRQ(intbit + IRQ_INTERNAL_BASE);
+-}
+-
+-static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
+- const struct cpumask *m)
+-{
+- bool enable = cpu_online(cpu);
+-
+-#ifdef CONFIG_SMP
+- if (m)
+- enable &= cpumask_test_cpu(cpu, m);
+- else if (irqd_affinity_was_set(d))
+- enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
+-#endif
+- return enable;
+-}
+-
+-/*
+- * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
+- * prioritize any interrupt relatively to another. the static counter
+- * will resume the loop where it ended the last time we left this
+- * function.
+- */
+-
+-#define BUILD_IPIC_INTERNAL(width) \
+-void __dispatch_internal_##width(int cpu) \
+-{ \
+- u32 pending[width / 32]; \
+- unsigned int src, tgt; \
+- bool irqs_pending = false; \
+- static unsigned int i[2]; \
+- unsigned int *next = &i[cpu]; \
+- unsigned long flags; \
+- \
+- /* read registers in reverse order */ \
+- spin_lock_irqsave(&ipic_lock, flags); \
+- for (src = 0, tgt = (width / 32); src < (width / 32); src++) { \
+- u32 val; \
+- \
+- val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
+- val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
+- pending[--tgt] = val; \
+- \
+- if (val) \
+- irqs_pending = true; \
+- } \
+- spin_unlock_irqrestore(&ipic_lock, flags); \
+- \
+- if (!irqs_pending) \
+- return; \
+- \
+- while (1) { \
+- unsigned int to_call = *next; \
+- \
+- *next = (*next + 1) & (width - 1); \
+- if (pending[to_call / 32] & (1 << (to_call & 0x1f))) { \
+- handle_internal(to_call); \
+- break; \
+- } \
+- } \
+-} \
+- \
+-static void __internal_irq_mask_##width(struct irq_data *d) \
+-{ \
+- u32 val; \
+- unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
+- unsigned reg = (irq / 32) ^ (width/32 - 1); \
+- unsigned bit = irq & 0x1f; \
+- unsigned long flags; \
+- int cpu; \
+- \
+- spin_lock_irqsave(&ipic_lock, flags); \
+- for_each_present_cpu(cpu) { \
+- if (!irq_mask_addr[cpu]) \
+- break; \
+- \
+- val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
+- val &= ~(1 << bit); \
+- bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
+- } \
+- spin_unlock_irqrestore(&ipic_lock, flags); \
+-} \
+- \
+-static void __internal_irq_unmask_##width(struct irq_data *d, \
+- const struct cpumask *m) \
+-{ \
+- u32 val; \
+- unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
+- unsigned reg = (irq / 32) ^ (width/32 - 1); \
+- unsigned bit = irq & 0x1f; \
+- unsigned long flags; \
+- int cpu; \
+- \
+- spin_lock_irqsave(&ipic_lock, flags); \
+- for_each_present_cpu(cpu) { \
+- if (!irq_mask_addr[cpu]) \
+- break; \
+- \
+- val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
+- if (enable_irq_for_cpu(cpu, d, m)) \
+- val |= (1 << bit); \
+- else \
+- val &= ~(1 << bit); \
+- bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
+- } \
+- spin_unlock_irqrestore(&ipic_lock, flags); \
+-}
+-
+-BUILD_IPIC_INTERNAL(32);
+-BUILD_IPIC_INTERNAL(64);
+-
+-asmlinkage void plat_irq_dispatch(void)
+-{
+- u32 cause;
+-
+- do {
+- cause = read_c0_cause() & read_c0_status() & ST0_IM;
+-
+- if (!cause)
+- break;
+-
+- if (cause & CAUSEF_IP7)
+- do_IRQ(7);
+- if (cause & CAUSEF_IP0)
+- do_IRQ(0);
+- if (cause & CAUSEF_IP1)
+- do_IRQ(1);
+- if (cause & CAUSEF_IP2)
+- dispatch_internal(0);
+- if (is_ext_irq_cascaded) {
+- if (cause & CAUSEF_IP3)
+- dispatch_internal(1);
+- } else {
+- if (cause & CAUSEF_IP3)
+- do_IRQ(IRQ_EXT_0);
+- if (cause & CAUSEF_IP4)
+- do_IRQ(IRQ_EXT_1);
+- if (cause & CAUSEF_IP5)
+- do_IRQ(IRQ_EXT_2);
+- if (cause & CAUSEF_IP6)
+- do_IRQ(IRQ_EXT_3);
+- }
+- } while (1);
+-}
+-
+-/*
+- * internal IRQs operations: only mask/unmask on PERF irq mask
+- * register.
+- */
+-static void bcm63xx_internal_irq_mask(struct irq_data *d)
+-{
+- internal_irq_mask(d);
+-}
+-
+-static void bcm63xx_internal_irq_unmask(struct irq_data *d)
+-{
+- internal_irq_unmask(d, NULL);
+-}
+-
+-/*
+- * external IRQs operations: mask/unmask and clear on PERF external
+- * irq control register.
+- */
+-static void bcm63xx_external_irq_mask(struct irq_data *d)
+-{
+- unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
+- u32 reg, regaddr;
+- unsigned long flags;
+-
+- regaddr = get_ext_irq_perf_reg(irq);
+- spin_lock_irqsave(&epic_lock, flags);
+- reg = bcm_perf_readl(regaddr);
+-
+- if (BCMCPU_IS_6348())
+- reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
+- else
+- reg &= ~EXTIRQ_CFG_MASK(irq % 4);
+-
+- bcm_perf_writel(reg, regaddr);
+- spin_unlock_irqrestore(&epic_lock, flags);
+-
+- if (is_ext_irq_cascaded)
+- internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
+-}
+-
+-static void bcm63xx_external_irq_unmask(struct irq_data *d)
+-{
+- unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
+- u32 reg, regaddr;
+- unsigned long flags;
+-
+- regaddr = get_ext_irq_perf_reg(irq);
+- spin_lock_irqsave(&epic_lock, flags);
+- reg = bcm_perf_readl(regaddr);
+-
+- if (BCMCPU_IS_6348())
+- reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
+- else
+- reg |= EXTIRQ_CFG_MASK(irq % 4);
+-
+- bcm_perf_writel(reg, regaddr);
+- spin_unlock_irqrestore(&epic_lock, flags);
+-
+- if (is_ext_irq_cascaded)
+- internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
+- NULL);
+-}
+-
+-static void bcm63xx_external_irq_clear(struct irq_data *d)
+-{
+- unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
+- u32 reg, regaddr;
+- unsigned long flags;
+-
+- regaddr = get_ext_irq_perf_reg(irq);
+- spin_lock_irqsave(&epic_lock, flags);
+- reg = bcm_perf_readl(regaddr);
+-
+- if (BCMCPU_IS_6348())
+- reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
+- else
+- reg |= EXTIRQ_CFG_CLEAR(irq % 4);
+-
+- bcm_perf_writel(reg, regaddr);
+- spin_unlock_irqrestore(&epic_lock, flags);
+-}
+-
+-static int bcm63xx_external_irq_set_type(struct irq_data *d,
+- unsigned int flow_type)
+-{
+- unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
+- u32 reg, regaddr;
+- int levelsense, sense, bothedge;
+- unsigned long flags;
+-
+- flow_type &= IRQ_TYPE_SENSE_MASK;
+-
+- if (flow_type == IRQ_TYPE_NONE)
+- flow_type = IRQ_TYPE_LEVEL_LOW;
+-
+- levelsense = sense = bothedge = 0;
+- switch (flow_type) {
+- case IRQ_TYPE_EDGE_BOTH:
+- bothedge = 1;
+- break;
+-
+- case IRQ_TYPE_EDGE_RISING:
+- sense = 1;
+- break;
+-
+- case IRQ_TYPE_EDGE_FALLING:
+- break;
+-
+- case IRQ_TYPE_LEVEL_HIGH:
+- levelsense = 1;
+- sense = 1;
+- break;
+-
+- case IRQ_TYPE_LEVEL_LOW:
+- levelsense = 1;
+- break;
+-
+- default:
+- pr_err("bogus flow type combination given !\n");
+- return -EINVAL;
+- }
+-
+- regaddr = get_ext_irq_perf_reg(irq);
+- spin_lock_irqsave(&epic_lock, flags);
+- reg = bcm_perf_readl(regaddr);
+- irq %= 4;
+-
+- switch (bcm63xx_get_cpu_id()) {
+- case BCM6348_CPU_ID:
+- if (levelsense)
+- reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
+- else
+- reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
+- if (sense)
+- reg |= EXTIRQ_CFG_SENSE_6348(irq);
+- else
+- reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
+- if (bothedge)
+- reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
+- else
+- reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
+- break;
+-
+- case BCM3368_CPU_ID:
+- case BCM6328_CPU_ID:
+- case BCM6338_CPU_ID:
+- case BCM6345_CPU_ID:
+- case BCM6358_CPU_ID:
+- case BCM6362_CPU_ID:
+- case BCM6368_CPU_ID:
+- if (levelsense)
+- reg |= EXTIRQ_CFG_LEVELSENSE(irq);
+- else
+- reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
+- if (sense)
+- reg |= EXTIRQ_CFG_SENSE(irq);
+- else
+- reg &= ~EXTIRQ_CFG_SENSE(irq);
+- if (bothedge)
+- reg |= EXTIRQ_CFG_BOTHEDGE(irq);
+- else
+- reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
+- break;
+- default:
+- BUG();
+- }
+-
+- bcm_perf_writel(reg, regaddr);
+- spin_unlock_irqrestore(&epic_lock, flags);
+-
+- irqd_set_trigger_type(d, flow_type);
+- if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+- irq_set_handler_locked(d, handle_level_irq);
+- else
+- irq_set_handler_locked(d, handle_edge_irq);
+-
+- return IRQ_SET_MASK_OK_NOCOPY;
+-}
+-
+-#ifdef CONFIG_SMP
+-static int bcm63xx_internal_set_affinity(struct irq_data *data,
+- const struct cpumask *dest,
+- bool force)
+-{
+- if (!irqd_irq_disabled(data))
+- internal_irq_unmask(data, dest);
+-
+- return 0;
+-}
+-#endif
+-
+-static struct irq_chip bcm63xx_internal_irq_chip = {
+- .name = "bcm63xx_ipic",
+- .irq_mask = bcm63xx_internal_irq_mask,
+- .irq_unmask = bcm63xx_internal_irq_unmask,
+-};
+-
+-static struct irq_chip bcm63xx_external_irq_chip = {
+- .name = "bcm63xx_epic",
+- .irq_ack = bcm63xx_external_irq_clear,
+-
+- .irq_mask = bcm63xx_external_irq_mask,
+- .irq_unmask = bcm63xx_external_irq_unmask,
+-
+- .irq_set_type = bcm63xx_external_irq_set_type,
+-};
+-
+-static struct irqaction cpu_ip2_cascade_action = {
+- .handler = no_action,
+- .name = "cascade_ip2",
+- .flags = IRQF_NO_THREAD,
+-};
+-
+-#ifdef CONFIG_SMP
+-static struct irqaction cpu_ip3_cascade_action = {
+- .handler = no_action,
+- .name = "cascade_ip3",
+- .flags = IRQF_NO_THREAD,
+-};
+-#endif
+-
+-static struct irqaction cpu_ext_cascade_action = {
+- .handler = no_action,
+- .name = "cascade_extirq",
+- .flags = IRQF_NO_THREAD,
+-};
+-
+-static void bcm63xx_init_irq(void)
++void __init arch_init_irq(void)
+ {
+- int irq_bits;
+-
+- irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
+- irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
+- irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
+- irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);
++ void __iomem *periph_bases[2];
++ void __iomem *ext_intc_bases[2];
++ int periph_irq_count, periph_width, ext_irq_count, ext_shift;
++ int periph_irqs[2] = { 2, 3 };
++ int ext_irqs[6];
++
++ periph_bases[0] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
++ periph_bases[1] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
++ ext_intc_bases[0] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
++ ext_intc_bases[1] = (void __iomem *)bcm63xx_regset_address(RSET_PERF);
+
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM3368_CPU_ID:
+- irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
+- irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
+- irq_stat_addr[1] = 0;
+- irq_mask_addr[1] = 0;
+- irq_bits = 32;
+- ext_irq_count = 4;
+- ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
++ periph_bases[0] += PERF_IRQMASK_3368_REG;
++ periph_irq_count = 1;
++ periph_width = 1;
++
++ ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_3368;
++ ext_irq_count = 4;
++ ext_irqs[0] = BCM_3368_EXT_IRQ0;
++ ext_irqs[1] = BCM_3368_EXT_IRQ1;
++ ext_irqs[2] = BCM_3368_EXT_IRQ2;
++ ext_irqs[3] = BCM_3368_EXT_IRQ3;
++ ext_shift = 4;
+ break;
+ case BCM6328_CPU_ID:
+- irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
+- irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
+- irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
+- irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
+- irq_bits = 64;
+- ext_irq_count = 4;
+- is_ext_irq_cascaded = 1;
+- ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+- ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+- ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
++ periph_bases[0] += PERF_IRQMASK_6328_REG(0);
++ periph_bases[1] += PERF_IRQMASK_6328_REG(1);
++ periph_irq_count = 2;
++ periph_width = 2;
++
++ ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6328;
++ ext_irq_count = 4;
++ ext_irqs[0] = BCM_6328_EXT_IRQ0;
++ ext_irqs[1] = BCM_6328_EXT_IRQ1;
++ ext_irqs[2] = BCM_6328_EXT_IRQ2;
++ ext_irqs[3] = BCM_6328_EXT_IRQ3;
++ ext_shift = 4;
+ break;
+ case BCM6338_CPU_ID:
+- irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
+- irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
+- irq_stat_addr[1] = 0;
+- irq_mask_addr[1] = 0;
+- irq_bits = 32;
+- ext_irq_count = 4;
+- ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
++ periph_bases[0] += PERF_IRQMASK_6338_REG;
++ periph_irq_count = 1;
++ periph_width = 1;
++
++ ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6338;
++ ext_irq_count = 4;
++ ext_irqs[0] = 3;
++ ext_irqs[1] = 4;
++ ext_irqs[2] = 5;
++ ext_irqs[3] = 6;
++ ext_shift = 4;
+ break;
+ case BCM6345_CPU_ID:
+- irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
+- irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
+- irq_stat_addr[1] = 0;
+- irq_mask_addr[1] = 0;
+- irq_bits = 32;
+- ext_irq_count = 4;
+- ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
++ periph_bases[0] += PERF_IRQMASK_6345_REG;
++ periph_irq_count = 1;
++ periph_width = 1;
++
++ ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6345;
++ ext_irq_count = 4;
++ ext_irqs[0] = 3;
++ ext_irqs[1] = 4;
++ ext_irqs[2] = 5;
++ ext_irqs[3] = 6;
++ ext_shift = 4;
+ break;
+ case BCM6348_CPU_ID:
+- irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
+- irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
+- irq_stat_addr[1] = 0;
+- irq_mask_addr[1] = 0;
+- irq_bits = 32;
+- ext_irq_count = 4;
+- ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
++ periph_bases[0] += PERF_IRQMASK_6348_REG;
++ periph_irq_count = 1;
++ periph_width = 1;
++
++ ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6348;
++ ext_irq_count = 4;
++ ext_irqs[0] = 3;
++ ext_irqs[1] = 4;
++ ext_irqs[2] = 5;
++ ext_irqs[3] = 6;
++ ext_shift = 5;
+ break;
+ case BCM6358_CPU_ID:
+- irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
+- irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
+- irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
+- irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
+- irq_bits = 32;
+- ext_irq_count = 4;
+- is_ext_irq_cascaded = 1;
+- ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+- ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+- ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
++ periph_bases[0] += PERF_IRQMASK_6358_REG(0);
++ periph_bases[1] += PERF_IRQMASK_6358_REG(1);
++ periph_irq_count = 2;
++ periph_width = 1;
++
++ ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6358;
++ ext_irq_count = 4;
++ ext_irqs[0] = BCM_6358_EXT_IRQ0;
++ ext_irqs[1] = BCM_6358_EXT_IRQ1;
++ ext_irqs[2] = BCM_6358_EXT_IRQ2;
++ ext_irqs[3] = BCM_6358_EXT_IRQ3;
++ ext_shift = 4;
+ break;
+ case BCM6362_CPU_ID:
+- irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
+- irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
+- irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
+- irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
+- irq_bits = 64;
+- ext_irq_count = 4;
+- is_ext_irq_cascaded = 1;
+- ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+- ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+- ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
++ periph_bases[0] += PERF_IRQMASK_6362_REG(0);
++ periph_bases[1] += PERF_IRQMASK_6362_REG(1);
++ periph_irq_count = 2;
++ periph_width = 2;
++
++ ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6362;
++ ext_irq_count = 4;
++ ext_irqs[0] = BCM_6362_EXT_IRQ0;
++ ext_irqs[1] = BCM_6362_EXT_IRQ1;
++ ext_irqs[2] = BCM_6362_EXT_IRQ2;
++ ext_irqs[3] = BCM_6362_EXT_IRQ3;
++ ext_shift = 4;
+ break;
+ case BCM6368_CPU_ID:
+- irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
+- irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
+- irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
+- irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
+- irq_bits = 64;
++ periph_bases[0] += PERF_IRQMASK_6368_REG(0);
++ periph_bases[1] += PERF_IRQMASK_6368_REG(1);
++ periph_irq_count = 2;
++ periph_width = 2;
++
++ ext_intc_bases[0] += PERF_EXTIRQ_CFG_REG_6368;
++ ext_intc_bases[1] += PERF_EXTIRQ_CFG_REG2_6368;
+ ext_irq_count = 6;
+- is_ext_irq_cascaded = 1;
+- ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+- ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
+- ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
+- ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
++ ext_irqs[0] = BCM_6368_EXT_IRQ0;
++ ext_irqs[1] = BCM_6368_EXT_IRQ1;
++ ext_irqs[2] = BCM_6368_EXT_IRQ2;
++ ext_irqs[3] = BCM_6368_EXT_IRQ3;
++ ext_irqs[4] = BCM_6368_EXT_IRQ4;
++ ext_irqs[5] = BCM_6368_EXT_IRQ5;
++ ext_shift = 4;
+ break;
+ default:
+ BUG();
+ }
+
+- if (irq_bits == 32) {
+- dispatch_internal = __dispatch_internal_32;
+- internal_irq_mask = __internal_irq_mask_32;
+- internal_irq_unmask = __internal_irq_unmask_32;
+- } else {
+- dispatch_internal = __dispatch_internal_64;
+- internal_irq_mask = __internal_irq_mask_64;
+- internal_irq_unmask = __internal_irq_unmask_64;
+- }
+-}
+-
+-void __init arch_init_irq(void)
+-{
+- int i;
+-
+- bcm63xx_init_irq();
+ mips_cpu_irq_init();
+- for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
+- irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
+- handle_level_irq);
+-
+- for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
+- irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
+- handle_edge_irq);
+-
+- if (!is_ext_irq_cascaded) {
+- for (i = 3; i < 3 + ext_irq_count; ++i)
+- setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
+- }
+-
+- setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
+-#ifdef CONFIG_SMP
+- if (is_ext_irq_cascaded) {
+- setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action);
+- bcm63xx_internal_irq_chip.irq_set_affinity =
+- bcm63xx_internal_set_affinity;
+-
+- cpumask_clear(irq_default_affinity);
+- cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+- }
+-#endif
++ bcm6345_periph_intc_init(periph_irq_count, periph_irqs, periph_bases,
++ periph_width);
++ bcm6345_ext_intc_init(4, ext_irqs, ext_intc_bases[0], ext_shift);
++ if (ext_irq_count > 4)
++ bcm6345_ext_intc_init(2, &ext_irqs[4], ext_intc_bases[1],
++ ext_shift);
+ }
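
As a condensed illustration of the new entry points used above (not part of the patch itself): the sketch below shows how a single-word controller such as the BCM6338 would be registered, assuming the bcm6345_periph_intc_init()/bcm6345_ext_intc_init() prototypes provided by the companion irqchip patches and the PERF_* register constants from bcm63xx_regs.h. The bcm6338_irq_sketch() helper and its comments are illustrative only; the authoritative per-SoC values are in the switch statement of arch_init_irq() above.

#include <linux/init.h>
#include <linux/irqchip/irq-bcm6345-ext.h>
#include <linux/irqchip/irq-bcm6345-periph.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>

/* Illustrative sketch only: register the BCM6338's interrupt controllers. */
static void __init bcm6338_irq_sketch(void)
{
	void __iomem *periph_base;
	void __iomem *ext_base;
	int periph_irqs[1] = { 2 };		/* cascaded off MIPS CPU IP2 */
	int ext_irqs[4] = { 3, 4, 5, 6 };	/* external lines on CPU IP3..IP6 */

	periph_base = (void __iomem *)bcm63xx_regset_address(RSET_PERF) +
		      PERF_IRQMASK_6338_REG;
	ext_base = (void __iomem *)bcm63xx_regset_address(RSET_PERF) +
		   PERF_EXTIRQ_CFG_REG_6338;

	/* one parent IRQ, one 32-bit status/mask word per parent */
	bcm6345_periph_intc_init(1, periph_irqs, &periph_base, 1);
	/* four external IRQs, 4 config bits per line in the CFG register */
	bcm6345_ext_intc_init(4, ext_irqs, ext_base, 4);
}

SMP-capable chips such as the BCM6358 simply pass two base addresses and two parent IRQs (IP2 and IP3), as the corresponding case in arch_init_irq() above shows.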