| field | value | timestamp |
|---|---|---|
| author | fishsoupisgood <github@madingley.org> | 2019-04-29 01:17:54 +0100 |
| committer | fishsoupisgood <github@madingley.org> | 2019-05-27 03:43:43 +0100 |
| commit | 3f2546b2ef55b661fd8dd69682b38992225e86f6 (patch) | |
| tree | 65ca85f13617aee1dce474596800950f266a456c /hw/intc | |
Diffstat (limited to 'hw/intc')
34 files changed, 14347 insertions, 0 deletions
diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs new file mode 100644 index 00000000..092d8a80 --- /dev/null +++ b/hw/intc/Makefile.objs @@ -0,0 +1,30 @@ +common-obj-$(CONFIG_HEATHROW_PIC) += heathrow_pic.o +common-obj-$(CONFIG_I8259) += i8259_common.o i8259.o +common-obj-$(CONFIG_PL190) += pl190.o +common-obj-$(CONFIG_PUV3) += puv3_intc.o +common-obj-$(CONFIG_XILINX) += xilinx_intc.o +common-obj-$(CONFIG_ETRAXFS) += etraxfs_pic.o +common-obj-$(CONFIG_IMX) += imx_avic.o +common-obj-$(CONFIG_LM32) += lm32_pic.o +common-obj-$(CONFIG_REALVIEW) += realview_gic.o +common-obj-$(CONFIG_SLAVIO) += slavio_intctl.o +common-obj-$(CONFIG_IOAPIC) += ioapic_common.o +common-obj-$(CONFIG_ARM_GIC) += arm_gic_common.o +common-obj-$(CONFIG_ARM_GIC) += arm_gic.o +common-obj-$(CONFIG_ARM_GIC) += arm_gicv2m.o +common-obj-$(CONFIG_OPENPIC) += openpic.o + +obj-$(CONFIG_APIC) += apic.o apic_common.o +obj-$(CONFIG_ARM_GIC_KVM) += arm_gic_kvm.o +obj-$(CONFIG_STELLARIS) += armv7m_nvic.o +obj-$(CONFIG_EXYNOS4) += exynos4210_gic.o exynos4210_combiner.o +obj-$(CONFIG_GRLIB) += grlib_irqmp.o +obj-$(CONFIG_IOAPIC) += ioapic.o +obj-$(CONFIG_OMAP) += omap_intc.o +obj-$(CONFIG_OPENPIC_KVM) += openpic_kvm.o +obj-$(CONFIG_SH4) += sh_intc.o +obj-$(CONFIG_XICS) += xics.o +obj-$(CONFIG_XICS_KVM) += xics_kvm.o +obj-$(CONFIG_ALLWINNER_A10_PIC) += allwinner-a10-pic.o +obj-$(CONFIG_S390_FLIC) += s390_flic.o +obj-$(CONFIG_S390_FLIC_KVM) += s390_flic_kvm.o diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c new file mode 100644 index 00000000..eed7621f --- /dev/null +++ b/hw/intc/allwinner-a10-pic.c @@ -0,0 +1,212 @@ +/* + * Allwinner A10 interrupt controller device emulation + * + * Copyright (C) 2013 Li Guang + * Written by Li Guang <lig.fnst@cn.fujitsu.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "hw/sysbus.h" +#include "hw/devices.h" +#include "sysemu/sysemu.h" +#include "hw/intc/allwinner-a10-pic.h" + +static void aw_a10_pic_update(AwA10PICState *s) +{ +    uint8_t i; +    int irq = 0, fiq = 0, zeroes; + +    s->vector = 0; + +    for (i = 0; i < AW_A10_PIC_REG_NUM; i++) { +        irq |= s->irq_pending[i] & ~s->mask[i]; +        fiq |= s->select[i] & s->irq_pending[i] & ~s->mask[i]; + +        if (!s->vector) { +            zeroes = ctz32(s->irq_pending[i] & ~s->mask[i]); +            if (zeroes != 32) { +                s->vector = (i * 32 + zeroes) * 4; +            } +        } +    } + +    qemu_set_irq(s->parent_irq, !!irq); +    qemu_set_irq(s->parent_fiq, !!fiq); +} + +static void aw_a10_pic_set_irq(void *opaque, int irq, int level) +{ +    AwA10PICState *s = opaque; + +    if (level) { +        set_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); +    } else { +        clear_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); +    } +    aw_a10_pic_update(s); +} + +static uint64_t aw_a10_pic_read(void *opaque, hwaddr offset, unsigned size) +{ +    AwA10PICState *s = opaque; +    uint8_t index = (offset & 0xc) / 4; + +    switch (offset) { +    case AW_A10_PIC_VECTOR: +        return s->vector; +    case AW_A10_PIC_BASE_ADDR: +        return s->base_addr; +    case AW_A10_PIC_PROTECT: +        return s->protect; +    case AW_A10_PIC_NMI: +        return s->nmi; +    case AW_A10_PIC_IRQ_PENDING ... AW_A10_PIC_IRQ_PENDING + 8: +        return s->irq_pending[index]; +    case AW_A10_PIC_FIQ_PENDING ... AW_A10_PIC_FIQ_PENDING + 8: +        return s->fiq_pending[index]; +    case AW_A10_PIC_SELECT ... AW_A10_PIC_SELECT + 8: +        return s->select[index]; +    case AW_A10_PIC_ENABLE ... AW_A10_PIC_ENABLE + 8: +        return s->enable[index]; +    case AW_A10_PIC_MASK ... AW_A10_PIC_MASK + 8: +        return s->mask[index]; +    default: +        qemu_log_mask(LOG_GUEST_ERROR, +                      "%s: Bad offset 0x%x\n",  __func__, (int)offset); +        break; +    } + +    return 0; +} + +static void aw_a10_pic_write(void *opaque, hwaddr offset, uint64_t value, +                             unsigned size) +{ +    AwA10PICState *s = opaque; +    uint8_t index = (offset & 0xc) / 4; + +    switch (offset) { +    case AW_A10_PIC_BASE_ADDR: +        s->base_addr = value & ~0x3; +        break; +    case AW_A10_PIC_PROTECT: +        s->protect = value; +        break; +    case AW_A10_PIC_NMI: +        s->nmi = value; +        break; +    case AW_A10_PIC_IRQ_PENDING ... AW_A10_PIC_IRQ_PENDING + 8: +        /* +         * The register is read-only; nevertheless, Linux (including +         * the version originally shipped by Allwinner) pretends to +         * write to the register. Just ignore it. +         */ +        break; +    case AW_A10_PIC_FIQ_PENDING ... AW_A10_PIC_FIQ_PENDING + 8: +        s->fiq_pending[index] &= ~value; +        break; +    case AW_A10_PIC_SELECT ... AW_A10_PIC_SELECT + 8: +        s->select[index] = value; +        break; +    case AW_A10_PIC_ENABLE ... AW_A10_PIC_ENABLE + 8: +        s->enable[index] = value; +        break; +    case AW_A10_PIC_MASK ... 
AW_A10_PIC_MASK + 8: +        s->mask[index] = value; +        break; +    default: +        qemu_log_mask(LOG_GUEST_ERROR, +                      "%s: Bad offset 0x%x\n",  __func__, (int)offset); +        break; +    } + +    aw_a10_pic_update(s); +} + +static const MemoryRegionOps aw_a10_pic_ops = { +    .read = aw_a10_pic_read, +    .write = aw_a10_pic_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_aw_a10_pic = { +    .name = "a10.pic", +    .version_id = 1, +    .minimum_version_id = 1, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(vector, AwA10PICState), +        VMSTATE_UINT32(base_addr, AwA10PICState), +        VMSTATE_UINT32(protect, AwA10PICState), +        VMSTATE_UINT32(nmi, AwA10PICState), +        VMSTATE_UINT32_ARRAY(irq_pending, AwA10PICState, AW_A10_PIC_REG_NUM), +        VMSTATE_UINT32_ARRAY(fiq_pending, AwA10PICState, AW_A10_PIC_REG_NUM), +        VMSTATE_UINT32_ARRAY(enable, AwA10PICState, AW_A10_PIC_REG_NUM), +        VMSTATE_UINT32_ARRAY(select, AwA10PICState, AW_A10_PIC_REG_NUM), +        VMSTATE_UINT32_ARRAY(mask, AwA10PICState, AW_A10_PIC_REG_NUM), +        VMSTATE_END_OF_LIST() +    } +}; + +static void aw_a10_pic_init(Object *obj) +{ +    AwA10PICState *s = AW_A10_PIC(obj); +    SysBusDevice *dev = SYS_BUS_DEVICE(obj); + +     qdev_init_gpio_in(DEVICE(dev), aw_a10_pic_set_irq, AW_A10_PIC_INT_NR); +     sysbus_init_irq(dev, &s->parent_irq); +     sysbus_init_irq(dev, &s->parent_fiq); +     memory_region_init_io(&s->iomem, OBJECT(s), &aw_a10_pic_ops, s, +                           TYPE_AW_A10_PIC, 0x400); +     sysbus_init_mmio(dev, &s->iomem); +} + +static void aw_a10_pic_reset(DeviceState *d) +{ +    AwA10PICState *s = AW_A10_PIC(d); +    uint8_t i; + +    s->base_addr = 0; +    s->protect = 0; +    s->nmi = 0; +    s->vector = 0; +    for (i = 0; i < AW_A10_PIC_REG_NUM; i++) { +        s->irq_pending[i] = 0; +        s->fiq_pending[i] = 0; +        s->select[i] = 0; +        s->enable[i] = 0; +        s->mask[i] = 0; +    } +} + +static void aw_a10_pic_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); + +    dc->reset = aw_a10_pic_reset; +    dc->desc = "allwinner a10 pic"; +    dc->vmsd = &vmstate_aw_a10_pic; + } + +static const TypeInfo aw_a10_pic_info = { +    .name = TYPE_AW_A10_PIC, +    .parent = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(AwA10PICState), +    .instance_init = aw_a10_pic_init, +    .class_init = aw_a10_pic_class_init, +}; + +static void aw_a10_register_types(void) +{ +    type_register_static(&aw_a10_pic_info); +} + +type_init(aw_a10_register_types); diff --git a/hw/intc/apic.c b/hw/intc/apic.c new file mode 100644 index 00000000..77b639cc --- /dev/null +++ b/hw/intc/apic.c @@ -0,0 +1,921 @@ +/* + *  APIC support + * + *  Copyright (c) 2004-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/> + */ +#include "qemu/thread.h" +#include "hw/i386/apic_internal.h" +#include "hw/i386/apic.h" +#include "hw/i386/ioapic.h" +#include "hw/pci/msi.h" +#include "qemu/host-utils.h" +#include "trace.h" +#include "hw/i386/pc.h" +#include "hw/i386/apic-msidef.h" + +#define MAX_APIC_WORDS 8 + +#define SYNC_FROM_VAPIC                 0x1 +#define SYNC_TO_VAPIC                   0x2 +#define SYNC_ISR_IRR_TO_VAPIC           0x4 + +static APICCommonState *local_apics[MAX_APICS + 1]; + +static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode); +static void apic_update_irq(APICCommonState *s); +static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask, +                                      uint8_t dest, uint8_t dest_mode); + +/* Find first bit starting from msb */ +static int apic_fls_bit(uint32_t value) +{ +    return 31 - clz32(value); +} + +/* Find first bit starting from lsb */ +static int apic_ffs_bit(uint32_t value) +{ +    return ctz32(value); +} + +static inline void apic_set_bit(uint32_t *tab, int index) +{ +    int i, mask; +    i = index >> 5; +    mask = 1 << (index & 0x1f); +    tab[i] |= mask; +} + +static inline void apic_reset_bit(uint32_t *tab, int index) +{ +    int i, mask; +    i = index >> 5; +    mask = 1 << (index & 0x1f); +    tab[i] &= ~mask; +} + +static inline int apic_get_bit(uint32_t *tab, int index) +{ +    int i, mask; +    i = index >> 5; +    mask = 1 << (index & 0x1f); +    return !!(tab[i] & mask); +} + +/* return -1 if no bit is set */ +static int get_highest_priority_int(uint32_t *tab) +{ +    int i; +    for (i = 7; i >= 0; i--) { +        if (tab[i] != 0) { +            return i * 32 + apic_fls_bit(tab[i]); +        } +    } +    return -1; +} + +static void apic_sync_vapic(APICCommonState *s, int sync_type) +{ +    VAPICState vapic_state; +    size_t length; +    off_t start; +    int vector; + +    if (!s->vapic_paddr) { +        return; +    } +    if (sync_type & SYNC_FROM_VAPIC) { +        cpu_physical_memory_read(s->vapic_paddr, &vapic_state, +                                 sizeof(vapic_state)); +        s->tpr = vapic_state.tpr; +    } +    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) { +        start = offsetof(VAPICState, isr); +        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr); + +        if (sync_type & SYNC_TO_VAPIC) { +            assert(qemu_cpu_is_self(CPU(s->cpu))); + +            vapic_state.tpr = s->tpr; +            vapic_state.enabled = 1; +            start = 0; +            length = sizeof(VAPICState); +        } + +        vector = get_highest_priority_int(s->isr); +        if (vector < 0) { +            vector = 0; +        } +        vapic_state.isr = vector & 0xf0; + +        vapic_state.zero = 0; + +        vector = get_highest_priority_int(s->irr); +        if (vector < 0) { +            vector = 0; +        } +        vapic_state.irr = vector & 0xff; + +        cpu_physical_memory_write_rom(&address_space_memory, +                                      s->vapic_paddr + start, +                                      ((void *)&vapic_state) + start, length); +    } +} + +static void apic_vapic_base_update(APICCommonState *s) +{ +    apic_sync_vapic(s, SYNC_TO_VAPIC); +} + +static void apic_local_deliver(APICCommonState *s, int vector) +{ +    uint32_t lvt = s->lvt[vector]; +    int trigger_mode; + +    
trace_apic_local_deliver(vector, (lvt >> 8) & 7); + +    if (lvt & APIC_LVT_MASKED) +        return; + +    switch ((lvt >> 8) & 7) { +    case APIC_DM_SMI: +        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI); +        break; + +    case APIC_DM_NMI: +        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI); +        break; + +    case APIC_DM_EXTINT: +        cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD); +        break; + +    case APIC_DM_FIXED: +        trigger_mode = APIC_TRIGGER_EDGE; +        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) && +            (lvt & APIC_LVT_LEVEL_TRIGGER)) +            trigger_mode = APIC_TRIGGER_LEVEL; +        apic_set_irq(s, lvt & 0xff, trigger_mode); +    } +} + +void apic_deliver_pic_intr(DeviceState *dev, int level) +{ +    APICCommonState *s = APIC_COMMON(dev); + +    if (level) { +        apic_local_deliver(s, APIC_LVT_LINT0); +    } else { +        uint32_t lvt = s->lvt[APIC_LVT_LINT0]; + +        switch ((lvt >> 8) & 7) { +        case APIC_DM_FIXED: +            if (!(lvt & APIC_LVT_LEVEL_TRIGGER)) +                break; +            apic_reset_bit(s->irr, lvt & 0xff); +            /* fall through */ +        case APIC_DM_EXTINT: +            apic_update_irq(s); +            break; +        } +    } +} + +static void apic_external_nmi(APICCommonState *s) +{ +    apic_local_deliver(s, APIC_LVT_LINT1); +} + +#define foreach_apic(apic, deliver_bitmask, code) \ +{\ +    int __i, __j;\ +    for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\ +        uint32_t __mask = deliver_bitmask[__i];\ +        if (__mask) {\ +            for(__j = 0; __j < 32; __j++) {\ +                if (__mask & (1U << __j)) {\ +                    apic = local_apics[__i * 32 + __j];\ +                    if (apic) {\ +                        code;\ +                    }\ +                }\ +            }\ +        }\ +    }\ +} + +static void apic_bus_deliver(const uint32_t *deliver_bitmask, +                             uint8_t delivery_mode, uint8_t vector_num, +                             uint8_t trigger_mode) +{ +    APICCommonState *apic_iter; + +    switch (delivery_mode) { +        case APIC_DM_LOWPRI: +            /* XXX: search for focus processor, arbitration */ +            { +                int i, d; +                d = -1; +                for(i = 0; i < MAX_APIC_WORDS; i++) { +                    if (deliver_bitmask[i]) { +                        d = i * 32 + apic_ffs_bit(deliver_bitmask[i]); +                        break; +                    } +                } +                if (d >= 0) { +                    apic_iter = local_apics[d]; +                    if (apic_iter) { +                        apic_set_irq(apic_iter, vector_num, trigger_mode); +                    } +                } +            } +            return; + +        case APIC_DM_FIXED: +            break; + +        case APIC_DM_SMI: +            foreach_apic(apic_iter, deliver_bitmask, +                cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI) +            ); +            return; + +        case APIC_DM_NMI: +            foreach_apic(apic_iter, deliver_bitmask, +                cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI) +            ); +            return; + +        case APIC_DM_INIT: +            /* normal INIT IPI sent to processors */ +            foreach_apic(apic_iter, deliver_bitmask, +                         cpu_interrupt(CPU(apic_iter->cpu), +                                       CPU_INTERRUPT_INIT) +            ); +            
return; + +        case APIC_DM_EXTINT: +            /* handled in I/O APIC code */ +            break; + +        default: +            return; +    } + +    foreach_apic(apic_iter, deliver_bitmask, +                 apic_set_irq(apic_iter, vector_num, trigger_mode) ); +} + +void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, +                      uint8_t vector_num, uint8_t trigger_mode) +{ +    uint32_t deliver_bitmask[MAX_APIC_WORDS]; + +    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num, +                           trigger_mode); + +    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode); +    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode); +} + +static void apic_set_base(APICCommonState *s, uint64_t val) +{ +    s->apicbase = (val & 0xfffff000) | +        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE)); +    /* if disabled, cannot be enabled again */ +    if (!(val & MSR_IA32_APICBASE_ENABLE)) { +        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE; +        cpu_clear_apic_feature(&s->cpu->env); +        s->spurious_vec &= ~APIC_SV_ENABLE; +    } +} + +static void apic_set_tpr(APICCommonState *s, uint8_t val) +{ +    /* Updates from cr8 are ignored while the VAPIC is active */ +    if (!s->vapic_paddr) { +        s->tpr = val << 4; +        apic_update_irq(s); +    } +} + +static uint8_t apic_get_tpr(APICCommonState *s) +{ +    apic_sync_vapic(s, SYNC_FROM_VAPIC); +    return s->tpr >> 4; +} + +static int apic_get_ppr(APICCommonState *s) +{ +    int tpr, isrv, ppr; + +    tpr = (s->tpr >> 4); +    isrv = get_highest_priority_int(s->isr); +    if (isrv < 0) +        isrv = 0; +    isrv >>= 4; +    if (tpr >= isrv) +        ppr = s->tpr; +    else +        ppr = isrv << 4; +    return ppr; +} + +static int apic_get_arb_pri(APICCommonState *s) +{ +    /* XXX: arbitration */ +    return 0; +} + + +/* + * <0 - low prio interrupt, + * 0  - no interrupt, + * >0 - interrupt number + */ +static int apic_irq_pending(APICCommonState *s) +{ +    int irrv, ppr; + +    if (!(s->spurious_vec & APIC_SV_ENABLE)) { +        return 0; +    } + +    irrv = get_highest_priority_int(s->irr); +    if (irrv < 0) { +        return 0; +    } +    ppr = apic_get_ppr(s); +    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) { +        return -1; +    } + +    return irrv; +} + +/* signal the CPU if an irq is pending */ +static void apic_update_irq(APICCommonState *s) +{ +    CPUState *cpu; +    DeviceState *dev = (DeviceState *)s; + +    cpu = CPU(s->cpu); +    if (!qemu_cpu_is_self(cpu)) { +        cpu_interrupt(cpu, CPU_INTERRUPT_POLL); +    } else if (apic_irq_pending(s) > 0) { +        cpu_interrupt(cpu, CPU_INTERRUPT_HARD); +    } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) { +        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD); +    } +} + +void apic_poll_irq(DeviceState *dev) +{ +    APICCommonState *s = APIC_COMMON(dev); + +    apic_sync_vapic(s, SYNC_FROM_VAPIC); +    apic_update_irq(s); +} + +static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode) +{ +    apic_report_irq_delivered(!apic_get_bit(s->irr, vector_num)); + +    apic_set_bit(s->irr, vector_num); +    if (trigger_mode) +        apic_set_bit(s->tmr, vector_num); +    else +        apic_reset_bit(s->tmr, vector_num); +    if (s->vapic_paddr) { +        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC); +        /* +         * The vcpu thread needs to see the new IRR before we pull its current +         * TPR value. 
That way, if we miss a lowering of the TRP, the guest +         * has the chance to notice the new IRR and poll for IRQs on its own. +         */ +        smp_wmb(); +        apic_sync_vapic(s, SYNC_FROM_VAPIC); +    } +    apic_update_irq(s); +} + +static void apic_eoi(APICCommonState *s) +{ +    int isrv; +    isrv = get_highest_priority_int(s->isr); +    if (isrv < 0) +        return; +    apic_reset_bit(s->isr, isrv); +    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) { +        ioapic_eoi_broadcast(isrv); +    } +    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC); +    apic_update_irq(s); +} + +static int apic_find_dest(uint8_t dest) +{ +    APICCommonState *apic = local_apics[dest]; +    int i; + +    if (apic && apic->id == dest) +        return dest;  /* shortcut in case apic->id == apic->idx */ + +    for (i = 0; i < MAX_APICS; i++) { +        apic = local_apics[i]; +	if (apic && apic->id == dest) +            return i; +        if (!apic) +            break; +    } + +    return -1; +} + +static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask, +                                      uint8_t dest, uint8_t dest_mode) +{ +    APICCommonState *apic_iter; +    int i; + +    if (dest_mode == 0) { +        if (dest == 0xff) { +            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t)); +        } else { +            int idx = apic_find_dest(dest); +            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t)); +            if (idx >= 0) +                apic_set_bit(deliver_bitmask, idx); +        } +    } else { +        /* XXX: cluster mode */ +        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t)); +        for(i = 0; i < MAX_APICS; i++) { +            apic_iter = local_apics[i]; +            if (apic_iter) { +                if (apic_iter->dest_mode == 0xf) { +                    if (dest & apic_iter->log_dest) +                        apic_set_bit(deliver_bitmask, i); +                } else if (apic_iter->dest_mode == 0x0) { +                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) && +                        (dest & apic_iter->log_dest & 0x0f)) { +                        apic_set_bit(deliver_bitmask, i); +                    } +                } +            } else { +                break; +            } +        } +    } +} + +static void apic_startup(APICCommonState *s, int vector_num) +{ +    s->sipi_vector = vector_num; +    cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI); +} + +void apic_sipi(DeviceState *dev) +{ +    APICCommonState *s = APIC_COMMON(dev); + +    cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI); + +    if (!s->wait_for_sipi) +        return; +    cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector); +    s->wait_for_sipi = 0; +} + +static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode, +                         uint8_t delivery_mode, uint8_t vector_num, +                         uint8_t trigger_mode) +{ +    APICCommonState *s = APIC_COMMON(dev); +    uint32_t deliver_bitmask[MAX_APIC_WORDS]; +    int dest_shorthand = (s->icr[0] >> 18) & 3; +    APICCommonState *apic_iter; + +    switch (dest_shorthand) { +    case 0: +        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode); +        break; +    case 1: +        memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask)); +        apic_set_bit(deliver_bitmask, s->idx); +        break; +    case 2: +        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); +        
break; +    case 3: +        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); +        apic_reset_bit(deliver_bitmask, s->idx); +        break; +    } + +    switch (delivery_mode) { +        case APIC_DM_INIT: +            { +                int trig_mode = (s->icr[0] >> 15) & 1; +                int level = (s->icr[0] >> 14) & 1; +                if (level == 0 && trig_mode == 1) { +                    foreach_apic(apic_iter, deliver_bitmask, +                                 apic_iter->arb_id = apic_iter->id ); +                    return; +                } +            } +            break; + +        case APIC_DM_SIPI: +            foreach_apic(apic_iter, deliver_bitmask, +                         apic_startup(apic_iter, vector_num) ); +            return; +    } + +    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode); +} + +static bool apic_check_pic(APICCommonState *s) +{ +    DeviceState *dev = (DeviceState *)s; + +    if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) { +        return false; +    } +    apic_deliver_pic_intr(dev, 1); +    return true; +} + +int apic_get_interrupt(DeviceState *dev) +{ +    APICCommonState *s = APIC_COMMON(dev); +    int intno; + +    /* if the APIC is installed or enabled, we let the 8259 handle the +       IRQs */ +    if (!s) +        return -1; +    if (!(s->spurious_vec & APIC_SV_ENABLE)) +        return -1; + +    apic_sync_vapic(s, SYNC_FROM_VAPIC); +    intno = apic_irq_pending(s); + +    /* if there is an interrupt from the 8259, let the caller handle +     * that first since ExtINT interrupts ignore the priority. +     */ +    if (intno == 0 || apic_check_pic(s)) { +        apic_sync_vapic(s, SYNC_TO_VAPIC); +        return -1; +    } else if (intno < 0) { +        apic_sync_vapic(s, SYNC_TO_VAPIC); +        return s->spurious_vec & 0xff; +    } +    apic_reset_bit(s->irr, intno); +    apic_set_bit(s->isr, intno); +    apic_sync_vapic(s, SYNC_TO_VAPIC); + +    apic_update_irq(s); + +    return intno; +} + +int apic_accept_pic_intr(DeviceState *dev) +{ +    APICCommonState *s = APIC_COMMON(dev); +    uint32_t lvt0; + +    if (!s) +        return -1; + +    lvt0 = s->lvt[APIC_LVT_LINT0]; + +    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 || +        (lvt0 & APIC_LVT_MASKED) == 0) +        return 1; + +    return 0; +} + +static uint32_t apic_get_current_count(APICCommonState *s) +{ +    int64_t d; +    uint32_t val; +    d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >> +        s->count_shift; +    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) { +        /* periodic */ +        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1)); +    } else { +        if (d >= s->initial_count) +            val = 0; +        else +            val = s->initial_count - d; +    } +    return val; +} + +static void apic_timer_update(APICCommonState *s, int64_t current_time) +{ +    if (apic_next_timer(s, current_time)) { +        timer_mod(s->timer, s->next_time); +    } else { +        timer_del(s->timer); +    } +} + +static void apic_timer(void *opaque) +{ +    APICCommonState *s = opaque; + +    apic_local_deliver(s, APIC_LVT_TIMER); +    apic_timer_update(s, s->next_time); +} + +static uint32_t apic_mem_readb(void *opaque, hwaddr addr) +{ +    return 0; +} + +static uint32_t apic_mem_readw(void *opaque, hwaddr addr) +{ +    return 0; +} + +static void apic_mem_writeb(void *opaque, hwaddr addr, uint32_t val) +{ +} + +static void apic_mem_writew(void *opaque, 
hwaddr addr, uint32_t val) +{ +} + +static uint32_t apic_mem_readl(void *opaque, hwaddr addr) +{ +    DeviceState *dev; +    APICCommonState *s; +    uint32_t val; +    int index; + +    dev = cpu_get_current_apic(); +    if (!dev) { +        return 0; +    } +    s = APIC_COMMON(dev); + +    index = (addr >> 4) & 0xff; +    switch(index) { +    case 0x02: /* id */ +        val = s->id << 24; +        break; +    case 0x03: /* version */ +        val = s->version | ((APIC_LVT_NB - 1) << 16); +        break; +    case 0x08: +        apic_sync_vapic(s, SYNC_FROM_VAPIC); +        if (apic_report_tpr_access) { +            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ); +        } +        val = s->tpr; +        break; +    case 0x09: +        val = apic_get_arb_pri(s); +        break; +    case 0x0a: +        /* ppr */ +        val = apic_get_ppr(s); +        break; +    case 0x0b: +        val = 0; +        break; +    case 0x0d: +        val = s->log_dest << 24; +        break; +    case 0x0e: +        val = (s->dest_mode << 28) | 0xfffffff; +        break; +    case 0x0f: +        val = s->spurious_vec; +        break; +    case 0x10 ... 0x17: +        val = s->isr[index & 7]; +        break; +    case 0x18 ... 0x1f: +        val = s->tmr[index & 7]; +        break; +    case 0x20 ... 0x27: +        val = s->irr[index & 7]; +        break; +    case 0x28: +        val = s->esr; +        break; +    case 0x30: +    case 0x31: +        val = s->icr[index & 1]; +        break; +    case 0x32 ... 0x37: +        val = s->lvt[index - 0x32]; +        break; +    case 0x38: +        val = s->initial_count; +        break; +    case 0x39: +        val = apic_get_current_count(s); +        break; +    case 0x3e: +        val = s->divide_conf; +        break; +    default: +        s->esr |= ESR_ILLEGAL_ADDRESS; +        val = 0; +        break; +    } +    trace_apic_mem_readl(addr, val); +    return val; +} + +static void apic_send_msi(hwaddr addr, uint32_t data) +{ +    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; +    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT; +    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; +    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; +    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7; +    /* XXX: Ignore redirection hint. */ +    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode); +} + +static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val) +{ +    DeviceState *dev; +    APICCommonState *s; +    int index = (addr >> 4) & 0xff; +    if (addr > 0xfff || !index) { +        /* MSI and MMIO APIC are at the same memory location, +         * but actually not on the global bus: MSI is on PCI bus +         * APIC is connected directly to the CPU. +         * Mapping them on the global bus happens to work because +         * MSI registers are reserved in APIC MMIO and vice versa. 
*/ +        apic_send_msi(addr, val); +        return; +    } + +    dev = cpu_get_current_apic(); +    if (!dev) { +        return; +    } +    s = APIC_COMMON(dev); + +    trace_apic_mem_writel(addr, val); + +    switch(index) { +    case 0x02: +        s->id = (val >> 24); +        break; +    case 0x03: +        break; +    case 0x08: +        if (apic_report_tpr_access) { +            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE); +        } +        s->tpr = val; +        apic_sync_vapic(s, SYNC_TO_VAPIC); +        apic_update_irq(s); +        break; +    case 0x09: +    case 0x0a: +        break; +    case 0x0b: /* EOI */ +        apic_eoi(s); +        break; +    case 0x0d: +        s->log_dest = val >> 24; +        break; +    case 0x0e: +        s->dest_mode = val >> 28; +        break; +    case 0x0f: +        s->spurious_vec = val & 0x1ff; +        apic_update_irq(s); +        break; +    case 0x10 ... 0x17: +    case 0x18 ... 0x1f: +    case 0x20 ... 0x27: +    case 0x28: +        break; +    case 0x30: +        s->icr[0] = val; +        apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1, +                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff), +                     (s->icr[0] >> 15) & 1); +        break; +    case 0x31: +        s->icr[1] = val; +        break; +    case 0x32 ... 0x37: +        { +            int n = index - 0x32; +            s->lvt[n] = val; +            if (n == APIC_LVT_TIMER) { +                apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); +            } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) { +                apic_update_irq(s); +            } +        } +        break; +    case 0x38: +        s->initial_count = val; +        s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +        apic_timer_update(s, s->initial_count_load_time); +        break; +    case 0x39: +        break; +    case 0x3e: +        { +            int v; +            s->divide_conf = val & 0xb; +            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4); +            s->count_shift = (v + 1) & 7; +        } +        break; +    default: +        s->esr |= ESR_ILLEGAL_ADDRESS; +        break; +    } +} + +static void apic_pre_save(APICCommonState *s) +{ +    apic_sync_vapic(s, SYNC_FROM_VAPIC); +} + +static void apic_post_load(APICCommonState *s) +{ +    if (s->timer_expiry != -1) { +        timer_mod(s->timer, s->timer_expiry); +    } else { +        timer_del(s->timer); +    } +} + +static const MemoryRegionOps apic_io_ops = { +    .old_mmio = { +        .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, }, +        .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, }, +    }, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void apic_realize(DeviceState *dev, Error **errp) +{ +    APICCommonState *s = APIC_COMMON(dev); + +    memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi", +                          APIC_SPACE_SIZE); + +    s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s); +    local_apics[s->idx] = s; + +    msi_supported = true; +} + +static void apic_class_init(ObjectClass *klass, void *data) +{ +    APICCommonClass *k = APIC_COMMON_CLASS(klass); + +    k->realize = apic_realize; +    k->set_base = apic_set_base; +    k->set_tpr = apic_set_tpr; +    k->get_tpr = apic_get_tpr; +    k->vapic_base_update = apic_vapic_base_update; +    k->external_nmi = apic_external_nmi; +    k->pre_save = apic_pre_save; +    k->post_load = 
apic_post_load; +} + +static const TypeInfo apic_info = { +    .name          = "apic", +    .instance_size = sizeof(APICCommonState), +    .parent        = TYPE_APIC_COMMON, +    .class_init    = apic_class_init, +}; + +static void apic_register_types(void) +{ +    type_register_static(&apic_info); +} + +type_init(apic_register_types) diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c new file mode 100644 index 00000000..0032b97c --- /dev/null +++ b/hw/intc/apic_common.c @@ -0,0 +1,456 @@ +/* + *  APIC support - common bits of emulated and KVM kernel model + * + *  Copyright (c) 2004-2005 Fabrice Bellard + *  Copyright (c) 2011      Jan Kiszka, Siemens AG + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/> + */ +#include "hw/i386/apic.h" +#include "hw/i386/apic_internal.h" +#include "trace.h" +#include "sysemu/kvm.h" +#include "hw/qdev.h" +#include "hw/sysbus.h" + +static int apic_irq_delivered; +bool apic_report_tpr_access; + +void cpu_set_apic_base(DeviceState *dev, uint64_t val) +{ +    trace_cpu_set_apic_base(val); + +    if (dev) { +        APICCommonState *s = APIC_COMMON(dev); +        APICCommonClass *info = APIC_COMMON_GET_CLASS(s); +        info->set_base(s, val); +    } +} + +uint64_t cpu_get_apic_base(DeviceState *dev) +{ +    if (dev) { +        APICCommonState *s = APIC_COMMON(dev); +        trace_cpu_get_apic_base((uint64_t)s->apicbase); +        return s->apicbase; +    } else { +        trace_cpu_get_apic_base(MSR_IA32_APICBASE_BSP); +        return MSR_IA32_APICBASE_BSP; +    } +} + +void cpu_set_apic_tpr(DeviceState *dev, uint8_t val) +{ +    APICCommonState *s; +    APICCommonClass *info; + +    if (!dev) { +        return; +    } + +    s = APIC_COMMON(dev); +    info = APIC_COMMON_GET_CLASS(s); + +    info->set_tpr(s, val); +} + +uint8_t cpu_get_apic_tpr(DeviceState *dev) +{ +    APICCommonState *s; +    APICCommonClass *info; + +    if (!dev) { +        return 0; +    } + +    s = APIC_COMMON(dev); +    info = APIC_COMMON_GET_CLASS(s); + +    return info->get_tpr(s); +} + +void apic_enable_tpr_access_reporting(DeviceState *dev, bool enable) +{ +    APICCommonState *s = APIC_COMMON(dev); +    APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + +    apic_report_tpr_access = enable; +    if (info->enable_tpr_reporting) { +        info->enable_tpr_reporting(s, enable); +    } +} + +void apic_enable_vapic(DeviceState *dev, hwaddr paddr) +{ +    APICCommonState *s = APIC_COMMON(dev); +    APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + +    s->vapic_paddr = paddr; +    info->vapic_base_update(s); +} + +void apic_handle_tpr_access_report(DeviceState *dev, target_ulong ip, +                                   TPRAccess access) +{ +    APICCommonState *s = APIC_COMMON(dev); + +    vapic_report_tpr_access(s->vapic, CPU(s->cpu), ip, access); +} + +void apic_report_irq_delivered(int delivered) +{ +    apic_irq_delivered += delivered; + +    
trace_apic_report_irq_delivered(apic_irq_delivered); +} + +void apic_reset_irq_delivered(void) +{ +    /* Copy this into a local variable to encourage gcc to emit a plain +     * register for a sys/sdt.h marker.  For details on this workaround, see: +     * https://sourceware.org/bugzilla/show_bug.cgi?id=13296 +     */ +    volatile int a_i_d = apic_irq_delivered; +    trace_apic_reset_irq_delivered(a_i_d); + +    apic_irq_delivered = 0; +} + +int apic_get_irq_delivered(void) +{ +    trace_apic_get_irq_delivered(apic_irq_delivered); + +    return apic_irq_delivered; +} + +void apic_deliver_nmi(DeviceState *dev) +{ +    APICCommonState *s = APIC_COMMON(dev); +    APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + +    info->external_nmi(s); +} + +bool apic_next_timer(APICCommonState *s, int64_t current_time) +{ +    int64_t d; + +    /* We need to store the timer state separately to support APIC +     * implementations that maintain a non-QEMU timer, e.g. inside the +     * host kernel. This open-coded state allows us to migrate between +     * both models. */ +    s->timer_expiry = -1; + +    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_MASKED) { +        return false; +    } + +    d = (current_time - s->initial_count_load_time) >> s->count_shift; + +    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) { +        if (!s->initial_count) { +            return false; +        } +        d = ((d / ((uint64_t)s->initial_count + 1)) + 1) * +            ((uint64_t)s->initial_count + 1); +    } else { +        if (d >= s->initial_count) { +            return false; +        } +        d = (uint64_t)s->initial_count + 1; +    } +    s->next_time = s->initial_count_load_time + (d << s->count_shift); +    s->timer_expiry = s->next_time; +    return true; +} + +void apic_init_reset(DeviceState *dev) +{ +    APICCommonState *s; +    APICCommonClass *info; +    int i; + +    if (!dev) { +        return; +    } +    s = APIC_COMMON(dev); +    s->tpr = 0; +    s->spurious_vec = 0xff; +    s->log_dest = 0; +    s->dest_mode = 0xf; +    memset(s->isr, 0, sizeof(s->isr)); +    memset(s->tmr, 0, sizeof(s->tmr)); +    memset(s->irr, 0, sizeof(s->irr)); +    for (i = 0; i < APIC_LVT_NB; i++) { +        s->lvt[i] = APIC_LVT_MASKED; +    } +    s->esr = 0; +    memset(s->icr, 0, sizeof(s->icr)); +    s->divide_conf = 0; +    s->count_shift = 0; +    s->initial_count = 0; +    s->initial_count_load_time = 0; +    s->next_time = 0; +    s->wait_for_sipi = !cpu_is_bsp(s->cpu); + +    if (s->timer) { +        timer_del(s->timer); +    } +    s->timer_expiry = -1; + +    info = APIC_COMMON_GET_CLASS(s); +    if (info->reset) { +        info->reset(s); +    } +} + +void apic_designate_bsp(DeviceState *dev, bool bsp) +{ +    if (dev == NULL) { +        return; +    } + +    APICCommonState *s = APIC_COMMON(dev); +    if (bsp) { +        s->apicbase |= MSR_IA32_APICBASE_BSP; +    } else { +        s->apicbase &= ~MSR_IA32_APICBASE_BSP; +    } +} + +static void apic_reset_common(DeviceState *dev) +{ +    APICCommonState *s = APIC_COMMON(dev); +    APICCommonClass *info = APIC_COMMON_GET_CLASS(s); +    uint32_t bsp; + +    bsp = s->apicbase & MSR_IA32_APICBASE_BSP; +    s->apicbase = APIC_DEFAULT_ADDRESS | bsp | MSR_IA32_APICBASE_ENABLE; + +    s->vapic_paddr = 0; +    info->vapic_base_update(s); + +    apic_init_reset(dev); +} + +/* This function is only used for old state version 1 and 2 */ +static int apic_load_old(QEMUFile *f, void *opaque, int version_id) +{ +    APICCommonState *s = opaque; +    APICCommonClass *info 
= APIC_COMMON_GET_CLASS(s); +    int i; + +    if (version_id > 2) { +        return -EINVAL; +    } + +    /* XXX: what if the base changes? (registered memory regions) */ +    qemu_get_be32s(f, &s->apicbase); +    qemu_get_8s(f, &s->id); +    qemu_get_8s(f, &s->arb_id); +    qemu_get_8s(f, &s->tpr); +    qemu_get_be32s(f, &s->spurious_vec); +    qemu_get_8s(f, &s->log_dest); +    qemu_get_8s(f, &s->dest_mode); +    for (i = 0; i < 8; i++) { +        qemu_get_be32s(f, &s->isr[i]); +        qemu_get_be32s(f, &s->tmr[i]); +        qemu_get_be32s(f, &s->irr[i]); +    } +    for (i = 0; i < APIC_LVT_NB; i++) { +        qemu_get_be32s(f, &s->lvt[i]); +    } +    qemu_get_be32s(f, &s->esr); +    qemu_get_be32s(f, &s->icr[0]); +    qemu_get_be32s(f, &s->icr[1]); +    qemu_get_be32s(f, &s->divide_conf); +    s->count_shift = qemu_get_be32(f); +    qemu_get_be32s(f, &s->initial_count); +    s->initial_count_load_time = qemu_get_be64(f); +    s->next_time = qemu_get_be64(f); + +    if (version_id >= 2) { +        s->timer_expiry = qemu_get_be64(f); +    } + +    if (info->post_load) { +        info->post_load(s); +    } +    return 0; +} + +static void apic_common_realize(DeviceState *dev, Error **errp) +{ +    APICCommonState *s = APIC_COMMON(dev); +    APICCommonClass *info; +    static DeviceState *vapic; +    static int apic_no; +    static bool mmio_registered; + +    if (apic_no >= MAX_APICS) { +        error_setg(errp, "%s initialization failed.", +                   object_get_typename(OBJECT(dev))); +        return; +    } +    s->idx = apic_no++; + +    info = APIC_COMMON_GET_CLASS(s); +    info->realize(dev, errp); +    if (!mmio_registered) { +        ICCBus *b = ICC_BUS(qdev_get_parent_bus(dev)); +        memory_region_add_subregion(b->apic_address_space, 0, &s->io_memory); +        mmio_registered = true; +    } + +    /* Note: We need at least 1M to map the VAPIC option ROM */ +    if (!vapic && s->vapic_control & VAPIC_ENABLE_MASK && +        ram_size >= 1024 * 1024) { +        vapic = sysbus_create_simple("kvmvapic", -1, NULL); +    } +    s->vapic = vapic; +    if (apic_report_tpr_access && info->enable_tpr_reporting) { +        info->enable_tpr_reporting(s, true); +    } + +} + +static int apic_pre_load(void *opaque) +{ +    APICCommonState *s = APIC_COMMON(opaque); + +    /* The default is !cpu_is_bsp(s->cpu), but the common value is 0 +     * so that's what apic_common_sipi_needed checks for.  Reset to +     * the value that is assumed when the apic_sipi subsection is +     * absent. 
+     */ +    s->wait_for_sipi = 0; +    return 0; +} + +static void apic_dispatch_pre_save(void *opaque) +{ +    APICCommonState *s = APIC_COMMON(opaque); +    APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + +    if (info->pre_save) { +        info->pre_save(s); +    } +} + +static int apic_dispatch_post_load(void *opaque, int version_id) +{ +    APICCommonState *s = APIC_COMMON(opaque); +    APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + +    if (info->post_load) { +        info->post_load(s); +    } +    return 0; +} + +static bool apic_common_sipi_needed(void *opaque) +{ +    APICCommonState *s = APIC_COMMON(opaque); +    return s->wait_for_sipi != 0; +} + +static const VMStateDescription vmstate_apic_common_sipi = { +    .name = "apic_sipi", +    .version_id = 1, +    .minimum_version_id = 1, +    .needed = apic_common_sipi_needed, +    .fields = (VMStateField[]) { +        VMSTATE_INT32(sipi_vector, APICCommonState), +        VMSTATE_INT32(wait_for_sipi, APICCommonState), +        VMSTATE_END_OF_LIST() +    } +}; + +static const VMStateDescription vmstate_apic_common = { +    .name = "apic", +    .version_id = 3, +    .minimum_version_id = 3, +    .minimum_version_id_old = 1, +    .load_state_old = apic_load_old, +    .pre_load = apic_pre_load, +    .pre_save = apic_dispatch_pre_save, +    .post_load = apic_dispatch_post_load, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(apicbase, APICCommonState), +        VMSTATE_UINT8(id, APICCommonState), +        VMSTATE_UINT8(arb_id, APICCommonState), +        VMSTATE_UINT8(tpr, APICCommonState), +        VMSTATE_UINT32(spurious_vec, APICCommonState), +        VMSTATE_UINT8(log_dest, APICCommonState), +        VMSTATE_UINT8(dest_mode, APICCommonState), +        VMSTATE_UINT32_ARRAY(isr, APICCommonState, 8), +        VMSTATE_UINT32_ARRAY(tmr, APICCommonState, 8), +        VMSTATE_UINT32_ARRAY(irr, APICCommonState, 8), +        VMSTATE_UINT32_ARRAY(lvt, APICCommonState, APIC_LVT_NB), +        VMSTATE_UINT32(esr, APICCommonState), +        VMSTATE_UINT32_ARRAY(icr, APICCommonState, 2), +        VMSTATE_UINT32(divide_conf, APICCommonState), +        VMSTATE_INT32(count_shift, APICCommonState), +        VMSTATE_UINT32(initial_count, APICCommonState), +        VMSTATE_INT64(initial_count_load_time, APICCommonState), +        VMSTATE_INT64(next_time, APICCommonState), +        VMSTATE_INT64(timer_expiry, +                      APICCommonState), /* open-coded timer state */ +        VMSTATE_END_OF_LIST() +    }, +    .subsections = (const VMStateDescription*[]) { +        &vmstate_apic_common_sipi, +        NULL +    } +}; + +static Property apic_properties_common[] = { +    DEFINE_PROP_UINT8("id", APICCommonState, id, -1), +    DEFINE_PROP_UINT8("version", APICCommonState, version, 0x14), +    DEFINE_PROP_BIT("vapic", APICCommonState, vapic_control, VAPIC_ENABLE_BIT, +                    true), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void apic_common_class_init(ObjectClass *klass, void *data) +{ +    ICCDeviceClass *idc = ICC_DEVICE_CLASS(klass); +    DeviceClass *dc = DEVICE_CLASS(klass); + +    dc->vmsd = &vmstate_apic_common; +    dc->reset = apic_reset_common; +    dc->props = apic_properties_common; +    idc->realize = apic_common_realize; +    /* +     * Reason: APIC and CPU need to be wired up by +     * x86_cpu_apic_create() +     */ +    dc->cannot_instantiate_with_device_add_yet = true; +} + +static const TypeInfo apic_common_type = { +    .name = TYPE_APIC_COMMON, +    .parent = TYPE_ICC_DEVICE, +    
.instance_size = sizeof(APICCommonState), +    .class_size = sizeof(APICCommonClass), +    .class_init = apic_common_class_init, +    .abstract = true, +}; + +static void apic_common_register_types(void) +{ +    type_register_static(&apic_common_type); +} + +type_init(apic_common_register_types) diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c new file mode 100644 index 00000000..454bfd7d --- /dev/null +++ b/hw/intc/arm_gic.c @@ -0,0 +1,1160 @@ +/* + * ARM Generic/Distributed Interrupt Controller + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +/* This file contains implementation code for the RealView EB interrupt + * controller, MPCore distributed interrupt controller and ARMv7-M + * Nested Vectored Interrupt Controller. + * It is compiled in two ways: + *  (1) as a standalone file to produce a sysbus device which is a GIC + *  that can be used on the realview board and as one of the builtin + *  private peripherals for the ARM MP CPUs (11MPCore, A9, etc) + *  (2) by being directly #included into armv7m_nvic.c to produce the + *  armv7m_nvic device. + */ + +#include "hw/sysbus.h" +#include "gic_internal.h" +#include "qom/cpu.h" + +//#define DEBUG_GIC + +#ifdef DEBUG_GIC +#define DPRINTF(fmt, ...) \ +do { fprintf(stderr, "arm_gic: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) do {} while(0) +#endif + +static const uint8_t gic_id[] = { +    0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 +}; + +#define NUM_CPU(s) ((s)->num_cpu) + +static inline int gic_get_current_cpu(GICState *s) +{ +    if (s->num_cpu > 1) { +        return current_cpu->cpu_index; +    } +    return 0; +} + +/* Return true if this GIC config has interrupt groups, which is + * true if we're a GICv2, or a GICv1 with the security extensions. + */ +static inline bool gic_has_groups(GICState *s) +{ +    return s->revision == 2 || s->security_extn; +} + +/* TODO: Many places that call this routine could be optimized.  */ +/* Update interrupt status after enabled or pending bits have been changed.  
*/ +void gic_update(GICState *s) +{ +    int best_irq; +    int best_prio; +    int irq; +    int irq_level, fiq_level; +    int cpu; +    int cm; + +    for (cpu = 0; cpu < NUM_CPU(s); cpu++) { +        cm = 1 << cpu; +        s->current_pending[cpu] = 1023; +        if (!(s->ctlr & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1)) +            || !(s->cpu_ctlr[cpu] & (GICC_CTLR_EN_GRP0 | GICC_CTLR_EN_GRP1))) { +            qemu_irq_lower(s->parent_irq[cpu]); +            qemu_irq_lower(s->parent_fiq[cpu]); +            continue; +        } +        best_prio = 0x100; +        best_irq = 1023; +        for (irq = 0; irq < s->num_irq; irq++) { +            if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) && +                (irq < GIC_INTERNAL || GIC_TARGET(irq) & cm)) { +                if (GIC_GET_PRIORITY(irq, cpu) < best_prio) { +                    best_prio = GIC_GET_PRIORITY(irq, cpu); +                    best_irq = irq; +                } +            } +        } + +        irq_level = fiq_level = 0; + +        if (best_prio < s->priority_mask[cpu]) { +            s->current_pending[cpu] = best_irq; +            if (best_prio < s->running_priority[cpu]) { +                int group = GIC_TEST_GROUP(best_irq, cm); + +                if (extract32(s->ctlr, group, 1) && +                    extract32(s->cpu_ctlr[cpu], group, 1)) { +                    if (group == 0 && s->cpu_ctlr[cpu] & GICC_CTLR_FIQ_EN) { +                        DPRINTF("Raised pending FIQ %d (cpu %d)\n", +                                best_irq, cpu); +                        fiq_level = 1; +                    } else { +                        DPRINTF("Raised pending IRQ %d (cpu %d)\n", +                                best_irq, cpu); +                        irq_level = 1; +                    } +                } +            } +        } + +        qemu_set_irq(s->parent_irq[cpu], irq_level); +        qemu_set_irq(s->parent_fiq[cpu], fiq_level); +    } +} + +void gic_set_pending_private(GICState *s, int cpu, int irq) +{ +    int cm = 1 << cpu; + +    if (gic_test_pending(s, irq, cm)) { +        return; +    } + +    DPRINTF("Set %d pending cpu %d\n", irq, cpu); +    GIC_SET_PENDING(irq, cm); +    gic_update(s); +} + +static void gic_set_irq_11mpcore(GICState *s, int irq, int level, +                                 int cm, int target) +{ +    if (level) { +        GIC_SET_LEVEL(irq, cm); +        if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) { +            DPRINTF("Set %d pending mask %x\n", irq, target); +            GIC_SET_PENDING(irq, target); +        } +    } else { +        GIC_CLEAR_LEVEL(irq, cm); +    } +} + +static void gic_set_irq_generic(GICState *s, int irq, int level, +                                int cm, int target) +{ +    if (level) { +        GIC_SET_LEVEL(irq, cm); +        DPRINTF("Set %d pending mask %x\n", irq, target); +        if (GIC_TEST_EDGE_TRIGGER(irq)) { +            GIC_SET_PENDING(irq, target); +        } +    } else { +        GIC_CLEAR_LEVEL(irq, cm); +    } +} + +/* Process a change in an external IRQ input.  */ +static void gic_set_irq(void *opaque, int irq, int level) +{ +    /* Meaning of the 'irq' parameter: +     *  [0..N-1] : external interrupts +     *  [N..N+31] : PPI (internal) interrupts for CPU 0 +     *  [N+32..N+63] : PPI (internal interrupts for CPU 1 +     *  ... 
+     */ +    GICState *s = (GICState *)opaque; +    int cm, target; +    if (irq < (s->num_irq - GIC_INTERNAL)) { +        /* The first external input line is internal interrupt 32.  */ +        cm = ALL_CPU_MASK; +        irq += GIC_INTERNAL; +        target = GIC_TARGET(irq); +    } else { +        int cpu; +        irq -= (s->num_irq - GIC_INTERNAL); +        cpu = irq / GIC_INTERNAL; +        irq %= GIC_INTERNAL; +        cm = 1 << cpu; +        target = cm; +    } + +    assert(irq >= GIC_NR_SGIS); + +    if (level == GIC_TEST_LEVEL(irq, cm)) { +        return; +    } + +    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { +        gic_set_irq_11mpcore(s, irq, level, cm, target); +    } else { +        gic_set_irq_generic(s, irq, level, cm, target); +    } + +    gic_update(s); +} + +static uint16_t gic_get_current_pending_irq(GICState *s, int cpu, +                                            MemTxAttrs attrs) +{ +    uint16_t pending_irq = s->current_pending[cpu]; + +    if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) { +        int group = GIC_TEST_GROUP(pending_irq, (1 << cpu)); +        /* On a GIC without the security extensions, reading this register +         * behaves in the same way as a secure access to a GIC with them. +         */ +        bool secure = !s->security_extn || attrs.secure; + +        if (group == 0 && !secure) { +            /* Group0 interrupts hidden from Non-secure access */ +            return 1023; +        } +        if (group == 1 && secure && !(s->cpu_ctlr[cpu] & GICC_CTLR_ACK_CTL)) { +            /* Group1 interrupts only seen by Secure access if +             * AckCtl bit set. +             */ +            return 1022; +        } +    } +    return pending_irq; +} + +static void gic_set_running_irq(GICState *s, int cpu, int irq) +{ +    s->running_irq[cpu] = irq; +    if (irq == 1023) { +        s->running_priority[cpu] = 0x100; +    } else { +        s->running_priority[cpu] = GIC_GET_PRIORITY(irq, cpu); +    } +    gic_update(s); +} + +uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs) +{ +    int ret, irq, src; +    int cm = 1 << cpu; + +    /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately +     * for the case where this GIC supports grouping and the pending interrupt +     * is in the wrong group. +     */ +    irq = gic_get_current_pending_irq(s, cpu, attrs);; + +    if (irq >= GIC_MAXIRQ) { +        DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq); +        return irq; +    } + +    if (GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) { +        DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq); +        return 1023; +    } +    s->last_active[irq][cpu] = s->running_irq[cpu]; + +    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { +        /* Clear pending flags for both level and edge triggered interrupts. +         * Level triggered IRQs will be reasserted once they become inactive. +         */ +        GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm); +        ret = irq; +    } else { +        if (irq < GIC_NR_SGIS) { +            /* Lookup the source CPU for the SGI and clear this in the +             * sgi_pending map.  Return the src and clear the overall pending +             * state on this CPU if the SGI is not pending from any CPUs. 
+             */ +            assert(s->sgi_pending[irq][cpu] != 0); +            src = ctz32(s->sgi_pending[irq][cpu]); +            s->sgi_pending[irq][cpu] &= ~(1 << src); +            if (s->sgi_pending[irq][cpu] == 0) { +                GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm); +            } +            ret = irq | ((src & 0x7) << 10); +        } else { +            /* Clear pending state for both level and edge triggered +             * interrupts. (level triggered interrupts with an active line +             * remain pending, see gic_test_pending) +             */ +            GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm); +            ret = irq; +        } +    } + +    gic_set_running_irq(s, cpu, irq); +    DPRINTF("ACK %d\n", irq); +    return ret; +} + +void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val, +                      MemTxAttrs attrs) +{ +    if (s->security_extn && !attrs.secure) { +        if (!GIC_TEST_GROUP(irq, (1 << cpu))) { +            return; /* Ignore Non-secure access of Group0 IRQ */ +        } +        val = 0x80 | (val >> 1); /* Non-secure view */ +    } + +    if (irq < GIC_INTERNAL) { +        s->priority1[irq][cpu] = val; +    } else { +        s->priority2[(irq) - GIC_INTERNAL] = val; +    } +} + +static uint32_t gic_get_priority(GICState *s, int cpu, int irq, +                                 MemTxAttrs attrs) +{ +    uint32_t prio = GIC_GET_PRIORITY(irq, cpu); + +    if (s->security_extn && !attrs.secure) { +        if (!GIC_TEST_GROUP(irq, (1 << cpu))) { +            return 0; /* Non-secure access cannot read priority of Group0 IRQ */ +        } +        prio = (prio << 1) & 0xff; /* Non-secure view */ +    } +    return prio; +} + +static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask, +                                  MemTxAttrs attrs) +{ +    if (s->security_extn && !attrs.secure) { +        if (s->priority_mask[cpu] & 0x80) { +            /* Priority Mask in upper half */ +            pmask = 0x80 | (pmask >> 1); +        } else { +            /* Non-secure write ignored if priority mask is in lower half */ +            return; +        } +    } +    s->priority_mask[cpu] = pmask; +} + +static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs) +{ +    uint32_t pmask = s->priority_mask[cpu]; + +    if (s->security_extn && !attrs.secure) { +        if (pmask & 0x80) { +            /* Priority Mask in upper half, return Non-secure view */ +            pmask = (pmask << 1) & 0xff; +        } else { +            /* Priority Mask in lower half, RAZ */ +            pmask = 0; +        } +    } +    return pmask; +} + +static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs) +{ +    uint32_t ret = s->cpu_ctlr[cpu]; + +    if (s->security_extn && !attrs.secure) { +        /* Construct the NS banked view of GICC_CTLR from the correct +         * bits of the S banked view. We don't need to move the bypass +         * control bits because we don't implement that (IMPDEF) part +         * of the GIC architecture. 
+         */ +        ret = (ret & (GICC_CTLR_EN_GRP1 | GICC_CTLR_EOIMODE_NS)) >> 1; +    } +    return ret; +} + +static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value, +                                MemTxAttrs attrs) +{ +    uint32_t mask; + +    if (s->security_extn && !attrs.secure) { +        /* The NS view can only write certain bits in the register; +         * the rest are unchanged +         */ +        mask = GICC_CTLR_EN_GRP1; +        if (s->revision == 2) { +            mask |= GICC_CTLR_EOIMODE_NS; +        } +        s->cpu_ctlr[cpu] &= ~mask; +        s->cpu_ctlr[cpu] |= (value << 1) & mask; +    } else { +        if (s->revision == 2) { +            mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK; +        } else { +            mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK; +        } +        s->cpu_ctlr[cpu] = value & mask; +    } +    DPRINTF("CPU Interface %d: Group0 Interrupts %sabled, " +            "Group1 Interrupts %sabled\n", cpu, +            (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis", +            (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis"); +} + +static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs) +{ +    if (s->security_extn && !attrs.secure) { +        if (s->running_priority[cpu] & 0x80) { +            /* Running priority in upper half of range: return the Non-secure +             * view of the priority. +             */ +            return s->running_priority[cpu] << 1; +        } else { +            /* Running priority in lower half of range: RAZ */ +            return 0; +        } +    } else { +        return s->running_priority[cpu]; +    } +} + +void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) +{ +    int update = 0; +    int cm = 1 << cpu; +    DPRINTF("EOI %d\n", irq); +    if (irq >= s->num_irq) { +        /* This handles two cases: +         * 1. If software writes the ID of a spurious interrupt [ie 1023] +         * to the GICC_EOIR, the GIC ignores that write. +         * 2. If software writes the number of a non-existent interrupt +         * this must be a subcase of "value written does not match the last +         * valid interrupt value read from the Interrupt Acknowledge +         * register" and so this is UNPREDICTABLE. We choose to ignore it. +         */ +        return; +    } +    if (s->running_irq[cpu] == 1023) +        return; /* No active IRQ.  */ + +    if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { +        /* Mark level triggered interrupts as pending if they are still +           raised.  */ +        if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm) +            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) { +            DPRINTF("Set %d pending mask %x\n", irq, cm); +            GIC_SET_PENDING(irq, cm); +            update = 1; +        } +    } + +    if (s->security_extn && !attrs.secure && !GIC_TEST_GROUP(irq, cm)) { +        DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq); +        return; +    } + +    /* Secure EOI with GICC_CTLR.AckCtl == 0 when the IRQ is a Group 1 +     * interrupt is UNPREDICTABLE. We choose to handle it as if AckCtl == 1, +     * i.e. go ahead and complete the irq anyway. +     */ + +    if (irq != s->running_irq[cpu]) { +        /* Complete an IRQ that is not currently running.  
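The gic_set_priority()/gic_get_priority() pair above implements the banked Non-secure view of a priority byte: an NS write is squeezed into the lower half of the priority range, and an NS read undoes the shift. A small worked sketch of that round trip (hypothetical helper names, values chosen for illustration):

    /* Sketch of the Non-secure priority transform used above. */
    static inline uint8_t ns_priority_stored(uint8_t ns_val)
    {
        return 0x80 | (ns_val >> 1);   /* what gic_set_priority() stores */
    }
    static inline uint8_t ns_priority_read(uint8_t stored)
    {
        return (stored << 1) & 0xff;   /* what gic_get_priority() returns */
    }
    /* Example: a Non-secure write of 0xd0 is stored as 0xe8; reading it
     * back through the NS view yields 0xd0 again, while Secure software
     * sees the real value 0xe8. */
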
*/ +        int tmp = s->running_irq[cpu]; +        while (s->last_active[tmp][cpu] != 1023) { +            if (s->last_active[tmp][cpu] == irq) { +                s->last_active[tmp][cpu] = s->last_active[irq][cpu]; +                break; +            } +            tmp = s->last_active[tmp][cpu]; +        } +        if (update) { +            gic_update(s); +        } +    } else { +        /* Complete the current running IRQ.  */ +        gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]); +    } +} + +static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) +{ +    GICState *s = (GICState *)opaque; +    uint32_t res; +    int irq; +    int i; +    int cpu; +    int cm; +    int mask; + +    cpu = gic_get_current_cpu(s); +    cm = 1 << cpu; +    if (offset < 0x100) { +        if (offset == 0) {      /* GICD_CTLR */ +            if (s->security_extn && !attrs.secure) { +                /* The NS bank of this register is just an alias of the +                 * EnableGrp1 bit in the S bank version. +                 */ +                return extract32(s->ctlr, 1, 1); +            } else { +                return s->ctlr; +            } +        } +        if (offset == 4) +            /* Interrupt Controller Type Register */ +            return ((s->num_irq / 32) - 1) +                    | ((NUM_CPU(s) - 1) << 5) +                    | (s->security_extn << 10); +        if (offset < 0x08) +            return 0; +        if (offset >= 0x80) { +            /* Interrupt Group Registers: these RAZ/WI if this is an NS +             * access to a GIC with the security extensions, or if the GIC +             * doesn't have groups at all. +             */ +            res = 0; +            if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { +                /* Every byte offset holds 8 group status bits */ +                irq = (offset - 0x080) * 8 + GIC_BASE_IRQ; +                if (irq >= s->num_irq) { +                    goto bad_reg; +                } +                for (i = 0; i < 8; i++) { +                    if (GIC_TEST_GROUP(irq + i, cm)) { +                        res |= (1 << i); +                    } +                } +            } +            return res; +        } +        goto bad_reg; +    } else if (offset < 0x200) { +        /* Interrupt Set/Clear Enable.  */ +        if (offset < 0x180) +            irq = (offset - 0x100) * 8; +        else +            irq = (offset - 0x180) * 8; +        irq += GIC_BASE_IRQ; +        if (irq >= s->num_irq) +            goto bad_reg; +        res = 0; +        for (i = 0; i < 8; i++) { +            if (GIC_TEST_ENABLED(irq + i, cm)) { +                res |= (1 << i); +            } +        } +    } else if (offset < 0x300) { +        /* Interrupt Set/Clear Pending.  */ +        if (offset < 0x280) +            irq = (offset - 0x200) * 8; +        else +            irq = (offset - 0x280) * 8; +        irq += GIC_BASE_IRQ; +        if (irq >= s->num_irq) +            goto bad_reg; +        res = 0; +        mask = (irq < GIC_INTERNAL) ?  cm : ALL_CPU_MASK; +        for (i = 0; i < 8; i++) { +            if (gic_test_pending(s, irq + i, mask)) { +                res |= (1 << i); +            } +        } +    } else if (offset < 0x400) { +        /* Interrupt Active.  */ +        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ; +        if (irq >= s->num_irq) +            goto bad_reg; +        res = 0; +        mask = (irq < GIC_INTERNAL) ?  
cm : ALL_CPU_MASK; +        for (i = 0; i < 8; i++) { +            if (GIC_TEST_ACTIVE(irq + i, mask)) { +                res |= (1 << i); +            } +        } +    } else if (offset < 0x800) { +        /* Interrupt Priority.  */ +        irq = (offset - 0x400) + GIC_BASE_IRQ; +        if (irq >= s->num_irq) +            goto bad_reg; +        res = gic_get_priority(s, cpu, irq, attrs); +    } else if (offset < 0xc00) { +        /* Interrupt CPU Target.  */ +        if (s->num_cpu == 1 && s->revision != REV_11MPCORE) { +            /* For uniprocessor GICs these RAZ/WI */ +            res = 0; +        } else { +            irq = (offset - 0x800) + GIC_BASE_IRQ; +            if (irq >= s->num_irq) { +                goto bad_reg; +            } +            if (irq >= 29 && irq <= 31) { +                res = cm; +            } else { +                res = GIC_TARGET(irq); +            } +        } +    } else if (offset < 0xf00) { +        /* Interrupt Configuration.  */ +        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; +        if (irq >= s->num_irq) +            goto bad_reg; +        res = 0; +        for (i = 0; i < 4; i++) { +            if (GIC_TEST_MODEL(irq + i)) +                res |= (1 << (i * 2)); +            if (GIC_TEST_EDGE_TRIGGER(irq + i)) +                res |= (2 << (i * 2)); +        } +    } else if (offset < 0xf10) { +        goto bad_reg; +    } else if (offset < 0xf30) { +        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { +            goto bad_reg; +        } + +        if (offset < 0xf20) { +            /* GICD_CPENDSGIRn */ +            irq = (offset - 0xf10); +        } else { +            irq = (offset - 0xf20); +            /* GICD_SPENDSGIRn */ +        } + +        res = s->sgi_pending[irq][cpu]; +    } else if (offset < 0xfe0) { +        goto bad_reg; +    } else /* offset >= 0xfe0 */ { +        if (offset & 3) { +            res = 0; +        } else { +            res = gic_id[(offset - 0xfe0) >> 2]; +        } +    } +    return res; +bad_reg: +    qemu_log_mask(LOG_GUEST_ERROR, +                  "gic_dist_readb: Bad offset %x\n", (int)offset); +    return 0; +} + +static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data, +                                 unsigned size, MemTxAttrs attrs) +{ +    switch (size) { +    case 1: +        *data = gic_dist_readb(opaque, offset, attrs); +        return MEMTX_OK; +    case 2: +        *data = gic_dist_readb(opaque, offset, attrs); +        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; +        return MEMTX_OK; +    case 4: +        *data = gic_dist_readb(opaque, offset, attrs); +        *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; +        *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16; +        *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24; +        return MEMTX_OK; +    default: +        return MEMTX_ERROR; +    } +} + +static void gic_dist_writeb(void *opaque, hwaddr offset, +                            uint32_t value, MemTxAttrs attrs) +{ +    GICState *s = (GICState *)opaque; +    int irq; +    int i; +    int cpu; + +    cpu = gic_get_current_cpu(s); +    if (offset < 0x100) { +        if (offset == 0) { +            if (s->security_extn && !attrs.secure) { +                /* NS version is just an alias of the S version's bit 1 */ +                s->ctlr = deposit32(s->ctlr, 1, 1, value); +            } else if (gic_has_groups(s)) { +                s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1); +     
       } else { +                s->ctlr = value & GICD_CTLR_EN_GRP0; +            } +            DPRINTF("Distributor: Group0 %sabled; Group 1 %sabled\n", +                    s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis", +                    s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis"); +        } else if (offset < 4) { +            /* ignored.  */ +        } else if (offset >= 0x80) { +            /* Interrupt Group Registers: RAZ/WI for NS access to secure +             * GIC, or for GICs without groups. +             */ +            if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { +                /* Every byte offset holds 8 group status bits */ +                irq = (offset - 0x80) * 8 + GIC_BASE_IRQ; +                if (irq >= s->num_irq) { +                    goto bad_reg; +                } +                for (i = 0; i < 8; i++) { +                    /* Group bits are banked for private interrupts */ +                    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; +                    if (value & (1 << i)) { +                        /* Group1 (Non-secure) */ +                        GIC_SET_GROUP(irq + i, cm); +                    } else { +                        /* Group0 (Secure) */ +                        GIC_CLEAR_GROUP(irq + i, cm); +                    } +                } +            } +        } else { +            goto bad_reg; +        } +    } else if (offset < 0x180) { +        /* Interrupt Set Enable.  */ +        irq = (offset - 0x100) * 8 + GIC_BASE_IRQ; +        if (irq >= s->num_irq) +            goto bad_reg; +        if (irq < GIC_NR_SGIS) { +            value = 0xff; +        } + +        for (i = 0; i < 8; i++) { +            if (value & (1 << i)) { +                int mask = +                    (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i); +                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; + +                if (!GIC_TEST_ENABLED(irq + i, cm)) { +                    DPRINTF("Enabled IRQ %d\n", irq + i); +                } +                GIC_SET_ENABLED(irq + i, cm); +                /* If a raised level triggered IRQ enabled then mark +                   is as pending.  */ +                if (GIC_TEST_LEVEL(irq + i, mask) +                        && !GIC_TEST_EDGE_TRIGGER(irq + i)) { +                    DPRINTF("Set %d pending mask %x\n", irq + i, mask); +                    GIC_SET_PENDING(irq + i, mask); +                } +            } +        } +    } else if (offset < 0x200) { +        /* Interrupt Clear Enable.  */ +        irq = (offset - 0x180) * 8 + GIC_BASE_IRQ; +        if (irq >= s->num_irq) +            goto bad_reg; +        if (irq < GIC_NR_SGIS) { +            value = 0; +        } + +        for (i = 0; i < 8; i++) { +            if (value & (1 << i)) { +                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; + +                if (GIC_TEST_ENABLED(irq + i, cm)) { +                    DPRINTF("Disabled IRQ %d\n", irq + i); +                } +                GIC_CLEAR_ENABLED(irq + i, cm); +            } +        } +    } else if (offset < 0x280) { +        /* Interrupt Set Pending.  
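The set-enable decoding above means each GICD_ISENABLERn byte covers eight interrupts starting at (offset - 0x100) * 8. As a hedged illustration (guest accesses normally arrive through gic_dist_write(), and GIC_BASE_IRQ is ignored here), enabling IRQ 36 amounts to:

    /* Illustrative only: offset 0x104 maps to base IRQ (0x104 - 0x100) * 8 = 32,
     * so setting bit 4 enables IRQ 36. */
    gic_dist_writeb(s, 0x104, 1 << 4, MEMTXATTRS_UNSPECIFIED);
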
*/ +        irq = (offset - 0x200) * 8 + GIC_BASE_IRQ; +        if (irq >= s->num_irq) +            goto bad_reg; +        if (irq < GIC_NR_SGIS) { +            value = 0; +        } + +        for (i = 0; i < 8; i++) { +            if (value & (1 << i)) { +                GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i)); +            } +        } +    } else if (offset < 0x300) { +        /* Interrupt Clear Pending.  */ +        irq = (offset - 0x280) * 8 + GIC_BASE_IRQ; +        if (irq >= s->num_irq) +            goto bad_reg; +        if (irq < GIC_NR_SGIS) { +            value = 0; +        } + +        for (i = 0; i < 8; i++) { +            /* ??? This currently clears the pending bit for all CPUs, even +               for per-CPU interrupts.  It's unclear whether this is the +               corect behavior.  */ +            if (value & (1 << i)) { +                GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK); +            } +        } +    } else if (offset < 0x400) { +        /* Interrupt Active.  */ +        goto bad_reg; +    } else if (offset < 0x800) { +        /* Interrupt Priority.  */ +        irq = (offset - 0x400) + GIC_BASE_IRQ; +        if (irq >= s->num_irq) +            goto bad_reg; +        gic_set_priority(s, cpu, irq, value, attrs); +    } else if (offset < 0xc00) { +        /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the +         * annoying exception of the 11MPCore's GIC. +         */ +        if (s->num_cpu != 1 || s->revision == REV_11MPCORE) { +            irq = (offset - 0x800) + GIC_BASE_IRQ; +            if (irq >= s->num_irq) { +                goto bad_reg; +            } +            if (irq < 29) { +                value = 0; +            } else if (irq < GIC_INTERNAL) { +                value = ALL_CPU_MASK; +            } +            s->irq_target[irq] = value & ALL_CPU_MASK; +        } +    } else if (offset < 0xf00) { +        /* Interrupt Configuration.  */ +        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; +        if (irq >= s->num_irq) +            goto bad_reg; +        if (irq < GIC_NR_SGIS) +            value |= 0xaa; +        for (i = 0; i < 4; i++) { +            if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { +                if (value & (1 << (i * 2))) { +                    GIC_SET_MODEL(irq + i); +                } else { +                    GIC_CLEAR_MODEL(irq + i); +                } +            } +            if (value & (2 << (i * 2))) { +                GIC_SET_EDGE_TRIGGER(irq + i); +            } else { +                GIC_CLEAR_EDGE_TRIGGER(irq + i); +            } +        } +    } else if (offset < 0xf10) { +        /* 0xf00 is only handled for 32-bit writes.  
*/ +        goto bad_reg; +    } else if (offset < 0xf20) { +        /* GICD_CPENDSGIRn */ +        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { +            goto bad_reg; +        } +        irq = (offset - 0xf10); + +        s->sgi_pending[irq][cpu] &= ~value; +        if (s->sgi_pending[irq][cpu] == 0) { +            GIC_CLEAR_PENDING(irq, 1 << cpu); +        } +    } else if (offset < 0xf30) { +        /* GICD_SPENDSGIRn */ +        if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { +            goto bad_reg; +        } +        irq = (offset - 0xf20); + +        GIC_SET_PENDING(irq, 1 << cpu); +        s->sgi_pending[irq][cpu] |= value; +    } else { +        goto bad_reg; +    } +    gic_update(s); +    return; +bad_reg: +    qemu_log_mask(LOG_GUEST_ERROR, +                  "gic_dist_writeb: Bad offset %x\n", (int)offset); +} + +static void gic_dist_writew(void *opaque, hwaddr offset, +                            uint32_t value, MemTxAttrs attrs) +{ +    gic_dist_writeb(opaque, offset, value & 0xff, attrs); +    gic_dist_writeb(opaque, offset + 1, value >> 8, attrs); +} + +static void gic_dist_writel(void *opaque, hwaddr offset, +                            uint32_t value, MemTxAttrs attrs) +{ +    GICState *s = (GICState *)opaque; +    if (offset == 0xf00) { +        int cpu; +        int irq; +        int mask; +        int target_cpu; + +        cpu = gic_get_current_cpu(s); +        irq = value & 0x3ff; +        switch ((value >> 24) & 3) { +        case 0: +            mask = (value >> 16) & ALL_CPU_MASK; +            break; +        case 1: +            mask = ALL_CPU_MASK ^ (1 << cpu); +            break; +        case 2: +            mask = 1 << cpu; +            break; +        default: +            DPRINTF("Bad Soft Int target filter\n"); +            mask = ALL_CPU_MASK; +            break; +        } +        GIC_SET_PENDING(irq, mask); +        target_cpu = ctz32(mask); +        while (target_cpu < GIC_NCPU) { +            s->sgi_pending[irq][target_cpu] |= (1 << cpu); +            mask &= ~(1 << target_cpu); +            target_cpu = ctz32(mask); +        } +        gic_update(s); +        return; +    } +    gic_dist_writew(opaque, offset, value & 0xffff, attrs); +    gic_dist_writew(opaque, offset + 2, value >> 16, attrs); +} + +static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data, +                                  unsigned size, MemTxAttrs attrs) +{ +    switch (size) { +    case 1: +        gic_dist_writeb(opaque, offset, data, attrs); +        return MEMTX_OK; +    case 2: +        gic_dist_writew(opaque, offset, data, attrs); +        return MEMTX_OK; +    case 4: +        gic_dist_writel(opaque, offset, data, attrs); +        return MEMTX_OK; +    default: +        return MEMTX_ERROR; +    } +} + +static const MemoryRegionOps gic_dist_ops = { +    .read_with_attrs = gic_dist_read, +    .write_with_attrs = gic_dist_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, +                                uint64_t *data, MemTxAttrs attrs) +{ +    switch (offset) { +    case 0x00: /* Control */ +        *data = gic_get_cpu_control(s, cpu, attrs); +        break; +    case 0x04: /* Priority mask */ +        *data = gic_get_priority_mask(s, cpu, attrs); +        break; +    case 0x08: /* Binary Point */ +        if (s->security_extn && !attrs.secure) { +            /* BPR is banked. Non-secure copy stored in ABPR. 
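The 32-bit-only GICD_SGIR handling in gic_dist_writel() above takes the target filter from bits [25:24], the target list from bits [23:16] and the SGI number from the low bits. A sketch of the value a guest would write to send SGI 5 to CPUs 1 and 2 (helper name illustrative only):

    /* Sketch: compose a GICD_SGIR value as decoded by gic_dist_writel().
     * Filter 0 = use the target list, 1 = all-but-self, 2 = self only. */
    static inline uint32_t make_sgir(unsigned filter, unsigned targets,
                                     unsigned sgi)
    {
        return ((filter & 3) << 24) | ((targets & 0xff) << 16) | (sgi & 0xf);
    }
    /* make_sgir(0, 0x6, 5) == 0x00060005: SGI 5 to CPUs 1 and 2. */
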
*/ +            *data = s->abpr[cpu]; +        } else { +            *data = s->bpr[cpu]; +        } +        break; +    case 0x0c: /* Acknowledge */ +        *data = gic_acknowledge_irq(s, cpu, attrs); +        break; +    case 0x14: /* Running Priority */ +        *data = gic_get_running_priority(s, cpu, attrs); +        break; +    case 0x18: /* Highest Pending Interrupt */ +        *data = gic_get_current_pending_irq(s, cpu, attrs); +        break; +    case 0x1c: /* Aliased Binary Point */ +        /* GIC v2, no security: ABPR +         * GIC v1, no security: not implemented (RAZ/WI) +         * With security extensions, secure access: ABPR (alias of NS BPR) +         * With security extensions, nonsecure access: RAZ/WI +         */ +        if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) { +            *data = 0; +        } else { +            *data = s->abpr[cpu]; +        } +        break; +    case 0xd0: case 0xd4: case 0xd8: case 0xdc: +        *data = s->apr[(offset - 0xd0) / 4][cpu]; +        break; +    default: +        qemu_log_mask(LOG_GUEST_ERROR, +                      "gic_cpu_read: Bad offset %x\n", (int)offset); +        return MEMTX_ERROR; +    } +    return MEMTX_OK; +} + +static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, +                                 uint32_t value, MemTxAttrs attrs) +{ +    switch (offset) { +    case 0x00: /* Control */ +        gic_set_cpu_control(s, cpu, value, attrs); +        break; +    case 0x04: /* Priority mask */ +        gic_set_priority_mask(s, cpu, value, attrs); +        break; +    case 0x08: /* Binary Point */ +        if (s->security_extn && !attrs.secure) { +            s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); +        } else { +            s->bpr[cpu] = MAX(value & 0x7, GIC_MIN_BPR); +        } +        break; +    case 0x10: /* End Of Interrupt */ +        gic_complete_irq(s, cpu, value & 0x3ff, attrs); +        return MEMTX_OK; +    case 0x1c: /* Aliased Binary Point */ +        if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) { +            /* unimplemented, or NS access: RAZ/WI */ +            return MEMTX_OK; +        } else { +            s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); +        } +        break; +    case 0xd0: case 0xd4: case 0xd8: case 0xdc: +        qemu_log_mask(LOG_UNIMP, "Writing APR not implemented\n"); +        break; +    default: +        qemu_log_mask(LOG_GUEST_ERROR, +                      "gic_cpu_write: Bad offset %x\n", (int)offset); +        return MEMTX_ERROR; +    } +    gic_update(s); +    return MEMTX_OK; +} + +/* Wrappers to read/write the GIC CPU interface for the current CPU */ +static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data, +                                    unsigned size, MemTxAttrs attrs) +{ +    GICState *s = (GICState *)opaque; +    return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs); +} + +static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr, +                                     uint64_t value, unsigned size, +                                     MemTxAttrs attrs) +{ +    GICState *s = (GICState *)opaque; +    return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs); +} + +/* Wrappers to read/write the GIC CPU interface for a specific CPU. + * These just decode the opaque pointer into GICState* + cpu id. 
+ */ +static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data, +                                   unsigned size, MemTxAttrs attrs) +{ +    GICState **backref = (GICState **)opaque; +    GICState *s = *backref; +    int id = (backref - s->backref); +    return gic_cpu_read(s, id, addr, data, attrs); +} + +static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr, +                                    uint64_t value, unsigned size, +                                    MemTxAttrs attrs) +{ +    GICState **backref = (GICState **)opaque; +    GICState *s = *backref; +    int id = (backref - s->backref); +    return gic_cpu_write(s, id, addr, value, attrs); +} + +static const MemoryRegionOps gic_thiscpu_ops = { +    .read_with_attrs = gic_thiscpu_read, +    .write_with_attrs = gic_thiscpu_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const MemoryRegionOps gic_cpu_ops = { +    .read_with_attrs = gic_do_cpu_read, +    .write_with_attrs = gic_do_cpu_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +void gic_init_irqs_and_distributor(GICState *s) +{ +    SysBusDevice *sbd = SYS_BUS_DEVICE(s); +    int i; + +    i = s->num_irq - GIC_INTERNAL; +    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU. +     * GPIO array layout is thus: +     *  [0..N-1] SPIs +     *  [N..N+31] PPIs for CPU 0 +     *  [N+32..N+63] PPIs for CPU 1 +     *   ... +     */ +    if (s->revision != REV_NVIC) { +        i += (GIC_INTERNAL * s->num_cpu); +    } +    qdev_init_gpio_in(DEVICE(s), gic_set_irq, i); +    for (i = 0; i < NUM_CPU(s); i++) { +        sysbus_init_irq(sbd, &s->parent_irq[i]); +    } +    for (i = 0; i < NUM_CPU(s); i++) { +        sysbus_init_irq(sbd, &s->parent_fiq[i]); +    } +    memory_region_init_io(&s->iomem, OBJECT(s), &gic_dist_ops, s, +                          "gic_dist", 0x1000); +} + +static void arm_gic_realize(DeviceState *dev, Error **errp) +{ +    /* Device instance realize function for the GIC sysbus device */ +    int i; +    GICState *s = ARM_GIC(dev); +    SysBusDevice *sbd = SYS_BUS_DEVICE(dev); +    ARMGICClass *agc = ARM_GIC_GET_CLASS(s); +    Error *local_err = NULL; + +    agc->parent_realize(dev, &local_err); +    if (local_err) { +        error_propagate(errp, local_err); +        return; +    } + +    gic_init_irqs_and_distributor(s); + +    /* Memory regions for the CPU interfaces (NVIC doesn't have these): +     * a region for "CPU interface for this core", then a region for +     * "CPU interface for core 0", "for core 1", ... +     * NB that the memory region size of 0x100 applies for the 11MPCore +     * and also cores following the GIC v1 spec (ie A9). +     * GIC v2 defines a larger memory region (0x1000) so this will need +     * to be extended when we implement A15. 
+     */ +    memory_region_init_io(&s->cpuiomem[0], OBJECT(s), &gic_thiscpu_ops, s, +                          "gic_cpu", 0x100); +    for (i = 0; i < NUM_CPU(s); i++) { +        s->backref[i] = s; +        memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops, +                              &s->backref[i], "gic_cpu", 0x100); +    } +    /* Distributor */ +    sysbus_init_mmio(sbd, &s->iomem); +    /* cpu interfaces (one for "current cpu" plus one per cpu) */ +    for (i = 0; i <= NUM_CPU(s); i++) { +        sysbus_init_mmio(sbd, &s->cpuiomem[i]); +    } +} + +static void arm_gic_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    ARMGICClass *agc = ARM_GIC_CLASS(klass); + +    agc->parent_realize = dc->realize; +    dc->realize = arm_gic_realize; +} + +static const TypeInfo arm_gic_info = { +    .name = TYPE_ARM_GIC, +    .parent = TYPE_ARM_GIC_COMMON, +    .instance_size = sizeof(GICState), +    .class_init = arm_gic_class_init, +    .class_size = sizeof(ARMGICClass), +}; + +static void arm_gic_register_types(void) +{ +    type_register_static(&arm_gic_info); +} + +type_init(arm_gic_register_types) diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c new file mode 100644 index 00000000..a64d0714 --- /dev/null +++ b/hw/intc/arm_gic_common.c @@ -0,0 +1,204 @@ +/* + * ARM GIC support - common bits of emulated and KVM kernel model + * + * Copyright (c) 2012 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. 
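Given the MMIO layout established by arm_gic_realize() above (sysbus region 0 is the distributor, region 1 the "this CPU" interface, regions 2..N+1 the per-core interfaces), a board model would wire the device roughly as below. This is only a sketch using the usual qdev/sysbus calls of this QEMU version; the base addresses, the cpu0 variable and the SPI number are made-up placeholders, not taken from this commit.

    /* Sketch of board-side wiring for TYPE_ARM_GIC (addresses hypothetical). */
    DeviceState *gicdev = qdev_create(NULL, TYPE_ARM_GIC);
    SysBusDevice *gicbusdev;

    qdev_prop_set_uint32(gicdev, "num-cpu", smp_cpus);
    qdev_prop_set_uint32(gicdev, "num-irq", 64 + GIC_INTERNAL);
    qdev_init_nofail(gicdev);

    gicbusdev = SYS_BUS_DEVICE(gicdev);
    sysbus_mmio_map(gicbusdev, 0, 0x2c001000);   /* distributor */
    sysbus_mmio_map(gicbusdev, 1, 0x2c002000);   /* "this CPU" interface */

    /* parent_irq[] is initialised before parent_fiq[], so sysbus IRQ i is
     * the IRQ line of CPU i and IRQ num_cpu + i its FIQ line. */
    sysbus_connect_irq(gicbusdev, 0,
                       qdev_get_gpio_in(DEVICE(cpu0), ARM_CPU_IRQ));

    /* A device raising SPI 7 connects to GPIO input 7 (SPIs come first). */
    qemu_irq uart_irq = qdev_get_gpio_in(gicdev, 7);
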
+ */ + +#include "gic_internal.h" + +static void gic_pre_save(void *opaque) +{ +    GICState *s = (GICState *)opaque; +    ARMGICCommonClass *c = ARM_GIC_COMMON_GET_CLASS(s); + +    if (c->pre_save) { +        c->pre_save(s); +    } +} + +static int gic_post_load(void *opaque, int version_id) +{ +    GICState *s = (GICState *)opaque; +    ARMGICCommonClass *c = ARM_GIC_COMMON_GET_CLASS(s); + +    if (c->post_load) { +        c->post_load(s); +    } +    return 0; +} + +static const VMStateDescription vmstate_gic_irq_state = { +    .name = "arm_gic_irq_state", +    .version_id = 1, +    .minimum_version_id = 1, +    .fields = (VMStateField[]) { +        VMSTATE_UINT8(enabled, gic_irq_state), +        VMSTATE_UINT8(pending, gic_irq_state), +        VMSTATE_UINT8(active, gic_irq_state), +        VMSTATE_UINT8(level, gic_irq_state), +        VMSTATE_BOOL(model, gic_irq_state), +        VMSTATE_BOOL(edge_trigger, gic_irq_state), +        VMSTATE_UINT8(group, gic_irq_state), +        VMSTATE_END_OF_LIST() +    } +}; + +static const VMStateDescription vmstate_gic = { +    .name = "arm_gic", +    .version_id = 10, +    .minimum_version_id = 10, +    .pre_save = gic_pre_save, +    .post_load = gic_post_load, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(ctlr, GICState), +        VMSTATE_UINT32_ARRAY(cpu_ctlr, GICState, GIC_NCPU), +        VMSTATE_STRUCT_ARRAY(irq_state, GICState, GIC_MAXIRQ, 1, +                             vmstate_gic_irq_state, gic_irq_state), +        VMSTATE_UINT8_ARRAY(irq_target, GICState, GIC_MAXIRQ), +        VMSTATE_UINT8_2DARRAY(priority1, GICState, GIC_INTERNAL, GIC_NCPU), +        VMSTATE_UINT8_ARRAY(priority2, GICState, GIC_MAXIRQ - GIC_INTERNAL), +        VMSTATE_UINT16_2DARRAY(last_active, GICState, GIC_MAXIRQ, GIC_NCPU), +        VMSTATE_UINT8_2DARRAY(sgi_pending, GICState, GIC_NR_SGIS, GIC_NCPU), +        VMSTATE_UINT16_ARRAY(priority_mask, GICState, GIC_NCPU), +        VMSTATE_UINT16_ARRAY(running_irq, GICState, GIC_NCPU), +        VMSTATE_UINT16_ARRAY(running_priority, GICState, GIC_NCPU), +        VMSTATE_UINT16_ARRAY(current_pending, GICState, GIC_NCPU), +        VMSTATE_UINT8_ARRAY(bpr, GICState, GIC_NCPU), +        VMSTATE_UINT8_ARRAY(abpr, GICState, GIC_NCPU), +        VMSTATE_UINT32_2DARRAY(apr, GICState, GIC_NR_APRS, GIC_NCPU), +        VMSTATE_END_OF_LIST() +    } +}; + +static void arm_gic_common_realize(DeviceState *dev, Error **errp) +{ +    GICState *s = ARM_GIC_COMMON(dev); +    int num_irq = s->num_irq; + +    if (s->num_cpu > GIC_NCPU) { +        error_setg(errp, "requested %u CPUs exceeds GIC maximum %d", +                   s->num_cpu, GIC_NCPU); +        return; +    } +    s->num_irq += GIC_BASE_IRQ; +    if (s->num_irq > GIC_MAXIRQ) { +        error_setg(errp, +                   "requested %u interrupt lines exceeds GIC maximum %d", +                   num_irq, GIC_MAXIRQ); +        return; +    } +    /* ITLinesNumber is represented as (N / 32) - 1 (see +     * gic_dist_readb) so this is an implementation imposed +     * restriction, not an architectural one: +     */ +    if (s->num_irq < 32 || (s->num_irq % 32)) { +        error_setg(errp, +                   "%d interrupt lines unsupported: not divisible by 32", +                   num_irq); +        return; +    } + +    if (s->security_extn && +        (s->revision == REV_11MPCORE || s->revision == REV_NVIC)) { +        error_setg(errp, "this GIC revision does not implement " +                   "the security extensions"); +        return; +    } +} + +static void 
arm_gic_common_reset(DeviceState *dev) +{ +    GICState *s = ARM_GIC_COMMON(dev); +    int i, j; +    memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state)); +    for (i = 0 ; i < s->num_cpu; i++) { +        if (s->revision == REV_11MPCORE) { +            s->priority_mask[i] = 0xf0; +        } else { +            s->priority_mask[i] = 0; +        } +        s->current_pending[i] = 1023; +        s->running_irq[i] = 1023; +        s->running_priority[i] = 0x100; +        s->cpu_ctlr[i] = 0; +        s->bpr[i] = GIC_MIN_BPR; +        s->abpr[i] = GIC_MIN_ABPR; +        for (j = 0; j < GIC_INTERNAL; j++) { +            s->priority1[j][i] = 0; +        } +        for (j = 0; j < GIC_NR_SGIS; j++) { +            s->sgi_pending[j][i] = 0; +        } +    } +    for (i = 0; i < GIC_NR_SGIS; i++) { +        GIC_SET_ENABLED(i, ALL_CPU_MASK); +        GIC_SET_EDGE_TRIGGER(i); +    } + +    for (i = 0; i < ARRAY_SIZE(s->priority2); i++) { +        s->priority2[i] = 0; +    } + +    for (i = 0; i < GIC_MAXIRQ; i++) { +        /* For uniprocessor GICs all interrupts always target the sole CPU */ +        if (s->num_cpu == 1) { +            s->irq_target[i] = 1; +        } else { +            s->irq_target[i] = 0; +        } +    } +    s->ctlr = 0; +} + +static Property arm_gic_common_properties[] = { +    DEFINE_PROP_UINT32("num-cpu", GICState, num_cpu, 1), +    DEFINE_PROP_UINT32("num-irq", GICState, num_irq, 32), +    /* Revision can be 1 or 2 for GIC architecture specification +     * versions 1 or 2, or 0 to indicate the legacy 11MPCore GIC. +     * (Internally, 0xffffffff also indicates "not a GIC but an NVIC".) +     */ +    DEFINE_PROP_UINT32("revision", GICState, revision, 1), +    /* True if the GIC should implement the security extensions */ +    DEFINE_PROP_BOOL("has-security-extensions", GICState, security_extn, 0), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void arm_gic_common_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); + +    dc->reset = arm_gic_common_reset; +    dc->realize = arm_gic_common_realize; +    dc->props = arm_gic_common_properties; +    dc->vmsd = &vmstate_gic; +} + +static const TypeInfo arm_gic_common_type = { +    .name = TYPE_ARM_GIC_COMMON, +    .parent = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(GICState), +    .class_size = sizeof(ARMGICCommonClass), +    .class_init = arm_gic_common_class_init, +    .abstract = true, +}; + +static void register_types(void) +{ +    type_register_static(&arm_gic_common_type); +} + +type_init(register_types) diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c new file mode 100644 index 00000000..f56bff1a --- /dev/null +++ b/hw/intc/arm_gic_kvm.c @@ -0,0 +1,663 @@ +/* + * ARM Generic Interrupt Controller using KVM in-kernel support + * + * Copyright (c) 2012 Linaro Limited + * Written by Peter Maydell + * Save/Restore logic added by Christoffer Dall. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "hw/sysbus.h" +#include "sysemu/kvm.h" +#include "kvm_arm.h" +#include "gic_internal.h" + +//#define DEBUG_GIC_KVM + +#ifdef DEBUG_GIC_KVM +static const int debug_gic_kvm = 1; +#else +static const int debug_gic_kvm = 0; +#endif + +#define DPRINTF(fmt, ...) do { \ +        if (debug_gic_kvm) { \ +            printf("arm_gic: " fmt , ## __VA_ARGS__); \ +        } \ +    } while (0) + +#define TYPE_KVM_ARM_GIC "kvm-arm-gic" +#define KVM_ARM_GIC(obj) \ +     OBJECT_CHECK(GICState, (obj), TYPE_KVM_ARM_GIC) +#define KVM_ARM_GIC_CLASS(klass) \ +     OBJECT_CLASS_CHECK(KVMARMGICClass, (klass), TYPE_KVM_ARM_GIC) +#define KVM_ARM_GIC_GET_CLASS(obj) \ +     OBJECT_GET_CLASS(KVMARMGICClass, (obj), TYPE_KVM_ARM_GIC) + +typedef struct KVMARMGICClass { +    ARMGICCommonClass parent_class; +    DeviceRealize parent_realize; +    void (*parent_reset)(DeviceState *dev); +} KVMARMGICClass; + +static void kvm_arm_gic_set_irq(void *opaque, int irq, int level) +{ +    /* Meaning of the 'irq' parameter: +     *  [0..N-1] : external interrupts +     *  [N..N+31] : PPI (internal) interrupts for CPU 0 +     *  [N+32..N+63] : PPI (internal interrupts for CPU 1 +     *  ... +     * Convert this to the kernel's desired encoding, which +     * has separate fields in the irq number for type, +     * CPU number and interrupt number. +     */ +    GICState *s = (GICState *)opaque; +    int kvm_irq, irqtype, cpu; + +    if (irq < (s->num_irq - GIC_INTERNAL)) { +        /* External interrupt. The kernel numbers these like the GIC +         * hardware, with external interrupt IDs starting after the +         * internal ones. 
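The comment above describes the same GPIO line layout the TCG GIC uses. A small sketch of the mapping it implies (illustrative helper, not part of this file):

    /* Sketch: GPIO line for a given (cpu, interrupt ID) pair, where
     * N = s->num_irq - GIC_INTERNAL is the number of external lines. */
    static int gic_gpio_line(GICState *s, int cpu, int intid)
    {
        int num_spi = s->num_irq - GIC_INTERNAL;

        if (intid >= GIC_INTERNAL) {
            return intid - GIC_INTERNAL;              /* SPI: lines 0..N-1 */
        }
        return num_spi + cpu * GIC_INTERNAL + intid;  /* internal IRQ of CPU */
    }
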
+         */ +        irqtype = KVM_ARM_IRQ_TYPE_SPI; +        cpu = 0; +        irq += GIC_INTERNAL; +    } else { +        /* Internal interrupt: decode into (cpu, interrupt id) */ +        irqtype = KVM_ARM_IRQ_TYPE_PPI; +        irq -= (s->num_irq - GIC_INTERNAL); +        cpu = irq / GIC_INTERNAL; +        irq %= GIC_INTERNAL; +    } +    kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT) +        | (cpu << KVM_ARM_IRQ_VCPU_SHIFT) | irq; + +    kvm_set_irq(kvm_state, kvm_irq, !!level); +} + +static bool kvm_arm_gic_can_save_restore(GICState *s) +{ +    return s->dev_fd >= 0; +} + +static bool kvm_gic_supports_attr(GICState *s, int group, int attrnum) +{ +    struct kvm_device_attr attr = { +        .group = group, +        .attr = attrnum, +        .flags = 0, +    }; + +    if (s->dev_fd == -1) { +        return false; +    } + +    return kvm_device_ioctl(s->dev_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0; +} + +static void kvm_gic_access(GICState *s, int group, int offset, +                                   int cpu, uint32_t *val, bool write) +{ +    struct kvm_device_attr attr; +    int type; +    int err; + +    cpu = cpu & 0xff; + +    attr.flags = 0; +    attr.group = group; +    attr.attr = (((uint64_t)cpu << KVM_DEV_ARM_VGIC_CPUID_SHIFT) & +                 KVM_DEV_ARM_VGIC_CPUID_MASK) | +                (((uint64_t)offset << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) & +                 KVM_DEV_ARM_VGIC_OFFSET_MASK); +    attr.addr = (uintptr_t)val; + +    if (write) { +        type = KVM_SET_DEVICE_ATTR; +    } else { +        type = KVM_GET_DEVICE_ATTR; +    } + +    err = kvm_device_ioctl(s->dev_fd, type, &attr); +    if (err < 0) { +        fprintf(stderr, "KVM_{SET/GET}_DEVICE_ATTR failed: %s\n", +                strerror(-err)); +        abort(); +    } +} + +static void kvm_gicd_access(GICState *s, int offset, int cpu, +                            uint32_t *val, bool write) +{ +    kvm_gic_access(s, KVM_DEV_ARM_VGIC_GRP_DIST_REGS, +                   offset, cpu, val, write); +} + +static void kvm_gicc_access(GICState *s, int offset, int cpu, +                            uint32_t *val, bool write) +{ +    kvm_gic_access(s, KVM_DEV_ARM_VGIC_GRP_CPU_REGS, +                   offset, cpu, val, write); +} + +#define for_each_irq_reg(_ctr, _max_irq, _field_width) \ +    for (_ctr = 0; _ctr < ((_max_irq) / (32 / (_field_width))); _ctr++) + +/* + * Translate from the in-kernel field for an IRQ value to/from the qemu + * representation. + */ +typedef void (*vgic_translate_fn)(GICState *s, int irq, int cpu, +                                  uint32_t *field, bool to_kernel); + +/* synthetic translate function used for clear/set registers to completely + * clear a setting using a clear-register before setting the remaining bits + * using a set-register */ +static void translate_clear(GICState *s, int irq, int cpu, +                            uint32_t *field, bool to_kernel) +{ +    if (to_kernel) { +        *field = ~0; +    } else { +        /* does not make sense: qemu model doesn't use set/clear regs */ +        abort(); +    } +} + +static void translate_group(GICState *s, int irq, int cpu, +                            uint32_t *field, bool to_kernel) +{ +    int cm = (irq < GIC_INTERNAL) ? 
(1 << cpu) : ALL_CPU_MASK;
+
+    if (to_kernel) {
+        *field = GIC_TEST_GROUP(irq, cm);
+    } else {
+        if (*field & 1) {
+            GIC_SET_GROUP(irq, cm);
+        }
+    }
+}
+
+static void translate_enabled(GICState *s, int irq, int cpu,
+                              uint32_t *field, bool to_kernel)
+{
+    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+
+    if (to_kernel) {
+        *field = GIC_TEST_ENABLED(irq, cm);
+    } else {
+        if (*field & 1) {
+            GIC_SET_ENABLED(irq, cm);
+        }
+    }
+}
+
+static void translate_pending(GICState *s, int irq, int cpu,
+                              uint32_t *field, bool to_kernel)
+{
+    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+
+    if (to_kernel) {
+        *field = gic_test_pending(s, irq, cm);
+    } else {
+        if (*field & 1) {
+            GIC_SET_PENDING(irq, cm);
+            /* TODO: Capture whether the level line is held high in the kernel */
+        }
+    }
+}
+
+static void translate_active(GICState *s, int irq, int cpu,
+                             uint32_t *field, bool to_kernel)
+{
+    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+
+    if (to_kernel) {
+        *field = GIC_TEST_ACTIVE(irq, cm);
+    } else {
+        if (*field & 1) {
+            GIC_SET_ACTIVE(irq, cm);
+        }
+    }
+}
+
+static void translate_trigger(GICState *s, int irq, int cpu,
+                              uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = (GIC_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0;
+    } else {
+        if (*field & 0x2) {
+            GIC_SET_EDGE_TRIGGER(irq);
+        }
+    }
+}
+
+static void translate_priority(GICState *s, int irq, int cpu,
+                               uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = GIC_GET_PRIORITY(irq, cpu) & 0xff;
+    } else {
+        gic_set_priority(s, cpu, irq, *field & 0xff, MEMTXATTRS_UNSPECIFIED);
+    }
+}
+
+static void translate_targets(GICState *s, int irq, int cpu,
+                              uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = s->irq_target[irq] & 0xff;
+    } else {
+        s->irq_target[irq] = *field & 0xff;
+    }
+}
+
+static void translate_sgisource(GICState *s, int irq, int cpu,
+                                uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = s->sgi_pending[irq][cpu] & 0xff;
+    } else {
+        s->sgi_pending[irq][cpu] = *field & 0xff;
+    }
+}
+
+/* Read a register group from the kernel VGIC */
+static void kvm_dist_get(GICState *s, uint32_t offset, int width,
+                         int maxirq, vgic_translate_fn translate_fn)
+{
+    uint32_t reg;
+    int i;
+    int j;
+    int irq;
+    int cpu;
+    int regsz = 32 / width; /* irqs per kernel register */
+    uint32_t field;
+
+    for_each_irq_reg(i, maxirq, width) {
+        irq = i * regsz;
+        cpu = 0;
+        while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
+            kvm_gicd_access(s, offset, cpu, &reg, false);
+            for (j = 0; j < regsz; j++) {
+                field = extract32(reg, j * width, width);
+                translate_fn(s, irq + j, cpu, &field, false);
+            }
+
+            cpu++;
+        }
+        offset += 4;
+    }
+}
+
+/* Write a register group to the kernel VGIC */
+static void kvm_dist_put(GICState *s, uint32_t offset, int width,
+                         int maxirq, vgic_translate_fn translate_fn)
+{
+    uint32_t reg;
+    int i;
+    int j;
+    int irq;
+    int cpu;
+    int regsz = 32 / width; /* irqs per kernel register */
+    uint32_t field;
+
+    for_each_irq_reg(i, maxirq, width) {
+        irq = i * regsz;
+        cpu = 0;
+        while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
+            reg = 0;
+            for (j = 0; j < regsz; j++) {
+                translate_fn(s, irq + j, cpu, &field, true);
+                reg = deposit32(reg, j * width, width, field);
+            }
+            kvm_gicd_access(s, offset, cpu, &reg, true);
+
+            cpu++;
+        }
+        offset += 4;
+    }
+}
+
+static void kvm_arm_gic_put(GICState *s)
+{
+    uint32_t reg;
+    int i;
+    int cpu;
+    int num_cpu;
+    int num_irq;
+
+    if (!kvm_arm_gic_can_save_restore(s)) {
+            DPRINTF("Cannot put kernel gic state, no kernel interface");
+            return;
+    }
+
+    /* Note: We do the restore in a slightly different order than the save
+     * (where the order doesn't matter and is simply ordered according to the
+     * register offset values) */
+
+    /*****************************************************************
+     * Distributor State
+     */
+
+    /* s->ctlr -> GICD_CTLR */
+    reg = s->ctlr;
+    kvm_gicd_access(s, 0x0, 0, &reg, true);
+
+    /* Sanity checking on GICD_TYPER and s->num_irq, s->num_cpu */
+    kvm_gicd_access(s, 0x4, 0, &reg, false);
+    num_irq = ((reg & 0x1f) + 1) * 32;
+    num_cpu = ((reg & 0xe0) >> 5) + 1;
+
+    if (num_irq < s->num_irq) {
+            fprintf(stderr, "Restoring %u IRQs, but kernel supports max %d\n",
+                    s->num_irq, num_irq);
+            abort();
+    } else if (num_cpu != s->num_cpu) {
+            fprintf(stderr, "Restoring %u CPU interfaces, kernel only has %d\n",
+                    s->num_cpu, num_cpu);
+            /* Did we not create the VCPUs in the kernel yet? */
+            abort();
+    }
+
+    /* TODO: Consider checking compatibility with the IIDR ?
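The sanity check above recovers the kernel's view of the GIC geometry from GICD_TYPER using the same field layout the emulated distributor reports (ITLinesNumber in bits [4:0], CPUNumber in bits [7:5]). A small worked example, for illustration only:

    /* Sketch: decode GICD_TYPER the way kvm_arm_gic_put()/_get() do. */
    uint32_t typer = 0x24;                    /* hypothetical register value */
    int num_irq = ((typer & 0x1f) + 1) * 32;  /* (4 + 1) * 32 = 160 IRQs */
    int num_cpu = ((typer & 0xe0) >> 5) + 1;  /* 1 + 1 = 2 CPU interfaces */
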
*/
+
+    /* irq_state[n].enabled -> GICD_ISENABLERn */
+    kvm_dist_put(s, 0x180, 1, s->num_irq, translate_clear);
+    kvm_dist_put(s, 0x100, 1, s->num_irq, translate_enabled);
+
+    /* irq_state[n].group -> GICD_IGROUPRn */
+    kvm_dist_put(s, 0x80, 1, s->num_irq, translate_group);
+
+    /* s->irq_target[irq] -> GICD_ITARGETSRn
+     * (restore targets before pending to ensure the pending state is set on
+     * the appropriate CPU interfaces in the kernel) */
+    kvm_dist_put(s, 0x800, 8, s->num_irq, translate_targets);
+
+    /* irq_state[n].trigger -> GICD_ICFGRn
+     * (restore configuration registers before pending IRQs so we treat
+     * level/edge correctly) */
+    kvm_dist_put(s, 0xc00, 2, s->num_irq, translate_trigger);
+
+    /* irq_state[n].pending + irq_state[n].level -> GICD_ISPENDRn */
+    kvm_dist_put(s, 0x280, 1, s->num_irq, translate_clear);
+    kvm_dist_put(s, 0x200, 1, s->num_irq, translate_pending);
+
+    /* irq_state[n].active -> GICD_ISACTIVERn */
+    kvm_dist_put(s, 0x380, 1, s->num_irq, translate_clear);
+    kvm_dist_put(s, 0x300, 1, s->num_irq, translate_active);
+
+
+    /* s->priorityX[irq] -> ICD_IPRIORITYRn */
+    kvm_dist_put(s, 0x400, 8, s->num_irq, translate_priority);
+
+    /* s->sgi_pending -> ICD_CPENDSGIRn */
+    kvm_dist_put(s, 0xf10, 8, GIC_NR_SGIS, translate_clear);
+    kvm_dist_put(s, 0xf20, 8, GIC_NR_SGIS, translate_sgisource);
+
+
+    /*****************************************************************
+     * CPU Interface(s) State
+     */
+
+    for (cpu = 0; cpu < s->num_cpu; cpu++) {
+        /* s->cpu_ctlr[cpu] -> GICC_CTLR */
+        reg = s->cpu_ctlr[cpu];
+        kvm_gicc_access(s, 0x00, cpu, &reg, true);
+
+        /* s->priority_mask[cpu] -> GICC_PMR */
+        reg = (s->priority_mask[cpu] & 0xff);
+        kvm_gicc_access(s, 0x04, cpu, &reg, true);
+
+        /* s->bpr[cpu] -> GICC_BPR */
+        reg = (s->bpr[cpu] & 0x7);
+        kvm_gicc_access(s, 0x08, cpu, &reg, true);
+
+        /* s->abpr[cpu] -> GICC_ABPR */
+        reg = (s->abpr[cpu] & 0x7);
+        kvm_gicc_access(s, 0x1c, cpu, &reg, true);
+
+        /* s->apr[n][cpu] -> GICC_APRn */
+        for (i = 0; i < 4; i++) {
+            reg = s->apr[i][cpu];
+            kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, true);
+        }
+    }
+}
+
+static void kvm_arm_gic_get(GICState *s)
+{
+    uint32_t reg;
+    int i;
+    int cpu;
+
+    if (!kvm_arm_gic_can_save_restore(s)) {
+            DPRINTF("Cannot get kernel gic state, no kernel interface");
+            return;
+    }
+
+    /*****************************************************************
+     * Distributor State
+     */
+
+    /* GICD_CTLR -> s->ctlr */
+    kvm_gicd_access(s, 0x0, 0, &reg, false);
+    s->ctlr = reg;
+
+    /* Sanity checking on GICD_TYPER -> s->num_irq, s->num_cpu */
+    kvm_gicd_access(s, 0x4, 0, &reg, false);
+    s->num_irq = ((reg & 0x1f) + 1) * 32;
+    s->num_cpu = ((reg & 0xe0) >> 5) + 1;
+
+    if (s->num_irq > GIC_MAXIRQ) {
+            fprintf(stderr, "Too many IRQs reported from the kernel: %d\n",
+                    s->num_irq);
+            abort();
+    }
+
+    /* GICD_IIDR -> ?
*/
+    kvm_gicd_access(s, 0x8, 0, &reg, false);
+
+    /* Clear all the IRQ settings */
+    for (i = 0; i < s->num_irq; i++) {
+        memset(&s->irq_state[i], 0, sizeof(s->irq_state[0]));
+    }
+
+    /* GICD_IGROUPRn -> irq_state[n].group */
+    kvm_dist_get(s, 0x80, 1, s->num_irq, translate_group);
+
+    /* GICD_ISENABLERn -> irq_state[n].enabled */
+    kvm_dist_get(s, 0x100, 1, s->num_irq, translate_enabled);
+
+    /* GICD_ISPENDRn -> irq_state[n].pending + irq_state[n].level */
+    kvm_dist_get(s, 0x200, 1, s->num_irq, translate_pending);
+
+    /* GICD_ISACTIVERn -> irq_state[n].active */
+    kvm_dist_get(s, 0x300, 1, s->num_irq, translate_active);
+
+    /* GICD_ICFGRn -> irq_state[n].trigger */
+    kvm_dist_get(s, 0xc00, 2, s->num_irq, translate_trigger);
+
+    /* GICD_IPRIORITYRn -> s->priorityX[irq] */
+    kvm_dist_get(s, 0x400, 8, s->num_irq, translate_priority);
+
+    /* GICD_ITARGETSRn -> s->irq_target[irq] */
+    kvm_dist_get(s, 0x800, 8, s->num_irq, translate_targets);
+
+    /* GICD_CPENDSGIRn -> s->sgi_pending */
+    kvm_dist_get(s, 0xf10, 8, GIC_NR_SGIS, translate_sgisource);
+
+
+    /*****************************************************************
+     * CPU Interface(s) State
+     */
+
+    for (cpu = 0; cpu < s->num_cpu; cpu++) {
+        /* GICC_CTLR -> s->cpu_ctlr[cpu] */
+        kvm_gicc_access(s, 0x00, cpu, &reg, false);
+        s->cpu_ctlr[cpu] = reg;
+
+        /* GICC_PMR -> s->priority_mask[cpu] */
+        kvm_gicc_access(s, 0x04, cpu, &reg, false);
+        s->priority_mask[cpu] = (reg & 0xff);
+
+        /* GICC_BPR -> s->bpr[cpu] */
+        kvm_gicc_access(s, 0x08, cpu, &reg, false);
+        s->bpr[cpu] = (reg & 0x7);
+
+        /* GICC_ABPR -> s->abpr[cpu] */
+        kvm_gicc_access(s, 0x1c, cpu, &reg, false);
+        s->abpr[cpu] = (reg & 0x7);
+
+        /* GICC_APRn -> s->apr[n][cpu] */
+        for (i = 0; i < 4; i++) {
+            kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, false);
+            s->apr[i][cpu] = reg;
+        }
+    }
+}
+
+static void kvm_arm_gic_reset(DeviceState *dev)
+{
+    GICState *s = ARM_GIC_COMMON(dev);
+    KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);
+
+    kgc->parent_reset(dev);
+    kvm_arm_gic_put(s);
+}
+
+static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
+{
+    int i;
+    GICState *s = KVM_ARM_GIC(dev);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+    KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);
+    Error *local_err = NULL;
+    int ret;
+
+    kgc->parent_realize(dev, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    if (s->security_extn) {
+        error_setg(errp, "the in-kernel VGIC does not implement the "
+                   "security extensions");
+        return;
+    }
+
+    i = s->num_irq - GIC_INTERNAL;
+    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
+     * GPIO array layout is thus:
+     *  [0..N-1] SPIs
+     *  [N..N+31] PPIs for CPU 0
+     *  [N+32..N+63] PPIs for CPU 1
+     *   ...
+     */
+    i += (GIC_INTERNAL * s->num_cpu);
+    qdev_init_gpio_in(dev, kvm_arm_gic_set_irq, i);
+
+    for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
+        qemu_irq irq = qdev_get_gpio_in(dev, i);
+        kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, i);
+    }
+
+    /* We never use our outbound IRQ/FIQ lines but provide them so that
+     * we maintain the same interface as the non-KVM GIC.
+     */ +    for (i = 0; i < s->num_cpu; i++) { +        sysbus_init_irq(sbd, &s->parent_irq[i]); +    } +    for (i = 0; i < s->num_cpu; i++) { +        sysbus_init_irq(sbd, &s->parent_fiq[i]); +    } + +    /* Try to create the device via the device control API */ +    s->dev_fd = -1; +    ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V2, false); +    if (ret >= 0) { +        s->dev_fd = ret; +    } else if (ret != -ENODEV && ret != -ENOTSUP) { +        error_setg_errno(errp, -ret, "error creating in-kernel VGIC"); +        return; +    } + +    if (kvm_gic_supports_attr(s, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) { +        uint32_t numirqs = s->num_irq; +        kvm_gic_access(s, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, 0, &numirqs, 1); +    } + +    /* Tell the kernel to complete VGIC initialization now */ +    if (kvm_gic_supports_attr(s, KVM_DEV_ARM_VGIC_GRP_CTRL, +                              KVM_DEV_ARM_VGIC_CTRL_INIT)) { +        kvm_gic_access(s, KVM_DEV_ARM_VGIC_GRP_CTRL, +                          KVM_DEV_ARM_VGIC_CTRL_INIT, 0, 0, 1); +    } + +    /* Distributor */ +    memory_region_init_reservation(&s->iomem, OBJECT(s), +                                   "kvm-gic_dist", 0x1000); +    sysbus_init_mmio(sbd, &s->iomem); +    kvm_arm_register_device(&s->iomem, +                            (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) +                            | KVM_VGIC_V2_ADDR_TYPE_DIST, +                            KVM_DEV_ARM_VGIC_GRP_ADDR, +                            KVM_VGIC_V2_ADDR_TYPE_DIST, +                            s->dev_fd); +    /* CPU interface for current core. Unlike arm_gic, we don't +     * provide the "interface for core #N" memory regions, because +     * cores with a VGIC don't have those. +     */ +    memory_region_init_reservation(&s->cpuiomem[0], OBJECT(s), +                                   "kvm-gic_cpu", 0x1000); +    sysbus_init_mmio(sbd, &s->cpuiomem[0]); +    kvm_arm_register_device(&s->cpuiomem[0], +                            (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) +                            | KVM_VGIC_V2_ADDR_TYPE_CPU, +                            KVM_DEV_ARM_VGIC_GRP_ADDR, +                            KVM_VGIC_V2_ADDR_TYPE_CPU, +                            s->dev_fd); +} + +static void kvm_arm_gic_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    ARMGICCommonClass *agcc = ARM_GIC_COMMON_CLASS(klass); +    KVMARMGICClass *kgc = KVM_ARM_GIC_CLASS(klass); + +    agcc->pre_save = kvm_arm_gic_get; +    agcc->post_load = kvm_arm_gic_put; +    kgc->parent_realize = dc->realize; +    kgc->parent_reset = dc->reset; +    dc->realize = kvm_arm_gic_realize; +    dc->reset = kvm_arm_gic_reset; +} + +static const TypeInfo kvm_arm_gic_info = { +    .name = TYPE_KVM_ARM_GIC, +    .parent = TYPE_ARM_GIC_COMMON, +    .instance_size = sizeof(GICState), +    .class_init = kvm_arm_gic_class_init, +    .class_size = sizeof(KVMARMGICClass), +}; + +static void kvm_arm_gic_register_types(void) +{ +    type_register_static(&kvm_arm_gic_info); +} + +type_init(kvm_arm_gic_register_types) diff --git a/hw/intc/arm_gicv2m.c b/hw/intc/arm_gicv2m.c new file mode 100644 index 00000000..43d1976c --- /dev/null +++ b/hw/intc/arm_gicv2m.c @@ -0,0 +1,192 @@ +/* + *  GICv2m extension for MSI/MSI-x support with a GICv2-based system + * + * Copyright (C) 2015 Linaro, All rights reserved. 
+ * + * Author: Christoffer Dall <christoffer.dall@linaro.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* This file implements an emulated GICv2m widget as described in the ARM + * Server Base System Architecture (SBSA) specification Version 2.2 + * (ARM-DEN-0029 v2.2) pages 35-39 without any optional implementation defined + * identification registers and with a single non-secure MSI register frame. + */ + +#include "hw/sysbus.h" +#include "hw/pci/msi.h" + +#define TYPE_ARM_GICV2M "arm-gicv2m" +#define ARM_GICV2M(obj) OBJECT_CHECK(ARMGICv2mState, (obj), TYPE_ARM_GICV2M) + +#define GICV2M_NUM_SPI_MAX 128 + +#define V2M_MSI_TYPER           0x008 +#define V2M_MSI_SETSPI_NS       0x040 +#define V2M_MSI_IIDR            0xFCC +#define V2M_IIDR0               0xFD0 +#define V2M_IIDR11              0xFFC + +#define PRODUCT_ID_QEMU         0x51 /* ASCII code Q */ + +typedef struct ARMGICv2mState { +    SysBusDevice parent_obj; + +    MemoryRegion iomem; +    qemu_irq spi[GICV2M_NUM_SPI_MAX]; + +    uint32_t base_spi; +    uint32_t num_spi; +} ARMGICv2mState; + +static void gicv2m_set_irq(void *opaque, int irq) +{ +    ARMGICv2mState *s = (ARMGICv2mState *)opaque; + +    qemu_irq_pulse(s->spi[irq]); +} + +static uint64_t gicv2m_read(void *opaque, hwaddr offset, +                            unsigned size) +{ +    ARMGICv2mState *s = (ARMGICv2mState *)opaque; +    uint32_t val; + +    if (size != 4) { +        qemu_log_mask(LOG_GUEST_ERROR, "gicv2m_read: bad size %u\n", size); +        return 0; +    } + +    switch (offset) { +    case V2M_MSI_TYPER: +        val = (s->base_spi + 32) << 16; +        val |= s->num_spi; +        return val; +    case V2M_MSI_IIDR: +        /* We don't have any valid implementor so we leave that field as zero +         * and we return 0 in the arch revision as per the spec. +         */ +        return (PRODUCT_ID_QEMU << 20); +    case V2M_IIDR0 ... V2M_IIDR11: +        /* We do not implement any optional identification registers and the +         * mandatory MSI_PIDR2 register reads as 0x0, so we capture all +         * implementation defined registers here. 
+         */ +        return 0; +    default: +        qemu_log_mask(LOG_GUEST_ERROR, +                      "gicv2m_read: Bad offset %x\n", (int)offset); +        return 0; +    } +} + +static void gicv2m_write(void *opaque, hwaddr offset, +                        uint64_t value, unsigned size) +{ +    ARMGICv2mState *s = (ARMGICv2mState *)opaque; + +    if (size != 2 && size != 4) { +        qemu_log_mask(LOG_GUEST_ERROR, "gicv2m_write: bad size %u\n", size); +        return; +    } + +    switch (offset) { +    case V2M_MSI_SETSPI_NS: { +        int spi; + +        spi = (value & 0x3ff) - (s->base_spi + 32); +        if (spi >= 0 && spi < s->num_spi) { +            gicv2m_set_irq(s, spi); +        } +        return; +    } +    default: +        qemu_log_mask(LOG_GUEST_ERROR, +                      "gicv2m_write: Bad offset %x\n", (int)offset); +    } +} + +static const MemoryRegionOps gicv2m_ops = { +    .read = gicv2m_read, +    .write = gicv2m_write, +    .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void gicv2m_realize(DeviceState *dev, Error **errp) +{ +    ARMGICv2mState *s = ARM_GICV2M(dev); +    int i; + +    if (s->num_spi > GICV2M_NUM_SPI_MAX) { +        error_setg(errp, +                   "requested %u SPIs exceeds GICv2m frame maximum %d", +                   s->num_spi, GICV2M_NUM_SPI_MAX); +        return; +    } + +    if (s->base_spi + 32 > 1020 - s->num_spi) { +        error_setg(errp, +                   "requested base SPI %u+%u exceeds max. number 1020", +                   s->base_spi + 32, s->num_spi); +        return; +    } + +    for (i = 0; i < s->num_spi; i++) { +        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->spi[i]); +    } + +    msi_supported = true; +    kvm_gsi_direct_mapping = true; +    kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled(); +} + +static void gicv2m_init(Object *obj) +{ +    SysBusDevice *sbd = SYS_BUS_DEVICE(obj); +    ARMGICv2mState *s = ARM_GICV2M(obj); + +    memory_region_init_io(&s->iomem, OBJECT(s), &gicv2m_ops, s, +                          "gicv2m", 0x1000); +    sysbus_init_mmio(sbd, &s->iomem); +} + +static Property gicv2m_properties[] = { +    DEFINE_PROP_UINT32("base-spi", ARMGICv2mState, base_spi, 0), +    DEFINE_PROP_UINT32("num-spi", ARMGICv2mState, num_spi, 64), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void gicv2m_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); + +    dc->props = gicv2m_properties; +    dc->realize = gicv2m_realize; +} + +static const TypeInfo gicv2m_info = { +    .name          = TYPE_ARM_GICV2M, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(ARMGICv2mState), +    .instance_init = gicv2m_init, +    .class_init    = gicv2m_class_init, +}; + +static void gicv2m_register_types(void) +{ +    type_register_static(&gicv2m_info); +} + +type_init(gicv2m_register_types) diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c new file mode 100644 index 00000000..e13b729e --- /dev/null +++ b/hw/intc/armv7m_nvic.c @@ -0,0 +1,572 @@ +/* + * ARM Nested Vectored Interrupt Controller + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + * + * The ARMv7M System controller is fairly tightly tied in with the + * NVIC.  Much of that is also implemented here. 
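+ *
+ * The model is built on top of the ARM GIC code: the NVIC state embeds a
+ * GICState and the device derives from TYPE_ARM_GIC_COMMON, with the
+ * system control registers and the SysTick timer layered on top.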
+ */ + +#include "hw/sysbus.h" +#include "qemu/timer.h" +#include "hw/arm/arm.h" +#include "exec/address-spaces.h" +#include "gic_internal.h" + +typedef struct { +    GICState gic; +    struct { +        uint32_t control; +        uint32_t reload; +        int64_t tick; +        QEMUTimer *timer; +    } systick; +    MemoryRegion sysregmem; +    MemoryRegion gic_iomem_alias; +    MemoryRegion container; +    uint32_t num_irq; +} nvic_state; + +#define TYPE_NVIC "armv7m_nvic" +/** + * NVICClass: + * @parent_reset: the parent class' reset handler. + * + * A model of the v7M NVIC and System Controller + */ +typedef struct NVICClass { +    /*< private >*/ +    ARMGICClass parent_class; +    /*< public >*/ +    DeviceRealize parent_realize; +    void (*parent_reset)(DeviceState *dev); +} NVICClass; + +#define NVIC_CLASS(klass) \ +    OBJECT_CLASS_CHECK(NVICClass, (klass), TYPE_NVIC) +#define NVIC_GET_CLASS(obj) \ +    OBJECT_GET_CLASS(NVICClass, (obj), TYPE_NVIC) +#define NVIC(obj) \ +    OBJECT_CHECK(nvic_state, (obj), TYPE_NVIC) + +static const uint8_t nvic_id[] = { +    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1 +}; + +/* qemu timers run at 1GHz.   We want something closer to 1MHz.  */ +#define SYSTICK_SCALE 1000ULL + +#define SYSTICK_ENABLE    (1 << 0) +#define SYSTICK_TICKINT   (1 << 1) +#define SYSTICK_CLKSOURCE (1 << 2) +#define SYSTICK_COUNTFLAG (1 << 16) + +int system_clock_scale; + +/* Conversion factor from qemu timer to SysTick frequencies.  */ +static inline int64_t systick_scale(nvic_state *s) +{ +    if (s->systick.control & SYSTICK_CLKSOURCE) +        return system_clock_scale; +    else +        return 1000; +} + +static void systick_reload(nvic_state *s, int reset) +{ +    /* The Cortex-M3 Devices Generic User Guide says that "When the +     * ENABLE bit is set to 1, the counter loads the RELOAD value from the +     * SYST RVR register and then counts down". So, we need to check the +     * ENABLE bit before reloading the value. +     */ +    if ((s->systick.control & SYSTICK_ENABLE) == 0) { +        return; +    } + +    if (reset) +        s->systick.tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +    s->systick.tick += (s->systick.reload + 1) * systick_scale(s); +    timer_mod(s->systick.timer, s->systick.tick); +} + +static void systick_timer_tick(void * opaque) +{ +    nvic_state *s = (nvic_state *)opaque; +    s->systick.control |= SYSTICK_COUNTFLAG; +    if (s->systick.control & SYSTICK_TICKINT) { +        /* Trigger the interrupt.  */ +        armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK); +    } +    if (s->systick.reload == 0) { +        s->systick.control &= ~SYSTICK_ENABLE; +    } else { +        systick_reload(s, 0); +    } +} + +static void systick_reset(nvic_state *s) +{ +    s->systick.control = 0; +    s->systick.reload = 0; +    s->systick.tick = 0; +    timer_del(s->systick.timer); +} + +/* The external routines use the hardware vector numbering, ie. the first +   IRQ is #16.  The internal GIC routines use #32 as the first IRQ.  */ +void armv7m_nvic_set_pending(void *opaque, int irq) +{ +    nvic_state *s = (nvic_state *)opaque; +    if (irq >= 16) +        irq += 16; +    gic_set_pending_private(&s->gic, 0, irq); +} + +/* Make pending IRQ active.  
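+   Returns the exception number in the hardware numbering described above
+   (so the first external IRQ comes back as 16); a GIC result of 1023 means
+   nothing was pending and is treated as a fatal error.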
*/ +int armv7m_nvic_acknowledge_irq(void *opaque) +{ +    nvic_state *s = (nvic_state *)opaque; +    uint32_t irq; + +    irq = gic_acknowledge_irq(&s->gic, 0, MEMTXATTRS_UNSPECIFIED); +    if (irq == 1023) +        hw_error("Interrupt but no vector\n"); +    if (irq >= 32) +        irq -= 16; +    return irq; +} + +void armv7m_nvic_complete_irq(void *opaque, int irq) +{ +    nvic_state *s = (nvic_state *)opaque; +    if (irq >= 16) +        irq += 16; +    gic_complete_irq(&s->gic, 0, irq, MEMTXATTRS_UNSPECIFIED); +} + +static uint32_t nvic_readl(nvic_state *s, uint32_t offset) +{ +    ARMCPU *cpu; +    uint32_t val; +    int irq; + +    switch (offset) { +    case 4: /* Interrupt Control Type.  */ +        return (s->num_irq / 32) - 1; +    case 0x10: /* SysTick Control and Status.  */ +        val = s->systick.control; +        s->systick.control &= ~SYSTICK_COUNTFLAG; +        return val; +    case 0x14: /* SysTick Reload Value.  */ +        return s->systick.reload; +    case 0x18: /* SysTick Current Value.  */ +        { +            int64_t t; +            if ((s->systick.control & SYSTICK_ENABLE) == 0) +                return 0; +            t = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +            if (t >= s->systick.tick) +                return 0; +            val = ((s->systick.tick - (t + 1)) / systick_scale(s)) + 1; +            /* The interrupt in triggered when the timer reaches zero. +               However the counter is not reloaded until the next clock +               tick.  This is a hack to return zero during the first tick.  */ +            if (val > s->systick.reload) +                val = 0; +            return val; +        } +    case 0x1c: /* SysTick Calibration Value.  */ +        return 10000; +    case 0xd00: /* CPUID Base.  */ +        cpu = ARM_CPU(current_cpu); +        return cpu->midr; +    case 0xd04: /* Interrupt Control State.  */ +        /* VECTACTIVE */ +        val = s->gic.running_irq[0]; +        if (val == 1023) { +            val = 0; +        } else if (val >= 32) { +            val -= 16; +        } +        /* RETTOBASE */ +        if (s->gic.running_irq[0] == 1023 +                || s->gic.last_active[s->gic.running_irq[0]][0] == 1023) { +            val |= (1 << 11); +        } +        /* VECTPENDING */ +        if (s->gic.current_pending[0] != 1023) +            val |= (s->gic.current_pending[0] << 12); +        /* ISRPENDING */ +        for (irq = 32; irq < s->num_irq; irq++) { +            if (s->gic.irq_state[irq].pending) { +                val |= (1 << 22); +                break; +            } +        } +        /* PENDSTSET */ +        if (s->gic.irq_state[ARMV7M_EXCP_SYSTICK].pending) +            val |= (1 << 26); +        /* PENDSVSET */ +        if (s->gic.irq_state[ARMV7M_EXCP_PENDSV].pending) +            val |= (1 << 28); +        /* NMIPENDSET */ +        if (s->gic.irq_state[ARMV7M_EXCP_NMI].pending) +            val |= (1 << 31); +        return val; +    case 0xd08: /* Vector Table Offset.  */ +        cpu = ARM_CPU(current_cpu); +        return cpu->env.v7m.vecbase; +    case 0xd0c: /* Application Interrupt/Reset Control.  */ +        return 0xfa050000; +    case 0xd10: /* System Control.  */ +        /* TODO: Implement SLEEPONEXIT.  */ +        return 0; +    case 0xd14: /* Configuration Control.  */ +        /* TODO: Implement Configuration Control bits.  */ +        return 0; +    case 0xd24: /* System Handler Status.  
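+                   Built from the active, pending and enabled bits of the
+                   MemManage, BusFault, UsageFault, SVC, Debug, PendSV and
+                   SysTick exceptions.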
*/ +        val = 0; +        if (s->gic.irq_state[ARMV7M_EXCP_MEM].active) val |= (1 << 0); +        if (s->gic.irq_state[ARMV7M_EXCP_BUS].active) val |= (1 << 1); +        if (s->gic.irq_state[ARMV7M_EXCP_USAGE].active) val |= (1 << 3); +        if (s->gic.irq_state[ARMV7M_EXCP_SVC].active) val |= (1 << 7); +        if (s->gic.irq_state[ARMV7M_EXCP_DEBUG].active) val |= (1 << 8); +        if (s->gic.irq_state[ARMV7M_EXCP_PENDSV].active) val |= (1 << 10); +        if (s->gic.irq_state[ARMV7M_EXCP_SYSTICK].active) val |= (1 << 11); +        if (s->gic.irq_state[ARMV7M_EXCP_USAGE].pending) val |= (1 << 12); +        if (s->gic.irq_state[ARMV7M_EXCP_MEM].pending) val |= (1 << 13); +        if (s->gic.irq_state[ARMV7M_EXCP_BUS].pending) val |= (1 << 14); +        if (s->gic.irq_state[ARMV7M_EXCP_SVC].pending) val |= (1 << 15); +        if (s->gic.irq_state[ARMV7M_EXCP_MEM].enabled) val |= (1 << 16); +        if (s->gic.irq_state[ARMV7M_EXCP_BUS].enabled) val |= (1 << 17); +        if (s->gic.irq_state[ARMV7M_EXCP_USAGE].enabled) val |= (1 << 18); +        return val; +    case 0xd28: /* Configurable Fault Status.  */ +        /* TODO: Implement Fault Status.  */ +        qemu_log_mask(LOG_UNIMP, "Configurable Fault Status unimplemented\n"); +        return 0; +    case 0xd2c: /* Hard Fault Status.  */ +    case 0xd30: /* Debug Fault Status.  */ +    case 0xd34: /* Mem Manage Address.  */ +    case 0xd38: /* Bus Fault Address.  */ +    case 0xd3c: /* Aux Fault Status.  */ +        /* TODO: Implement fault status registers.  */ +        qemu_log_mask(LOG_UNIMP, "Fault status registers unimplemented\n"); +        return 0; +    case 0xd40: /* PFR0.  */ +        return 0x00000030; +    case 0xd44: /* PRF1.  */ +        return 0x00000200; +    case 0xd48: /* DFR0.  */ +        return 0x00100000; +    case 0xd4c: /* AFR0.  */ +        return 0x00000000; +    case 0xd50: /* MMFR0.  */ +        return 0x00000030; +    case 0xd54: /* MMFR1.  */ +        return 0x00000000; +    case 0xd58: /* MMFR2.  */ +        return 0x00000000; +    case 0xd5c: /* MMFR3.  */ +        return 0x00000000; +    case 0xd60: /* ISAR0.  */ +        return 0x01141110; +    case 0xd64: /* ISAR1.  */ +        return 0x02111000; +    case 0xd68: /* ISAR2.  */ +        return 0x21112231; +    case 0xd6c: /* ISAR3.  */ +        return 0x01111110; +    case 0xd70: /* ISAR4.  */ +        return 0x01310102; +    /* TODO: Implement debug registers.  */ +    default: +        qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset); +        return 0; +    } +} + +static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value) +{ +    ARMCPU *cpu; +    uint32_t oldval; +    switch (offset) { +    case 0x10: /* SysTick Control and Status.  
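+                  Only the ENABLE, TICKINT and CLKSOURCE bits are writable;
+                  toggling ENABLE starts or stops the countdown and changing
+                  CLKSOURCE forces a reload.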
*/ +        oldval = s->systick.control; +        s->systick.control &= 0xfffffff8; +        s->systick.control |= value & 7; +        if ((oldval ^ value) & SYSTICK_ENABLE) { +            int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +            if (value & SYSTICK_ENABLE) { +                if (s->systick.tick) { +                    s->systick.tick += now; +                    timer_mod(s->systick.timer, s->systick.tick); +                } else { +                    systick_reload(s, 1); +                } +            } else { +                timer_del(s->systick.timer); +                s->systick.tick -= now; +                if (s->systick.tick < 0) +                  s->systick.tick = 0; +            } +        } else if ((oldval ^ value) & SYSTICK_CLKSOURCE) { +            /* This is a hack. Force the timer to be reloaded +               when the reference clock is changed.  */ +            systick_reload(s, 1); +        } +        break; +    case 0x14: /* SysTick Reload Value.  */ +        s->systick.reload = value; +        break; +    case 0x18: /* SysTick Current Value.  Writes reload the timer.  */ +        systick_reload(s, 1); +        s->systick.control &= ~SYSTICK_COUNTFLAG; +        break; +    case 0xd04: /* Interrupt Control State.  */ +        if (value & (1 << 31)) { +            armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI); +        } +        if (value & (1 << 28)) { +            armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV); +        } else if (value & (1 << 27)) { +            s->gic.irq_state[ARMV7M_EXCP_PENDSV].pending = 0; +            gic_update(&s->gic); +        } +        if (value & (1 << 26)) { +            armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK); +        } else if (value & (1 << 25)) { +            s->gic.irq_state[ARMV7M_EXCP_SYSTICK].pending = 0; +            gic_update(&s->gic); +        } +        break; +    case 0xd08: /* Vector Table Offset.  */ +        cpu = ARM_CPU(current_cpu); +        cpu->env.v7m.vecbase = value & 0xffffff80; +        break; +    case 0xd0c: /* Application Interrupt/Reset Control.  */ +        if ((value >> 16) == 0x05fa) { +            if (value & 2) { +                qemu_log_mask(LOG_UNIMP, "VECTCLRACTIVE unimplemented\n"); +            } +            if (value & 5) { +                qemu_log_mask(LOG_UNIMP, "AIRCR system reset unimplemented\n"); +            } +            if (value & 0x700) { +                qemu_log_mask(LOG_UNIMP, "PRIGROUP unimplemented\n"); +            } +        } +        break; +    case 0xd10: /* System Control.  */ +    case 0xd14: /* Configuration Control.  */ +        /* TODO: Implement control registers.  */ +        qemu_log_mask(LOG_UNIMP, "NVIC: SCR and CCR unimplemented\n"); +        break; +    case 0xd24: /* System Handler Control.  */ +        /* TODO: Real hardware allows you to set/clear the active bits +           under some circumstances.  We don't implement this.  */ +        s->gic.irq_state[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0; +        s->gic.irq_state[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0; +        s->gic.irq_state[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0; +        break; +    case 0xd28: /* Configurable Fault Status.  */ +    case 0xd2c: /* Hard Fault Status.  */ +    case 0xd30: /* Debug Fault Status.  */ +    case 0xd34: /* Mem Manage Address.  */ +    case 0xd38: /* Bus Fault Address.  */ +    case 0xd3c: /* Aux Fault Status.  
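+                  Writes to this whole group of fault status registers are
+                  unimplemented and are only logged.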
*/ +        qemu_log_mask(LOG_UNIMP, +                      "NVIC: fault status registers unimplemented\n"); +        break; +    case 0xf00: /* Software Triggered Interrupt Register */ +        if ((value & 0x1ff) < s->num_irq) { +            gic_set_pending_private(&s->gic, 0, value & 0x1ff); +        } +        break; +    default: +        qemu_log_mask(LOG_GUEST_ERROR, +                      "NVIC: Bad write offset 0x%x\n", offset); +    } +} + +static uint64_t nvic_sysreg_read(void *opaque, hwaddr addr, +                                 unsigned size) +{ +    nvic_state *s = (nvic_state *)opaque; +    uint32_t offset = addr; +    int i; +    uint32_t val; + +    switch (offset) { +    case 0xd18 ... 0xd23: /* System Handler Priority.  */ +        val = 0; +        for (i = 0; i < size; i++) { +            val |= s->gic.priority1[(offset - 0xd14) + i][0] << (i * 8); +        } +        return val; +    case 0xfe0 ... 0xfff: /* ID.  */ +        if (offset & 3) { +            return 0; +        } +        return nvic_id[(offset - 0xfe0) >> 2]; +    } +    if (size == 4) { +        return nvic_readl(s, offset); +    } +    qemu_log_mask(LOG_GUEST_ERROR, +                  "NVIC: Bad read of size %d at offset 0x%x\n", size, offset); +    return 0; +} + +static void nvic_sysreg_write(void *opaque, hwaddr addr, +                              uint64_t value, unsigned size) +{ +    nvic_state *s = (nvic_state *)opaque; +    uint32_t offset = addr; +    int i; + +    switch (offset) { +    case 0xd18 ... 0xd23: /* System Handler Priority.  */ +        for (i = 0; i < size; i++) { +            s->gic.priority1[(offset - 0xd14) + i][0] = +                (value >> (i * 8)) & 0xff; +        } +        gic_update(&s->gic); +        return; +    } +    if (size == 4) { +        nvic_writel(s, offset, value); +        return; +    } +    qemu_log_mask(LOG_GUEST_ERROR, +                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset); +} + +static const MemoryRegionOps nvic_sysreg_ops = { +    .read = nvic_sysreg_read, +    .write = nvic_sysreg_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_nvic = { +    .name = "armv7m_nvic", +    .version_id = 1, +    .minimum_version_id = 1, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(systick.control, nvic_state), +        VMSTATE_UINT32(systick.reload, nvic_state), +        VMSTATE_INT64(systick.tick, nvic_state), +        VMSTATE_TIMER_PTR(systick.timer, nvic_state), +        VMSTATE_END_OF_LIST() +    } +}; + +static void armv7m_nvic_reset(DeviceState *dev) +{ +    nvic_state *s = NVIC(dev); +    NVICClass *nc = NVIC_GET_CLASS(s); +    nc->parent_reset(dev); +    /* Common GIC reset resets to disabled; the NVIC doesn't have +     * per-CPU interfaces so mark our non-existent CPU interface +     * as enabled by default, and with a priority mask which allows +     * all interrupts through. +     */ +    s->gic.cpu_ctlr[0] = GICC_CTLR_EN_GRP0; +    s->gic.priority_mask[0] = 0x100; +    /* The NVIC as a whole is always enabled. 
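+     * The common GIC reset leaves the distributor disabled, so force its
+     * control register to 1 here and let exceptions be delivered.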
*/ +    s->gic.ctlr = 1; +    systick_reset(s); +} + +static void armv7m_nvic_realize(DeviceState *dev, Error **errp) +{ +    nvic_state *s = NVIC(dev); +    NVICClass *nc = NVIC_GET_CLASS(s); +    Error *local_err = NULL; + +    /* The NVIC always has only one CPU */ +    s->gic.num_cpu = 1; +    /* Tell the common code we're an NVIC */ +    s->gic.revision = 0xffffffff; +    s->num_irq = s->gic.num_irq; +    nc->parent_realize(dev, &local_err); +    if (local_err) { +        error_propagate(errp, local_err); +        return; +    } +    gic_init_irqs_and_distributor(&s->gic); +    /* The NVIC and system controller register area looks like this: +     *  0..0xff : system control registers, including systick +     *  0x100..0xcff : GIC-like registers +     *  0xd00..0xfff : system control registers +     * We use overlaying to put the GIC like registers +     * over the top of the system control register region. +     */ +    memory_region_init(&s->container, OBJECT(s), "nvic", 0x1000); +    /* The system register region goes at the bottom of the priority +     * stack as it covers the whole page. +     */ +    memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s, +                          "nvic_sysregs", 0x1000); +    memory_region_add_subregion(&s->container, 0, &s->sysregmem); +    /* Alias the GIC region so we can get only the section of it +     * we need, and layer it on top of the system register region. +     */ +    memory_region_init_alias(&s->gic_iomem_alias, OBJECT(s), +                             "nvic-gic", &s->gic.iomem, +                             0x100, 0xc00); +    memory_region_add_subregion_overlap(&s->container, 0x100, +                                        &s->gic_iomem_alias, 1); +    /* Map the whole thing into system memory at the location required +     * by the v7M architecture. +     */ +    memory_region_add_subregion(get_system_memory(), 0xe000e000, &s->container); +    s->systick.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, systick_timer_tick, s); +} + +static void armv7m_nvic_instance_init(Object *obj) +{ +    /* We have a different default value for the num-irq property +     * than our superclass. This function runs after qdev init +     * has set the defaults from the Property array and before +     * any user-specified property setting, so just modify the +     * value in the GICState struct. +     */ +    GICState *s = ARM_GIC_COMMON(obj); +    /* The ARM v7m may have anything from 0 to 496 external interrupt +     * IRQ lines. We default to 64. Other boards may differ and should +     * set the num-irq property appropriately. 
+     */ +    s->num_irq = 64; +} + +static void armv7m_nvic_class_init(ObjectClass *klass, void *data) +{ +    NVICClass *nc = NVIC_CLASS(klass); +    DeviceClass *dc = DEVICE_CLASS(klass); + +    nc->parent_reset = dc->reset; +    nc->parent_realize = dc->realize; +    dc->vmsd  = &vmstate_nvic; +    dc->reset = armv7m_nvic_reset; +    dc->realize = armv7m_nvic_realize; +} + +static const TypeInfo armv7m_nvic_info = { +    .name          = TYPE_NVIC, +    .parent        = TYPE_ARM_GIC_COMMON, +    .instance_init = armv7m_nvic_instance_init, +    .instance_size = sizeof(nvic_state), +    .class_init    = armv7m_nvic_class_init, +    .class_size    = sizeof(NVICClass), +}; + +static void armv7m_nvic_register_types(void) +{ +    type_register_static(&armv7m_nvic_info); +} + +type_init(armv7m_nvic_register_types) diff --git a/hw/intc/etraxfs_pic.c b/hw/intc/etraxfs_pic.c new file mode 100644 index 00000000..bd588681 --- /dev/null +++ b/hw/intc/etraxfs_pic.c @@ -0,0 +1,193 @@ +/* + * QEMU ETRAX Interrupt Controller. + * + * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "hw/sysbus.h" +#include "hw/hw.h" +//#include "pc.h" +//#include "etraxfs.h" + +#define D(x) + +#define R_RW_MASK   0 +#define R_R_VECT    1 +#define R_R_MASKED_VECT 2 +#define R_R_NMI     3 +#define R_R_GURU    4 +#define R_MAX       5 + +#define TYPE_ETRAX_FS_PIC "etraxfs,pic" +#define ETRAX_FS_PIC(obj) \ +    OBJECT_CHECK(struct etrax_pic, (obj), TYPE_ETRAX_FS_PIC) + +struct etrax_pic +{ +    SysBusDevice parent_obj; + +    MemoryRegion mmio; +    void *interrupt_vector; +    qemu_irq parent_irq; +    qemu_irq parent_nmi; +    uint32_t regs[R_MAX]; +}; + +static void pic_update(struct etrax_pic *fs) +{    +    uint32_t vector = 0; +    int i; + +    fs->regs[R_R_MASKED_VECT] = fs->regs[R_R_VECT] & fs->regs[R_RW_MASK]; + +    /* The ETRAX interrupt controller signals interrupts to the core +       through an interrupt request wire and an irq vector bus. If  +       multiple interrupts are simultaneously active it chooses vector  +       0x30 and lets the sw choose the priorities.  */ +    if (fs->regs[R_R_MASKED_VECT]) { +        uint32_t mv = fs->regs[R_R_MASKED_VECT]; +        for (i = 0; i < 31; i++) { +            if (mv & 1) { +                vector = 0x31 + i; +                /* Check for multiple interrupts.  
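+                   If any higher bit is still set, more than one interrupt
+                   is active, so fall back to the shared vector 0x30 and let
+                   software pick the priority.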
*/ +                if (mv > 1) +                    vector = 0x30; +                break; +            } +            mv >>= 1; +        } +    } + +    if (fs->interrupt_vector) { +        /* hack alert: ptr property */ +        *(uint32_t*)(fs->interrupt_vector) = vector; +    } +    qemu_set_irq(fs->parent_irq, !!vector); +} + +static uint64_t +pic_read(void *opaque, hwaddr addr, unsigned int size) +{ +    struct etrax_pic *fs = opaque; +    uint32_t rval; + +    rval = fs->regs[addr >> 2]; +    D(printf("%s %x=%x\n", __func__, addr, rval)); +    return rval; +} + +static void pic_write(void *opaque, hwaddr addr, +                      uint64_t value, unsigned int size) +{ +    struct etrax_pic *fs = opaque; +    D(printf("%s addr=%x val=%x\n", __func__, addr, value)); + +    if (addr == R_RW_MASK) { +        fs->regs[R_RW_MASK] = value; +        pic_update(fs); +    } +} + +static const MemoryRegionOps pic_ops = { +    .read = pic_read, +    .write = pic_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +    .valid = { +        .min_access_size = 4, +        .max_access_size = 4 +    } +}; + +static void nmi_handler(void *opaque, int irq, int level) +{    +    struct etrax_pic *fs = (void *)opaque; +    uint32_t mask; + +    mask = 1 << irq; +    if (level) +        fs->regs[R_R_NMI] |= mask; +    else +        fs->regs[R_R_NMI] &= ~mask; + +    qemu_set_irq(fs->parent_nmi, !!fs->regs[R_R_NMI]); +} + +static void irq_handler(void *opaque, int irq, int level) +{ +    struct etrax_pic *fs = (void *)opaque; + +    if (irq >= 30) { +        nmi_handler(opaque, irq, level); +        return; +    } + +    irq -= 1; +    fs->regs[R_R_VECT] &= ~(1 << irq); +    fs->regs[R_R_VECT] |= (!!level << irq); +    pic_update(fs); +} + +static int etraxfs_pic_init(SysBusDevice *sbd) +{ +    DeviceState *dev = DEVICE(sbd); +    struct etrax_pic *s = ETRAX_FS_PIC(dev); + +    qdev_init_gpio_in(dev, irq_handler, 32); +    sysbus_init_irq(sbd, &s->parent_irq); +    sysbus_init_irq(sbd, &s->parent_nmi); + +    memory_region_init_io(&s->mmio, OBJECT(s), &pic_ops, s, +                          "etraxfs-pic", R_MAX * 4); +    sysbus_init_mmio(sbd, &s->mmio); +    return 0; +} + +static Property etraxfs_pic_properties[] = { +    DEFINE_PROP_PTR("interrupt_vector", struct etrax_pic, interrupt_vector), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void etraxfs_pic_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + +    k->init = etraxfs_pic_init; +    dc->props = etraxfs_pic_properties; +    /* +     * Note: pointer property "interrupt_vector" may remain null, thus +     * no need for dc->cannot_instantiate_with_device_add_yet = true; +     */ +} + +static const TypeInfo etraxfs_pic_info = { +    .name          = TYPE_ETRAX_FS_PIC, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(struct etrax_pic), +    .class_init    = etraxfs_pic_class_init, +}; + +static void etraxfs_pic_register_types(void) +{ +    type_register_static(&etraxfs_pic_info); +} + +type_init(etraxfs_pic_register_types) diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c new file mode 100644 index 00000000..a6b70289 --- /dev/null +++ b/hw/intc/exynos4210_combiner.c @@ -0,0 +1,458 @@ +/* + * Samsung exynos4210 Interrupt Combiner + * + * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. + * All rights reserved. 
+ * + * Evgeny Voevodin <e.voevodin@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * Exynos4210 Combiner represents an OR gate for SOC's IRQ lines. It combines + * IRQ sources into groups and provides signal output to GIC from each group. It + * is driven by common mask and enable/disable logic. Take a note that not all + * IRQs are passed to GIC through Combiner. + */ + +#include "hw/sysbus.h" + +#include "hw/arm/exynos4210.h" + +//#define DEBUG_COMBINER + +#ifdef DEBUG_COMBINER +#define DPRINTF(fmt, ...) \ +        do { fprintf(stdout, "COMBINER: [%s:%d] " fmt, __func__ , __LINE__, \ +                ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) do {} while (0) +#endif + +#define    IIC_NGRP        64            /* Internal Interrupt Combiner +                                            Groups number */ +#define    IIC_NIRQ        (IIC_NGRP * 8)/* Internal Interrupt Combiner +                                            Interrupts number */ +#define IIC_REGION_SIZE    0x108         /* Size of memory mapped region */ +#define IIC_REGSET_SIZE    0x41 + +/* + * State for each output signal of internal combiner + */ +typedef struct CombinerGroupState { +    uint8_t src_mask;            /* 1 - source enabled, 0 - disabled */ +    uint8_t src_pending;        /* Pending source interrupts before masking */ +} CombinerGroupState; + +#define TYPE_EXYNOS4210_COMBINER "exynos4210.combiner" +#define EXYNOS4210_COMBINER(obj) \ +    OBJECT_CHECK(Exynos4210CombinerState, (obj), TYPE_EXYNOS4210_COMBINER) + +typedef struct Exynos4210CombinerState { +    SysBusDevice parent_obj; + +    MemoryRegion iomem; + +    struct CombinerGroupState group[IIC_NGRP]; +    uint32_t reg_set[IIC_REGSET_SIZE]; +    uint32_t icipsr[2]; +    uint32_t external;          /* 1 means that this combiner is external */ + +    qemu_irq output_irq[IIC_NGRP]; +} Exynos4210CombinerState; + +static const VMStateDescription vmstate_exynos4210_combiner_group_state = { +    .name = "exynos4210.combiner.groupstate", +    .version_id = 1, +    .minimum_version_id = 1, +    .fields = (VMStateField[]) { +        VMSTATE_UINT8(src_mask, CombinerGroupState), +        VMSTATE_UINT8(src_pending, CombinerGroupState), +        VMSTATE_END_OF_LIST() +    } +}; + +static const VMStateDescription vmstate_exynos4210_combiner = { +    .name = "exynos4210.combiner", +    .version_id = 1, +    .minimum_version_id = 1, +    .fields = (VMStateField[]) { +        VMSTATE_STRUCT_ARRAY(group, Exynos4210CombinerState, IIC_NGRP, 0, +                vmstate_exynos4210_combiner_group_state, CombinerGroupState), +        VMSTATE_UINT32_ARRAY(reg_set, Exynos4210CombinerState, +                IIC_REGSET_SIZE), +        VMSTATE_UINT32_ARRAY(icipsr, Exynos4210CombinerState, 2), +        VMSTATE_UINT32(external, Exynos4210CombinerState), +        VMSTATE_END_OF_LIST() +    } +}; + +/* + * Get Combiner input GPIO into irqs 
structure + */ +void exynos4210_combiner_get_gpioin(Exynos4210Irq *irqs, DeviceState *dev, +        int ext) +{ +    int n; +    int bit; +    int max; +    qemu_irq *irq; + +    max = ext ? EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ : +        EXYNOS4210_MAX_INT_COMBINER_IN_IRQ; +    irq = ext ? irqs->ext_combiner_irq : irqs->int_combiner_irq; + +    /* +     * Some IRQs of Int/External Combiner are going to two Combiners groups, +     * so let split them. +     */ +    for (n = 0; n < max; n++) { + +        bit = EXYNOS4210_COMBINER_GET_BIT_NUM(n); + +        switch (n) { +        /* MDNIE_LCD1 INTG1 */ +        case EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 0) ... +             EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 3): +            irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), +                    irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(0, bit + 4)]); +            continue; + +        /* TMU INTG3 */ +        case EXYNOS4210_COMBINER_GET_IRQ_NUM(3, 4): +            irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), +                    irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(2, bit)]); +            continue; + +        /* LCD1 INTG12 */ +        case EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 0) ... +             EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 3): +            irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), +                    irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(11, bit + 4)]); +            continue; + +        /* Multi-Core Timer INTG12 */ +        case EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 4) ... +             EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 8): +               irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), +                       irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); +            continue; + +        /* Multi-Core Timer INTG35 */ +        case EXYNOS4210_COMBINER_GET_IRQ_NUM(35, 4) ... +             EXYNOS4210_COMBINER_GET_IRQ_NUM(35, 8): +            irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), +                    irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); +            continue; + +        /* Multi-Core Timer INTG51 */ +        case EXYNOS4210_COMBINER_GET_IRQ_NUM(51, 4) ... +             EXYNOS4210_COMBINER_GET_IRQ_NUM(51, 8): +            irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), +                    irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); +            continue; + +        /* Multi-Core Timer INTG53 */ +        case EXYNOS4210_COMBINER_GET_IRQ_NUM(53, 4) ... +             EXYNOS4210_COMBINER_GET_IRQ_NUM(53, 8): +            irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), +                    irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); +            continue; +        } + +        irq[n] = qdev_get_gpio_in(dev, n); +    } +} + +static uint64_t +exynos4210_combiner_read(void *opaque, hwaddr offset, unsigned size) +{ +    struct Exynos4210CombinerState *s = +            (struct Exynos4210CombinerState *)opaque; +    uint32_t req_quad_base_n;    /* Base of registers quad. 
Multiply it by 4 and +                                   get a start of corresponding group quad */ +    uint32_t grp_quad_base_n;    /* Base of group quad */ +    uint32_t reg_n;              /* Register number inside the quad */ +    uint32_t val; + +    req_quad_base_n = offset >> 4; +    grp_quad_base_n = req_quad_base_n << 2; +    reg_n = (offset - (req_quad_base_n << 4)) >> 2; + +    if (req_quad_base_n >= IIC_NGRP) { +        /* Read of ICIPSR register */ +        return s->icipsr[reg_n]; +    } + +    val = 0; + +    switch (reg_n) { +    /* IISTR */ +    case 2: +        val |= s->group[grp_quad_base_n].src_pending; +        val |= s->group[grp_quad_base_n + 1].src_pending << 8; +        val |= s->group[grp_quad_base_n + 2].src_pending << 16; +        val |= s->group[grp_quad_base_n + 3].src_pending << 24; +        break; +    /* IIMSR */ +    case 3: +        val |= s->group[grp_quad_base_n].src_mask & +        s->group[grp_quad_base_n].src_pending; +        val |= (s->group[grp_quad_base_n + 1].src_mask & +                s->group[grp_quad_base_n + 1].src_pending) << 8; +        val |= (s->group[grp_quad_base_n + 2].src_mask & +                s->group[grp_quad_base_n + 2].src_pending) << 16; +        val |= (s->group[grp_quad_base_n + 3].src_mask & +                s->group[grp_quad_base_n + 3].src_pending) << 24; +        break; +    default: +        if (offset >> 2 >= IIC_REGSET_SIZE) { +            hw_error("exynos4210.combiner: overflow of reg_set by 0x" +                    TARGET_FMT_plx "offset\n", offset); +        } +        val = s->reg_set[offset >> 2]; +        return 0; +    } +    return val; +} + +static void exynos4210_combiner_update(void *opaque, uint8_t group_n) +{ +    struct Exynos4210CombinerState *s = +            (struct Exynos4210CombinerState *)opaque; + +    /* Send interrupt if needed */ +    if (s->group[group_n].src_mask & s->group[group_n].src_pending) { +#ifdef DEBUG_COMBINER +        if (group_n != 26) { +            /* skip uart */ +            DPRINTF("%s raise IRQ[%d]\n", s->external ? "EXT" : "INT", group_n); +        } +#endif + +        /* Set Combiner interrupt pending status after masking */ +        if (group_n >= 32) { +            s->icipsr[1] |= 1 << (group_n - 32); +        } else { +            s->icipsr[0] |= 1 << group_n; +        } + +        qemu_irq_raise(s->output_irq[group_n]); +    } else { +#ifdef DEBUG_COMBINER +        if (group_n != 26) { +            /* skip uart */ +            DPRINTF("%s lower IRQ[%d]\n", s->external ? "EXT" : "INT", group_n); +        } +#endif + +        /* Set Combiner interrupt pending status after masking */ +        if (group_n >= 32) { +            s->icipsr[1] &= ~(1 << (group_n - 32)); +        } else { +            s->icipsr[0] &= ~(1 << group_n); +        } + +        qemu_irq_lower(s->output_irq[group_n]); +    } +} + +static void exynos4210_combiner_write(void *opaque, hwaddr offset, +        uint64_t val, unsigned size) +{ +    struct Exynos4210CombinerState *s = +            (struct Exynos4210CombinerState *)opaque; +    uint32_t req_quad_base_n;    /* Base of registers quad. 
Multiply it by 4 and +                                   get a start of corresponding group quad */ +    uint32_t grp_quad_base_n;    /* Base of group quad */ +    uint32_t reg_n;              /* Register number inside the quad */ + +    req_quad_base_n = offset >> 4; +    grp_quad_base_n = req_quad_base_n << 2; +    reg_n = (offset - (req_quad_base_n << 4)) >> 2; + +    if (req_quad_base_n >= IIC_NGRP) { +        hw_error("exynos4210.combiner: unallowed write access at offset 0x" +                TARGET_FMT_plx "\n", offset); +        return; +    } + +    if (reg_n > 1) { +        hw_error("exynos4210.combiner: unallowed write access at offset 0x" +                TARGET_FMT_plx "\n", offset); +        return; +    } + +    if (offset >> 2 >= IIC_REGSET_SIZE) { +        hw_error("exynos4210.combiner: overflow of reg_set by 0x" +                TARGET_FMT_plx "offset\n", offset); +    } +    s->reg_set[offset >> 2] = val; + +    switch (reg_n) { +    /* IIESR */ +    case 0: +        /* FIXME: what if irq is pending, allowed by mask, and we allow it +         * again. Interrupt will rise again! */ + +        DPRINTF("%s enable IRQ for groups %d, %d, %d, %d\n", +                s->external ? "EXT" : "INT", +                grp_quad_base_n, +                grp_quad_base_n + 1, +                grp_quad_base_n + 2, +                grp_quad_base_n + 3); + +        /* Enable interrupt sources */ +        s->group[grp_quad_base_n].src_mask |= val & 0xFF; +        s->group[grp_quad_base_n + 1].src_mask |= (val & 0xFF00) >> 8; +        s->group[grp_quad_base_n + 2].src_mask |= (val & 0xFF0000) >> 16; +        s->group[grp_quad_base_n + 3].src_mask |= (val & 0xFF000000) >> 24; + +        exynos4210_combiner_update(s, grp_quad_base_n); +        exynos4210_combiner_update(s, grp_quad_base_n + 1); +        exynos4210_combiner_update(s, grp_quad_base_n + 2); +        exynos4210_combiner_update(s, grp_quad_base_n + 3); +        break; +        /* IIECR */ +    case 1: +        DPRINTF("%s disable IRQ for groups %d, %d, %d, %d\n", +                s->external ? "EXT" : "INT", +                grp_quad_base_n, +                grp_quad_base_n + 1, +                grp_quad_base_n + 2, +                grp_quad_base_n + 3); + +        /* Disable interrupt sources */ +        s->group[grp_quad_base_n].src_mask &= ~(val & 0xFF); +        s->group[grp_quad_base_n + 1].src_mask &= ~((val & 0xFF00) >> 8); +        s->group[grp_quad_base_n + 2].src_mask &= ~((val & 0xFF0000) >> 16); +        s->group[grp_quad_base_n + 3].src_mask &= ~((val & 0xFF000000) >> 24); + +        exynos4210_combiner_update(s, grp_quad_base_n); +        exynos4210_combiner_update(s, grp_quad_base_n + 1); +        exynos4210_combiner_update(s, grp_quad_base_n + 2); +        exynos4210_combiner_update(s, grp_quad_base_n + 3); +        break; +    default: +        hw_error("exynos4210.combiner: unallowed write access at offset 0x" +                TARGET_FMT_plx "\n", offset); +        break; +    } +} + +/* Get combiner group and bit from irq number */ +static uint8_t get_combiner_group_and_bit(int irq, uint8_t *bit) +{ +    *bit = irq - ((irq >> 3) << 3); +    return irq >> 3; +} + +/* Process a change in an external IRQ input.  
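+ * The GPIO line number n selects combiner group n / 8 and bit n % 8 within
+ * that group (e.g. input 210 is group 26, bit 2); the handler updates the
+ * group's pending bits and then re-evaluates its output.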
*/ +static void exynos4210_combiner_handler(void *opaque, int irq, int level) +{ +    struct Exynos4210CombinerState *s = +            (struct Exynos4210CombinerState *)opaque; +    uint8_t bit_n, group_n; + +    group_n = get_combiner_group_and_bit(irq, &bit_n); + +    if (s->external && group_n >= EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ) { +        DPRINTF("%s unallowed IRQ group 0x%x\n", s->external ? "EXT" : "INT" +                , group_n); +        return; +    } + +    if (level) { +        s->group[group_n].src_pending |= 1 << bit_n; +    } else { +        s->group[group_n].src_pending &= ~(1 << bit_n); +    } + +    exynos4210_combiner_update(s, group_n); +} + +static void exynos4210_combiner_reset(DeviceState *d) +{ +    struct Exynos4210CombinerState *s = (struct Exynos4210CombinerState *)d; + +    memset(&s->group, 0, sizeof(s->group)); +    memset(&s->reg_set, 0, sizeof(s->reg_set)); + +    s->reg_set[0xC0 >> 2] = 0x01010101; +    s->reg_set[0xC4 >> 2] = 0x01010101; +    s->reg_set[0xD0 >> 2] = 0x01010101; +    s->reg_set[0xD4 >> 2] = 0x01010101; +} + +static const MemoryRegionOps exynos4210_combiner_ops = { +    .read = exynos4210_combiner_read, +    .write = exynos4210_combiner_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/* + * Internal Combiner initialization. + */ +static int exynos4210_combiner_init(SysBusDevice *sbd) +{ +    DeviceState *dev = DEVICE(sbd); +    Exynos4210CombinerState *s = EXYNOS4210_COMBINER(dev); +    unsigned int i; + +    /* Allocate general purpose input signals and connect a handler to each of +     * them */ +    qdev_init_gpio_in(dev, exynos4210_combiner_handler, IIC_NIRQ); + +    /* Connect SysBusDev irqs to device specific irqs */ +    for (i = 0; i < IIC_NGRP; i++) { +        sysbus_init_irq(sbd, &s->output_irq[i]); +    } + +    memory_region_init_io(&s->iomem, OBJECT(s), &exynos4210_combiner_ops, s, +                          "exynos4210-combiner", IIC_REGION_SIZE); +    sysbus_init_mmio(sbd, &s->iomem); + +    return 0; +} + +static Property exynos4210_combiner_properties[] = { +    DEFINE_PROP_UINT32("external", Exynos4210CombinerState, external, 0), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void exynos4210_combiner_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + +    k->init = exynos4210_combiner_init; +    dc->reset = exynos4210_combiner_reset; +    dc->props = exynos4210_combiner_properties; +    dc->vmsd = &vmstate_exynos4210_combiner; +} + +static const TypeInfo exynos4210_combiner_info = { +    .name          = TYPE_EXYNOS4210_COMBINER, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(Exynos4210CombinerState), +    .class_init    = exynos4210_combiner_class_init, +}; + +static void exynos4210_combiner_register_types(void) +{ +    type_register_static(&exynos4210_combiner_info); +} + +type_init(exynos4210_combiner_register_types) diff --git a/hw/intc/exynos4210_gic.c b/hw/intc/exynos4210_gic.c new file mode 100644 index 00000000..b2a4950b --- /dev/null +++ b/hw/intc/exynos4210_gic.c @@ -0,0 +1,471 @@ +/* + * Samsung exynos4210 GIC implementation. Based on hw/arm_gic.c + * + * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. + * All rights reserved. 
+ * + * Evgeny Voevodin <e.voevodin@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "hw/sysbus.h" +#include "qemu-common.h" +#include "hw/irq.h" +#include "hw/arm/exynos4210.h" + +enum ExtGicId { +    EXT_GIC_ID_MDMA_LCD0 = 66, +    EXT_GIC_ID_PDMA0, +    EXT_GIC_ID_PDMA1, +    EXT_GIC_ID_TIMER0, +    EXT_GIC_ID_TIMER1, +    EXT_GIC_ID_TIMER2, +    EXT_GIC_ID_TIMER3, +    EXT_GIC_ID_TIMER4, +    EXT_GIC_ID_MCT_L0, +    EXT_GIC_ID_WDT, +    EXT_GIC_ID_RTC_ALARM, +    EXT_GIC_ID_RTC_TIC, +    EXT_GIC_ID_GPIO_XB, +    EXT_GIC_ID_GPIO_XA, +    EXT_GIC_ID_MCT_L1, +    EXT_GIC_ID_IEM_APC, +    EXT_GIC_ID_IEM_IEC, +    EXT_GIC_ID_NFC, +    EXT_GIC_ID_UART0, +    EXT_GIC_ID_UART1, +    EXT_GIC_ID_UART2, +    EXT_GIC_ID_UART3, +    EXT_GIC_ID_UART4, +    EXT_GIC_ID_MCT_G0, +    EXT_GIC_ID_I2C0, +    EXT_GIC_ID_I2C1, +    EXT_GIC_ID_I2C2, +    EXT_GIC_ID_I2C3, +    EXT_GIC_ID_I2C4, +    EXT_GIC_ID_I2C5, +    EXT_GIC_ID_I2C6, +    EXT_GIC_ID_I2C7, +    EXT_GIC_ID_SPI0, +    EXT_GIC_ID_SPI1, +    EXT_GIC_ID_SPI2, +    EXT_GIC_ID_MCT_G1, +    EXT_GIC_ID_USB_HOST, +    EXT_GIC_ID_USB_DEVICE, +    EXT_GIC_ID_MODEMIF, +    EXT_GIC_ID_HSMMC0, +    EXT_GIC_ID_HSMMC1, +    EXT_GIC_ID_HSMMC2, +    EXT_GIC_ID_HSMMC3, +    EXT_GIC_ID_SDMMC, +    EXT_GIC_ID_MIPI_CSI_4LANE, +    EXT_GIC_ID_MIPI_DSI_4LANE, +    EXT_GIC_ID_MIPI_CSI_2LANE, +    EXT_GIC_ID_MIPI_DSI_2LANE, +    EXT_GIC_ID_ONENAND_AUDI, +    EXT_GIC_ID_ROTATOR, +    EXT_GIC_ID_FIMC0, +    EXT_GIC_ID_FIMC1, +    EXT_GIC_ID_FIMC2, +    EXT_GIC_ID_FIMC3, +    EXT_GIC_ID_JPEG, +    EXT_GIC_ID_2D, +    EXT_GIC_ID_PCIe, +    EXT_GIC_ID_MIXER, +    EXT_GIC_ID_HDMI, +    EXT_GIC_ID_HDMI_I2C, +    EXT_GIC_ID_MFC, +    EXT_GIC_ID_TVENC, +}; + +enum ExtInt { +    EXT_GIC_ID_EXTINT0 = 48, +    EXT_GIC_ID_EXTINT1, +    EXT_GIC_ID_EXTINT2, +    EXT_GIC_ID_EXTINT3, +    EXT_GIC_ID_EXTINT4, +    EXT_GIC_ID_EXTINT5, +    EXT_GIC_ID_EXTINT6, +    EXT_GIC_ID_EXTINT7, +    EXT_GIC_ID_EXTINT8, +    EXT_GIC_ID_EXTINT9, +    EXT_GIC_ID_EXTINT10, +    EXT_GIC_ID_EXTINT11, +    EXT_GIC_ID_EXTINT12, +    EXT_GIC_ID_EXTINT13, +    EXT_GIC_ID_EXTINT14, +    EXT_GIC_ID_EXTINT15 +}; + +/* + * External GIC sources which are not from External Interrupt Combiner or + * External Interrupts are starting from EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ, + * which is INTG16 in Internal Interrupt Combiner. 
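+ *
+ * Each row of the table below corresponds to one internal combiner group,
+ * starting at group 16, and each entry gives the external GIC ID driven by
+ * that combiner input (e.g. group 26 bit 0 is EXT_GIC_ID_UART0).  A zero
+ * entry means the input is not routed to the external GIC.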
+ */ + +static uint32_t +combiner_grp_to_gic_id[64-EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][8] = { +    /* int combiner groups 16-19 */ +    { }, { }, { }, { }, +    /* int combiner group 20 */ +    { 0, EXT_GIC_ID_MDMA_LCD0 }, +    /* int combiner group 21 */ +    { EXT_GIC_ID_PDMA0, EXT_GIC_ID_PDMA1 }, +    /* int combiner group 22 */ +    { EXT_GIC_ID_TIMER0, EXT_GIC_ID_TIMER1, EXT_GIC_ID_TIMER2, +            EXT_GIC_ID_TIMER3, EXT_GIC_ID_TIMER4 }, +    /* int combiner group 23 */ +    { EXT_GIC_ID_RTC_ALARM, EXT_GIC_ID_RTC_TIC }, +    /* int combiner group 24 */ +    { EXT_GIC_ID_GPIO_XB, EXT_GIC_ID_GPIO_XA }, +    /* int combiner group 25 */ +    { EXT_GIC_ID_IEM_APC, EXT_GIC_ID_IEM_IEC }, +    /* int combiner group 26 */ +    { EXT_GIC_ID_UART0, EXT_GIC_ID_UART1, EXT_GIC_ID_UART2, EXT_GIC_ID_UART3, +            EXT_GIC_ID_UART4 }, +    /* int combiner group 27 */ +    { EXT_GIC_ID_I2C0, EXT_GIC_ID_I2C1, EXT_GIC_ID_I2C2, EXT_GIC_ID_I2C3, +            EXT_GIC_ID_I2C4, EXT_GIC_ID_I2C5, EXT_GIC_ID_I2C6, +            EXT_GIC_ID_I2C7 }, +    /* int combiner group 28 */ +    { EXT_GIC_ID_SPI0, EXT_GIC_ID_SPI1, EXT_GIC_ID_SPI2 , EXT_GIC_ID_USB_HOST}, +    /* int combiner group 29 */ +    { EXT_GIC_ID_HSMMC0, EXT_GIC_ID_HSMMC1, EXT_GIC_ID_HSMMC2, +     EXT_GIC_ID_HSMMC3, EXT_GIC_ID_SDMMC }, +    /* int combiner group 30 */ +    { EXT_GIC_ID_MIPI_CSI_4LANE, EXT_GIC_ID_MIPI_CSI_2LANE }, +    /* int combiner group 31 */ +    { EXT_GIC_ID_MIPI_DSI_4LANE, EXT_GIC_ID_MIPI_DSI_2LANE }, +    /* int combiner group 32 */ +    { EXT_GIC_ID_FIMC0, EXT_GIC_ID_FIMC1 }, +    /* int combiner group 33 */ +    { EXT_GIC_ID_FIMC2, EXT_GIC_ID_FIMC3 }, +    /* int combiner group 34 */ +    { EXT_GIC_ID_ONENAND_AUDI, EXT_GIC_ID_NFC }, +    /* int combiner group 35 */ +    { 0, 0, 0, EXT_GIC_ID_MCT_L1, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 }, +    /* int combiner group 36 */ +    { EXT_GIC_ID_MIXER }, +    /* int combiner group 37 */ +    { EXT_GIC_ID_EXTINT4, EXT_GIC_ID_EXTINT5, EXT_GIC_ID_EXTINT6, +     EXT_GIC_ID_EXTINT7 }, +    /* groups 38-50 */ +    { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, +    /* int combiner group 51 */ +    { EXT_GIC_ID_MCT_L0, 0, 0, 0, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 }, +    /* group 52 */ +    { }, +    /* int combiner group 53 */ +    { EXT_GIC_ID_WDT, 0, 0, 0, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 }, +    /* groups 54-63 */ +    { }, { }, { }, { }, { }, { }, { }, { }, { }, { } +}; + +#define EXYNOS4210_GIC_NIRQ 160 + +#define EXYNOS4210_EXT_GIC_CPU_REGION_SIZE     0x10000 +#define EXYNOS4210_EXT_GIC_DIST_REGION_SIZE    0x10000 + +#define EXYNOS4210_EXT_GIC_PER_CPU_OFFSET      0x8000 +#define EXYNOS4210_EXT_GIC_CPU_GET_OFFSET(n) \ +    ((n) * EXYNOS4210_EXT_GIC_PER_CPU_OFFSET) +#define EXYNOS4210_EXT_GIC_DIST_GET_OFFSET(n) \ +    ((n) * EXYNOS4210_EXT_GIC_PER_CPU_OFFSET) + +#define EXYNOS4210_GIC_CPU_REGION_SIZE  0x100 +#define EXYNOS4210_GIC_DIST_REGION_SIZE 0x1000 + +static void exynos4210_irq_handler(void *opaque, int irq, int level) +{ +    Exynos4210Irq *s = (Exynos4210Irq *)opaque; + +    /* Bypass */ +    qemu_set_irq(s->board_irqs[irq], level); +} + +/* + * Initialize exynos4210 IRQ subsystem stub. + */ +qemu_irq *exynos4210_init_irq(Exynos4210Irq *s) +{ +    return qemu_allocate_irqs(exynos4210_irq_handler, s, +            EXYNOS4210_MAX_INT_COMBINER_IN_IRQ); +} + +/* + * Initialize board IRQs. + * These IRQs contain splitted Int/External Combiner and External Gic IRQs. 
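+ *
+ * Each board IRQ is built with qemu_irq_split(): external combiner inputs
+ * are split between the internal combiner and either the external combiner
+ * or, for the MCT_G0/MCT_G1 special cases, the external GIC; the remaining
+ * internal combiner inputs are split with the external GIC line looked up
+ * in combiner_grp_to_gic_id.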
+ */ +void exynos4210_init_board_irqs(Exynos4210Irq *s) +{ +    uint32_t grp, bit, irq_id, n; + +    for (n = 0; n < EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ; n++) { +        irq_id = 0; +        if (n == EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 4) || +                n == EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 4)) { +            /* MCT_G0 is passed to External GIC */ +            irq_id = EXT_GIC_ID_MCT_G0; +        } +        if (n == EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 5) || +                n == EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 5)) { +            /* MCT_G1 is passed to External and GIC */ +            irq_id = EXT_GIC_ID_MCT_G1; +        } +        if (irq_id) { +            s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n], +                    s->ext_gic_irq[irq_id-32]); +        } else { +            s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n], +                    s->ext_combiner_irq[n]); +        } +    } +    for (; n < EXYNOS4210_MAX_INT_COMBINER_IN_IRQ; n++) { +        /* these IDs are passed to Internal Combiner and External GIC */ +        grp = EXYNOS4210_COMBINER_GET_GRP_NUM(n); +        bit = EXYNOS4210_COMBINER_GET_BIT_NUM(n); +        irq_id = combiner_grp_to_gic_id[grp - +                     EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][bit]; + +        if (irq_id) { +            s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n], +                    s->ext_gic_irq[irq_id-32]); +        } +    } +} + +/* + * Get IRQ number from exynos4210 IRQ subsystem stub. + * To identify IRQ source use internal combiner group and bit number + *  grp - group number + *  bit - bit number inside group + */ +uint32_t exynos4210_get_irq(uint32_t grp, uint32_t bit) +{ +    return EXYNOS4210_COMBINER_GET_IRQ_NUM(grp, bit); +} + +/********* GIC part *********/ + +#define TYPE_EXYNOS4210_GIC "exynos4210.gic" +#define EXYNOS4210_GIC(obj) \ +    OBJECT_CHECK(Exynos4210GicState, (obj), TYPE_EXYNOS4210_GIC) + +typedef struct { +    SysBusDevice parent_obj; + +    MemoryRegion cpu_container; +    MemoryRegion dist_container; +    MemoryRegion cpu_alias[EXYNOS4210_NCPUS]; +    MemoryRegion dist_alias[EXYNOS4210_NCPUS]; +    uint32_t num_cpu; +    DeviceState *gic; +} Exynos4210GicState; + +static void exynos4210_gic_set_irq(void *opaque, int irq, int level) +{ +    Exynos4210GicState *s = (Exynos4210GicState *)opaque; +    qemu_set_irq(qdev_get_gpio_in(s->gic, irq), level); +} + +static int exynos4210_gic_init(SysBusDevice *sbd) +{ +    DeviceState *dev = DEVICE(sbd); +    Exynos4210GicState *s = EXYNOS4210_GIC(dev); +    uint32_t i; +    const char cpu_prefix[] = "exynos4210-gic-alias_cpu"; +    const char dist_prefix[] = "exynos4210-gic-alias_dist"; +    char cpu_alias_name[sizeof(cpu_prefix) + 3]; +    char dist_alias_name[sizeof(cpu_prefix) + 3]; +    SysBusDevice *busdev; + +    s->gic = qdev_create(NULL, "arm_gic"); +    qdev_prop_set_uint32(s->gic, "num-cpu", s->num_cpu); +    qdev_prop_set_uint32(s->gic, "num-irq", EXYNOS4210_GIC_NIRQ); +    qdev_init_nofail(s->gic); +    busdev = SYS_BUS_DEVICE(s->gic); + +    /* Pass through outbound IRQ lines from the GIC */ +    sysbus_pass_irq(sbd, busdev); + +    /* Pass through inbound GPIO lines to the GIC */ +    qdev_init_gpio_in(dev, exynos4210_gic_set_irq, +                      EXYNOS4210_GIC_NIRQ - 32); + +    memory_region_init(&s->cpu_container, OBJECT(s), "exynos4210-cpu-container", +            EXYNOS4210_EXT_GIC_CPU_REGION_SIZE); +    memory_region_init(&s->dist_container, OBJECT(s), "exynos4210-dist-container", +            
EXYNOS4210_EXT_GIC_DIST_REGION_SIZE); + +    for (i = 0; i < s->num_cpu; i++) { +        /* Map CPU interface per SMP Core */ +        sprintf(cpu_alias_name, "%s%x", cpu_prefix, i); +        memory_region_init_alias(&s->cpu_alias[i], OBJECT(s), +                                 cpu_alias_name, +                                 sysbus_mmio_get_region(busdev, 1), +                                 0, +                                 EXYNOS4210_GIC_CPU_REGION_SIZE); +        memory_region_add_subregion(&s->cpu_container, +                EXYNOS4210_EXT_GIC_CPU_GET_OFFSET(i), &s->cpu_alias[i]); + +        /* Map Distributor per SMP Core */ +        sprintf(dist_alias_name, "%s%x", dist_prefix, i); +        memory_region_init_alias(&s->dist_alias[i], OBJECT(s), +                                 dist_alias_name, +                                 sysbus_mmio_get_region(busdev, 0), +                                 0, +                                 EXYNOS4210_GIC_DIST_REGION_SIZE); +        memory_region_add_subregion(&s->dist_container, +                EXYNOS4210_EXT_GIC_DIST_GET_OFFSET(i), &s->dist_alias[i]); +    } + +    sysbus_init_mmio(sbd, &s->cpu_container); +    sysbus_init_mmio(sbd, &s->dist_container); + +    return 0; +} + +static Property exynos4210_gic_properties[] = { +    DEFINE_PROP_UINT32("num-cpu", Exynos4210GicState, num_cpu, 1), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void exynos4210_gic_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + +    k->init = exynos4210_gic_init; +    dc->props = exynos4210_gic_properties; +} + +static const TypeInfo exynos4210_gic_info = { +    .name          = TYPE_EXYNOS4210_GIC, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(Exynos4210GicState), +    .class_init    = exynos4210_gic_class_init, +}; + +static void exynos4210_gic_register_types(void) +{ +    type_register_static(&exynos4210_gic_info); +} + +type_init(exynos4210_gic_register_types) + +/* IRQ OR Gate struct. + * + * This device models an OR gate. There are n_in input qdev gpio lines and one + * output sysbus IRQ line. The output IRQ level is formed as OR between all + * gpio inputs. + */ + +#define TYPE_EXYNOS4210_IRQ_GATE "exynos4210.irq_gate" +#define EXYNOS4210_IRQ_GATE(obj) \ +    OBJECT_CHECK(Exynos4210IRQGateState, (obj), TYPE_EXYNOS4210_IRQ_GATE) + +typedef struct Exynos4210IRQGateState { +    SysBusDevice parent_obj; + +    uint32_t n_in;      /* inputs amount */ +    uint32_t *level;    /* input levels */ +    qemu_irq out;       /* output IRQ */ +} Exynos4210IRQGateState; + +static Property exynos4210_irq_gate_properties[] = { +    DEFINE_PROP_UINT32("n_in", Exynos4210IRQGateState, n_in, 1), +    DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_exynos4210_irq_gate = { +    .name = "exynos4210.irq_gate", +    .version_id = 2, +    .minimum_version_id = 2, +    .fields = (VMStateField[]) { +        VMSTATE_VBUFFER_UINT32(level, Exynos4210IRQGateState, 1, NULL, 0, n_in), +        VMSTATE_END_OF_LIST() +    } +}; + +/* Process a change in IRQ input. 
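+ * The gate latches the level of every input line and raises its output
+ * while any input is non-zero, lowering it once all inputs have dropped,
+ * i.e. a plain OR of the inputs.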
*/ +static void exynos4210_irq_gate_handler(void *opaque, int irq, int level) +{ +    Exynos4210IRQGateState *s = (Exynos4210IRQGateState *)opaque; +    uint32_t i; + +    assert(irq < s->n_in); + +    s->level[irq] = level; + +    for (i = 0; i < s->n_in; i++) { +        if (s->level[i] >= 1) { +            qemu_irq_raise(s->out); +            return; +        } +    } + +    qemu_irq_lower(s->out); +} + +static void exynos4210_irq_gate_reset(DeviceState *d) +{ +    Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(d); + +    memset(s->level, 0, s->n_in * sizeof(*s->level)); +} + +/* + * IRQ Gate initialization. + */ +static int exynos4210_irq_gate_init(SysBusDevice *sbd) +{ +    DeviceState *dev = DEVICE(sbd); +    Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(dev); + +    /* Allocate general purpose input signals and connect a handler to each of +     * them */ +    qdev_init_gpio_in(dev, exynos4210_irq_gate_handler, s->n_in); + +    s->level = g_malloc0(s->n_in * sizeof(*s->level)); + +    sysbus_init_irq(sbd, &s->out); + +    return 0; +} + +static void exynos4210_irq_gate_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + +    k->init = exynos4210_irq_gate_init; +    dc->reset = exynos4210_irq_gate_reset; +    dc->vmsd = &vmstate_exynos4210_irq_gate; +    dc->props = exynos4210_irq_gate_properties; +} + +static const TypeInfo exynos4210_irq_gate_info = { +    .name          = TYPE_EXYNOS4210_IRQ_GATE, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(Exynos4210IRQGateState), +    .class_init    = exynos4210_irq_gate_class_init, +}; + +static void exynos4210_irq_gate_register_types(void) +{ +    type_register_static(&exynos4210_irq_gate_info); +} + +type_init(exynos4210_irq_gate_register_types) diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h new file mode 100644 index 00000000..20c1e8a2 --- /dev/null +++ b/hw/intc/gic_internal.h @@ -0,0 +1,103 @@ +/* + * ARM GIC support - internal interfaces + * + * Copyright (c) 2012 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef QEMU_ARM_GIC_INTERNAL_H +#define QEMU_ARM_GIC_INTERNAL_H + +#include "hw/intc/arm_gic.h" + +#define ALL_CPU_MASK ((unsigned)(((1 << GIC_NCPU) - 1))) + +/* The NVIC has 16 internal vectors.  However these are not exposed +   through the normal GIC interface.  */ +#define GIC_BASE_IRQ ((s->revision == REV_NVIC) ? 
32 : 0) + +#define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm) +#define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm) +#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0) +#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm) +#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm) +#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm) +#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm) +#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0) +#define GIC_SET_MODEL(irq) s->irq_state[irq].model = true +#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = false +#define GIC_TEST_MODEL(irq) s->irq_state[irq].model +#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level |= (cm) +#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm) +#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0) +#define GIC_SET_EDGE_TRIGGER(irq) s->irq_state[irq].edge_trigger = true +#define GIC_CLEAR_EDGE_TRIGGER(irq) s->irq_state[irq].edge_trigger = false +#define GIC_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger) +#define GIC_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ?            \ +                                    s->priority1[irq][cpu] :            \ +                                    s->priority2[(irq) - GIC_INTERNAL]) +#define GIC_TARGET(irq) s->irq_target[irq] +#define GIC_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm)) +#define GIC_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm)) +#define GIC_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0) + +#define GICD_CTLR_EN_GRP0 (1U << 0) +#define GICD_CTLR_EN_GRP1 (1U << 1) + +#define GICC_CTLR_EN_GRP0    (1U << 0) +#define GICC_CTLR_EN_GRP1    (1U << 1) +#define GICC_CTLR_ACK_CTL    (1U << 2) +#define GICC_CTLR_FIQ_EN     (1U << 3) +#define GICC_CTLR_CBPR       (1U << 4) /* GICv1: SBPR */ +#define GICC_CTLR_EOIMODE    (1U << 9) +#define GICC_CTLR_EOIMODE_NS (1U << 10) + +/* Valid bits for GICC_CTLR for GICv1, v1 with security extensions, + * GICv2 and GICv2 with security extensions: + */ +#define GICC_CTLR_V1_MASK    0x1 +#define GICC_CTLR_V1_S_MASK  0x1f +#define GICC_CTLR_V2_MASK    0x21f +#define GICC_CTLR_V2_S_MASK  0x61f + +/* The special cases for the revision property: */ +#define REV_11MPCORE 0 +#define REV_NVIC 0xffffffff + +void gic_set_pending_private(GICState *s, int cpu, int irq); +uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs); +void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs); +void gic_update(GICState *s); +void gic_init_irqs_and_distributor(GICState *s); +void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val, +                      MemTxAttrs attrs); + +static inline bool gic_test_pending(GICState *s, int irq, int cm) +{ +    if (s->revision == REV_NVIC || s->revision == REV_11MPCORE) { +        return s->irq_state[irq].pending & cm; +    } else { +        /* Edge-triggered interrupts are marked pending on a rising edge, but +         * level-triggered interrupts are either considered pending when the +         * level is active or if software has explicitly written to +         * GICD_ISPENDR to set the state pending. 
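+         * A write to GICD_ICPENDR can therefore only clear the latched
+         * pending state; a level-triggered interrupt whose input line is
+         * still asserted remains pending.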
+         */ +        return (s->irq_state[irq].pending & cm) || +            (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_LEVEL(irq, cm)); +    } +} + +#endif /* !QEMU_ARM_GIC_INTERNAL_H */ diff --git a/hw/intc/grlib_irqmp.c b/hw/intc/grlib_irqmp.c new file mode 100644 index 00000000..d1813f76 --- /dev/null +++ b/hw/intc/grlib_irqmp.c @@ -0,0 +1,374 @@ +/* + * QEMU GRLIB IRQMP Emulator + * + * (Multiprocessor and extended interrupt not supported) + * + * Copyright (c) 2010-2011 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "hw/sysbus.h" +#include "cpu.h" + +#include "hw/sparc/grlib.h" + +#include "trace.h" + +#define IRQMP_MAX_CPU 16 +#define IRQMP_REG_SIZE 256      /* Size of memory mapped registers */ + +/* Memory mapped register offsets */ +#define LEVEL_OFFSET     0x00 +#define PENDING_OFFSET   0x04 +#define FORCE0_OFFSET    0x08 +#define CLEAR_OFFSET     0x0C +#define MP_STATUS_OFFSET 0x10 +#define BROADCAST_OFFSET 0x14 +#define MASK_OFFSET      0x40 +#define FORCE_OFFSET     0x80 +#define EXTENDED_OFFSET  0xC0 + +#define TYPE_GRLIB_IRQMP "grlib,irqmp" +#define GRLIB_IRQMP(obj) OBJECT_CHECK(IRQMP, (obj), TYPE_GRLIB_IRQMP) + +typedef struct IRQMPState IRQMPState; + +typedef struct IRQMP { +    SysBusDevice parent_obj; + +    MemoryRegion iomem; + +    void *set_pil_in; +    void *set_pil_in_opaque; + +    IRQMPState *state; +} IRQMP; + +struct IRQMPState { +    uint32_t level; +    uint32_t pending; +    uint32_t clear; +    uint32_t broadcast; + +    uint32_t mask[IRQMP_MAX_CPU]; +    uint32_t force[IRQMP_MAX_CPU]; +    uint32_t extended[IRQMP_MAX_CPU]; + +    IRQMP    *parent; +}; + +static void grlib_irqmp_check_irqs(IRQMPState *state) +{ +    uint32_t      pend   = 0; +    uint32_t      level0 = 0; +    uint32_t      level1 = 0; +    set_pil_in_fn set_pil_in; + +    assert(state != NULL); +    assert(state->parent != NULL); + +    /* IRQ for CPU 0 (no SMP support) */ +    pend = (state->pending | state->force[0]) +        & state->mask[0]; + +    level0 = pend & ~state->level; +    level1 = pend &  state->level; + +    trace_grlib_irqmp_check_irqs(state->pending, state->force[0], +                                 state->mask[0], level1, level0); + +    set_pil_in = (set_pil_in_fn)state->parent->set_pil_in; + +    /* Trigger level1 interrupt first and level0 if there is no level1 */ +    if (level1 != 0) { +        set_pil_in(state->parent->set_pil_in_opaque, level1); +    } else { +        set_pil_in(state->parent->set_pil_in_opaque, 
level0); +    } +} + +void grlib_irqmp_ack(DeviceState *dev, int intno) +{ +    IRQMP        *irqmp = GRLIB_IRQMP(dev); +    IRQMPState   *state; +    uint32_t      mask; + +    state = irqmp->state; +    assert(state != NULL); + +    intno &= 15; +    mask = 1 << intno; + +    trace_grlib_irqmp_ack(intno); + +    /* Clear registers */ +    state->pending  &= ~mask; +    state->force[0] &= ~mask; /* Only CPU 0 (No SMP support) */ + +    grlib_irqmp_check_irqs(state); +} + +void grlib_irqmp_set_irq(void *opaque, int irq, int level) +{ +    IRQMP      *irqmp = GRLIB_IRQMP(opaque); +    IRQMPState *s; +    int         i = 0; + +    s = irqmp->state; +    assert(s         != NULL); +    assert(s->parent != NULL); + + +    if (level) { +        trace_grlib_irqmp_set_irq(irq); + +        if (s->broadcast & 1 << irq) { +            /* Broadcasted IRQ */ +            for (i = 0; i < IRQMP_MAX_CPU; i++) { +                s->force[i] |= 1 << irq; +            } +        } else { +            s->pending |= 1 << irq; +        } +        grlib_irqmp_check_irqs(s); + +    } +} + +static uint64_t grlib_irqmp_read(void *opaque, hwaddr addr, +                                 unsigned size) +{ +    IRQMP      *irqmp = opaque; +    IRQMPState *state; + +    assert(irqmp != NULL); +    state = irqmp->state; +    assert(state != NULL); + +    addr &= 0xff; + +    /* global registers */ +    switch (addr) { +    case LEVEL_OFFSET: +        return state->level; + +    case PENDING_OFFSET: +        return state->pending; + +    case FORCE0_OFFSET: +        /* This register is an "alias" for the force register of CPU 0 */ +        return state->force[0]; + +    case CLEAR_OFFSET: +    case MP_STATUS_OFFSET: +        /* Always read as 0 */ +        return 0; + +    case BROADCAST_OFFSET: +        return state->broadcast; + +    default: +        break; +    } + +    /* mask registers */ +    if (addr >= MASK_OFFSET && addr < FORCE_OFFSET) { +        int cpu = (addr - MASK_OFFSET) / 4; +        assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + +        return state->mask[cpu]; +    } + +    /* force registers */ +    if (addr >= FORCE_OFFSET && addr < EXTENDED_OFFSET) { +        int cpu = (addr - FORCE_OFFSET) / 4; +        assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + +        return state->force[cpu]; +    } + +    /* extended (not supported) */ +    if (addr >= EXTENDED_OFFSET && addr < IRQMP_REG_SIZE) { +        int cpu = (addr - EXTENDED_OFFSET) / 4; +        assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + +        return state->extended[cpu]; +    } + +    trace_grlib_irqmp_readl_unknown(addr); +    return 0; +} + +static void grlib_irqmp_write(void *opaque, hwaddr addr, +                              uint64_t value, unsigned size) +{ +    IRQMP      *irqmp = opaque; +    IRQMPState *state; + +    assert(irqmp != NULL); +    state = irqmp->state; +    assert(state != NULL); + +    addr &= 0xff; + +    /* global registers */ +    switch (addr) { +    case LEVEL_OFFSET: +        value &= 0xFFFF << 1; /* clean up the value */ +        state->level = value; +        return; + +    case PENDING_OFFSET: +        /* Read Only */ +        return; + +    case FORCE0_OFFSET: +        /* This register is an "alias" for the force register of CPU 0 */ + +        value &= 0xFFFE; /* clean up the value */ +        state->force[0] = value; +        grlib_irqmp_check_irqs(irqmp->state); +        return; + +    case CLEAR_OFFSET: +        value &= ~1; /* clean up the value */ +        state->pending &= ~value; +        return; + +    case 
MP_STATUS_OFFSET: +        /* Read Only (no SMP support) */ +        return; + +    case BROADCAST_OFFSET: +        value &= 0xFFFE; /* clean up the value */ +        state->broadcast = value; +        return; + +    default: +        break; +    } + +    /* mask registers */ +    if (addr >= MASK_OFFSET && addr < FORCE_OFFSET) { +        int cpu = (addr - MASK_OFFSET) / 4; +        assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + +        value &= ~1; /* clean up the value */ +        state->mask[cpu] = value; +        grlib_irqmp_check_irqs(irqmp->state); +        return; +    } + +    /* force registers */ +    if (addr >= FORCE_OFFSET && addr < EXTENDED_OFFSET) { +        int cpu = (addr - FORCE_OFFSET) / 4; +        assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + +        uint32_t force = value & 0xFFFE; +        uint32_t clear = (value >> 16) & 0xFFFE; +        uint32_t old   = state->force[cpu]; + +        state->force[cpu] = (old | force) & ~clear; +        grlib_irqmp_check_irqs(irqmp->state); +        return; +    } + +    /* extended (not supported) */ +    if (addr >= EXTENDED_OFFSET && addr < IRQMP_REG_SIZE) { +        int cpu = (addr - EXTENDED_OFFSET) / 4; +        assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + +        value &= 0xF; /* clean up the value */ +        state->extended[cpu] = value; +        return; +    } + +    trace_grlib_irqmp_writel_unknown(addr, value); +} + +static const MemoryRegionOps grlib_irqmp_ops = { +    .read = grlib_irqmp_read, +    .write = grlib_irqmp_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +    .valid = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static void grlib_irqmp_reset(DeviceState *d) +{ +    IRQMP *irqmp = GRLIB_IRQMP(d); +    assert(irqmp->state != NULL); + +    memset(irqmp->state, 0, sizeof *irqmp->state); +    irqmp->state->parent = irqmp; +} + +static int grlib_irqmp_init(SysBusDevice *dev) +{ +    IRQMP *irqmp = GRLIB_IRQMP(dev); + +    /* Check parameters */ +    if (irqmp->set_pil_in == NULL) { +        return -1; +    } + +    memory_region_init_io(&irqmp->iomem, OBJECT(dev), &grlib_irqmp_ops, irqmp, +                          "irqmp", IRQMP_REG_SIZE); + +    irqmp->state = g_malloc0(sizeof *irqmp->state); + +    sysbus_init_mmio(dev, &irqmp->iomem); + +    return 0; +} + +static Property grlib_irqmp_properties[] = { +    DEFINE_PROP_PTR("set_pil_in", IRQMP, set_pil_in), +    DEFINE_PROP_PTR("set_pil_in_opaque", IRQMP, set_pil_in_opaque), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void grlib_irqmp_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + +    k->init = grlib_irqmp_init; +    dc->reset = grlib_irqmp_reset; +    dc->props = grlib_irqmp_properties; +    /* Reason: pointer properties "set_pil_in", "set_pil_in_opaque" */ +    dc->cannot_instantiate_with_device_add_yet = true; +} + +static const TypeInfo grlib_irqmp_info = { +    .name          = TYPE_GRLIB_IRQMP, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(IRQMP), +    .class_init    = grlib_irqmp_class_init, +}; + +static void grlib_irqmp_register_types(void) +{ +    type_register_static(&grlib_irqmp_info); +} + +type_init(grlib_irqmp_register_types) diff --git a/hw/intc/heathrow_pic.c b/hw/intc/heathrow_pic.c new file mode 100644 index 00000000..9ff3119e --- /dev/null +++ b/hw/intc/heathrow_pic.c @@ -0,0 +1,213 @@ +/* + * Heathrow PIC support (OldWorld PowerMac) + * + * Copyright (c) 2005-2007 Fabrice Bellard + * 
Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "hw/hw.h" +#include "hw/ppc/mac.h" + +/* debug PIC */ +//#define DEBUG_PIC + +#ifdef DEBUG_PIC +#define PIC_DPRINTF(fmt, ...)                                   \ +    do { printf("PIC: " fmt , ## __VA_ARGS__); } while (0) +#else +#define PIC_DPRINTF(fmt, ...) +#endif + +typedef struct HeathrowPIC { +    uint32_t events; +    uint32_t mask; +    uint32_t levels; +    uint32_t level_triggered; +} HeathrowPIC; + +typedef struct HeathrowPICS { +    MemoryRegion mem; +    HeathrowPIC pics[2]; +    qemu_irq *irqs; +} HeathrowPICS; + +static inline int check_irq(HeathrowPIC *pic) +{ +    return (pic->events | (pic->levels & pic->level_triggered)) & pic->mask; +} + +/* update the CPU irq state */ +static void heathrow_pic_update(HeathrowPICS *s) +{ +    if (check_irq(&s->pics[0]) || check_irq(&s->pics[1])) { +        qemu_irq_raise(s->irqs[0]); +    } else { +        qemu_irq_lower(s->irqs[0]); +    } +} + +static void pic_write(void *opaque, hwaddr addr, +                      uint64_t value, unsigned size) +{ +    HeathrowPICS *s = opaque; +    HeathrowPIC *pic; +    unsigned int n; + +    n = ((addr & 0xfff) - 0x10) >> 4; +    PIC_DPRINTF("writel: " TARGET_FMT_plx " %u: %08x\n", addr, n, value); +    if (n >= 2) +        return; +    pic = &s->pics[n]; +    switch(addr & 0xf) { +    case 0x04: +        pic->mask = value; +        heathrow_pic_update(s); +        break; +    case 0x08: +        /* do not reset level triggered IRQs */ +        value &= ~pic->level_triggered; +        pic->events &= ~value; +        heathrow_pic_update(s); +        break; +    default: +        break; +    } +} + +static uint64_t pic_read(void *opaque, hwaddr addr, +                         unsigned size) +{ +    HeathrowPICS *s = opaque; +    HeathrowPIC *pic; +    unsigned int n; +    uint32_t value; + +    n = ((addr & 0xfff) - 0x10) >> 4; +    if (n >= 2) { +        value = 0; +    } else { +        pic = &s->pics[n]; +        switch(addr & 0xf) { +        case 0x0: +            value = pic->events; +            break; +        case 0x4: +            value = pic->mask; +            break; +        case 0xc: +            value = pic->levels; +            break; +        default: +            value = 0; +            break; +        } +    } +    PIC_DPRINTF("readl: " TARGET_FMT_plx " %u: %08x\n", addr, n, value); +    return value; +} + +static const MemoryRegionOps heathrow_pic_ops = { +    
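+    /* Little-endian MMIO window: offsets 0x10-0x1f address PIC 0 and
+     * 0x20-0x2f address PIC 1 (see pic_read/pic_write).
+     */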
.read = pic_read, +    .write = pic_write, +    .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void heathrow_pic_set_irq(void *opaque, int num, int level) +{ +    HeathrowPICS *s = opaque; +    HeathrowPIC *pic; +    unsigned int irq_bit; + +#if defined(DEBUG) +    { +        static int last_level[64]; +        if (last_level[num] != level) { +            PIC_DPRINTF("set_irq: num=0x%02x level=%d\n", num, level); +            last_level[num] = level; +        } +    } +#endif +    pic = &s->pics[1 - (num >> 5)]; +    irq_bit = 1 << (num & 0x1f); +    if (level) { +        pic->events |= irq_bit & ~pic->level_triggered; +        pic->levels |= irq_bit; +    } else { +        pic->levels &= ~irq_bit; +    } +    heathrow_pic_update(s); +} + +static const VMStateDescription vmstate_heathrow_pic_one = { +    .name = "heathrow_pic_one", +    .version_id = 0, +    .minimum_version_id = 0, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(events, HeathrowPIC), +        VMSTATE_UINT32(mask, HeathrowPIC), +        VMSTATE_UINT32(levels, HeathrowPIC), +        VMSTATE_UINT32(level_triggered, HeathrowPIC), +        VMSTATE_END_OF_LIST() +    } +}; + +static const VMStateDescription vmstate_heathrow_pic = { +    .name = "heathrow_pic", +    .version_id = 1, +    .minimum_version_id = 1, +    .fields = (VMStateField[]) { +        VMSTATE_STRUCT_ARRAY(pics, HeathrowPICS, 2, 1, +                             vmstate_heathrow_pic_one, HeathrowPIC), +        VMSTATE_END_OF_LIST() +    } +}; + +static void heathrow_pic_reset_one(HeathrowPIC *s) +{ +    memset(s, '\0', sizeof(HeathrowPIC)); +} + +static void heathrow_pic_reset(void *opaque) +{ +    HeathrowPICS *s = opaque; + +    heathrow_pic_reset_one(&s->pics[0]); +    heathrow_pic_reset_one(&s->pics[1]); + +    s->pics[0].level_triggered = 0; +    s->pics[1].level_triggered = 0x1ff00000; +} + +qemu_irq *heathrow_pic_init(MemoryRegion **pmem, +                            int nb_cpus, qemu_irq **irqs) +{ +    HeathrowPICS *s; + +    s = g_malloc0(sizeof(HeathrowPICS)); +    /* only 1 CPU */ +    s->irqs = irqs[0]; +    memory_region_init_io(&s->mem, NULL, &heathrow_pic_ops, s, +                          "heathrow-pic", 0x1000); +    *pmem = &s->mem; + +    vmstate_register(NULL, -1, &vmstate_heathrow_pic, s); +    qemu_register_reset(heathrow_pic_reset, s); +    return qemu_allocate_irqs(heathrow_pic_set_irq, s, 64); +} diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c new file mode 100644 index 00000000..0f5c0259 --- /dev/null +++ b/hw/intc/i8259.c @@ -0,0 +1,523 @@ +/* + * QEMU 8259 interrupt controller emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "hw/hw.h" +#include "hw/i386/pc.h" +#include "hw/isa/isa.h" +#include "monitor/monitor.h" +#include "qemu/timer.h" +#include "hw/isa/i8259_internal.h" + +/* debug PIC */ +//#define DEBUG_PIC + +#ifdef DEBUG_PIC +#define DPRINTF(fmt, ...)                                       \ +    do { printf("pic: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) +#endif + +//#define DEBUG_IRQ_LATENCY +//#define DEBUG_IRQ_COUNT + +#define TYPE_I8259 "isa-i8259" +#define PIC_CLASS(class) OBJECT_CLASS_CHECK(PICClass, (class), TYPE_I8259) +#define PIC_GET_CLASS(obj) OBJECT_GET_CLASS(PICClass, (obj), TYPE_I8259) + +/** + * PICClass: + * @parent_realize: The parent's realizefn. + */ +typedef struct PICClass { +    PICCommonClass parent_class; + +    DeviceRealize parent_realize; +} PICClass; + +#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_COUNT) +static int irq_level[16]; +#endif +#ifdef DEBUG_IRQ_COUNT +static uint64_t irq_count[16]; +#endif +#ifdef DEBUG_IRQ_LATENCY +static int64_t irq_time[16]; +#endif +DeviceState *isa_pic; +static PICCommonState *slave_pic; + +/* return the highest priority found in mask (highest = smallest +   number). Return 8 if no irq */ +static int get_priority(PICCommonState *s, int mask) +{ +    int priority; + +    if (mask == 0) { +        return 8; +    } +    priority = 0; +    while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0) { +        priority++; +    } +    return priority; +} + +/* return the pic wanted interrupt. return -1 if none */ +static int pic_get_irq(PICCommonState *s) +{ +    int mask, cur_priority, priority; + +    mask = s->irr & ~s->imr; +    priority = get_priority(s, mask); +    if (priority == 8) { +        return -1; +    } +    /* compute current priority. If special fully nested mode on the +       master, the IRQ coming from the slave is not taken into account +       for the priority computation. */ +    mask = s->isr; +    if (s->special_mask) { +        mask &= ~s->imr; +    } +    if (s->special_fully_nested_mode && s->master) { +        mask &= ~(1 << 2); +    } +    cur_priority = get_priority(s, mask); +    if (priority < cur_priority) { +        /* higher priority found: an irq should be generated */ +        return (priority + s->priority_add) & 7; +    } else { +        return -1; +    } +} + +/* Update INT output. Must be called every time the output may have changed. */ +static void pic_update_irq(PICCommonState *s) +{ +    int irq; + +    irq = pic_get_irq(s); +    if (irq >= 0) { +        DPRINTF("pic%d: imr=%x irr=%x padd=%d\n", +                s->master ? 0 : 1, s->imr, s->irr, s->priority_add); +        qemu_irq_raise(s->int_out[0]); +    } else { +        qemu_irq_lower(s->int_out[0]); +    } +} + +/* set irq level. If an edge is detected, then the IRR is set to 1 */ +static void pic_set_irq(void *opaque, int irq, int level) +{ +    PICCommonState *s = opaque; +    int mask = 1 << irq; + +#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_COUNT) || \ +    defined(DEBUG_IRQ_LATENCY) +    int irq_index = s->master ? 
irq : irq + 8; +#endif +#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_COUNT) +    if (level != irq_level[irq_index]) { +        DPRINTF("pic_set_irq: irq=%d level=%d\n", irq_index, level); +        irq_level[irq_index] = level; +#ifdef DEBUG_IRQ_COUNT +        if (level == 1) { +            irq_count[irq_index]++; +        } +#endif +    } +#endif +#ifdef DEBUG_IRQ_LATENCY +    if (level) { +        irq_time[irq_index] = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +    } +#endif + +    if (s->elcr & mask) { +        /* level triggered */ +        if (level) { +            s->irr |= mask; +            s->last_irr |= mask; +        } else { +            s->irr &= ~mask; +            s->last_irr &= ~mask; +        } +    } else { +        /* edge triggered */ +        if (level) { +            if ((s->last_irr & mask) == 0) { +                s->irr |= mask; +            } +            s->last_irr |= mask; +        } else { +            s->last_irr &= ~mask; +        } +    } +    pic_update_irq(s); +} + +/* acknowledge interrupt 'irq' */ +static void pic_intack(PICCommonState *s, int irq) +{ +    if (s->auto_eoi) { +        if (s->rotate_on_auto_eoi) { +            s->priority_add = (irq + 1) & 7; +        } +    } else { +        s->isr |= (1 << irq); +    } +    /* We don't clear a level sensitive interrupt here */ +    if (!(s->elcr & (1 << irq))) { +        s->irr &= ~(1 << irq); +    } +    pic_update_irq(s); +} + +int pic_read_irq(DeviceState *d) +{ +    PICCommonState *s = PIC_COMMON(d); +    int irq, irq2, intno; + +    irq = pic_get_irq(s); +    if (irq >= 0) { +        if (irq == 2) { +            irq2 = pic_get_irq(slave_pic); +            if (irq2 >= 0) { +                pic_intack(slave_pic, irq2); +            } else { +                /* spurious IRQ on slave controller */ +                irq2 = 7; +            } +            intno = slave_pic->irq_base + irq2; +        } else { +            intno = s->irq_base + irq; +        } +        pic_intack(s, irq); +    } else { +        /* spurious IRQ on host controller */ +        irq = 7; +        intno = s->irq_base + irq; +    } + +#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_LATENCY) +    if (irq == 2) { +        irq = irq2 + 8; +    } +#endif +#ifdef DEBUG_IRQ_LATENCY +    printf("IRQ%d latency=%0.3fus\n", +           irq, +           (double)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - +                    irq_time[irq]) * 1000000.0 / get_ticks_per_sec()); +#endif +    DPRINTF("pic_interrupt: irq=%d\n", irq); +    return intno; +} + +static void pic_init_reset(PICCommonState *s) +{ +    pic_reset_common(s); +    pic_update_irq(s); +} + +static void pic_reset(DeviceState *dev) +{ +    PICCommonState *s = PIC_COMMON(dev); + +    s->elcr = 0; +    pic_init_reset(s); +} + +static void pic_ioport_write(void *opaque, hwaddr addr64, +                             uint64_t val64, unsigned size) +{ +    PICCommonState *s = opaque; +    uint32_t addr = addr64; +    uint32_t val = val64; +    int priority, cmd, irq; + +    DPRINTF("write: addr=0x%02x val=0x%02x\n", addr, val); +    if (addr == 0) { +        if (val & 0x10) { +            pic_init_reset(s); +            s->init_state = 1; +            s->init4 = val & 1; +            s->single_mode = val & 2; +            if (val & 0x08) { +                qemu_log_mask(LOG_UNIMP, +                              "i8259: level sensitive irq not supported\n"); +            } +        } else if (val & 0x08) { +            if (val & 0x04) { +                s->poll = 1; +            } +            if 
(val & 0x02) { +                s->read_reg_select = val & 1; +            } +            if (val & 0x40) { +                s->special_mask = (val >> 5) & 1; +            } +        } else { +            cmd = val >> 5; +            switch (cmd) { +            case 0: +            case 4: +                s->rotate_on_auto_eoi = cmd >> 2; +                break; +            case 1: /* end of interrupt */ +            case 5: +                priority = get_priority(s, s->isr); +                if (priority != 8) { +                    irq = (priority + s->priority_add) & 7; +                    s->isr &= ~(1 << irq); +                    if (cmd == 5) { +                        s->priority_add = (irq + 1) & 7; +                    } +                    pic_update_irq(s); +                } +                break; +            case 3: +                irq = val & 7; +                s->isr &= ~(1 << irq); +                pic_update_irq(s); +                break; +            case 6: +                s->priority_add = (val + 1) & 7; +                pic_update_irq(s); +                break; +            case 7: +                irq = val & 7; +                s->isr &= ~(1 << irq); +                s->priority_add = (irq + 1) & 7; +                pic_update_irq(s); +                break; +            default: +                /* no operation */ +                break; +            } +        } +    } else { +        switch (s->init_state) { +        case 0: +            /* normal mode */ +            s->imr = val; +            pic_update_irq(s); +            break; +        case 1: +            s->irq_base = val & 0xf8; +            s->init_state = s->single_mode ? (s->init4 ? 3 : 0) : 2; +            break; +        case 2: +            if (s->init4) { +                s->init_state = 3; +            } else { +                s->init_state = 0; +            } +            break; +        case 3: +            s->special_fully_nested_mode = (val >> 4) & 1; +            s->auto_eoi = (val >> 1) & 1; +            s->init_state = 0; +            break; +        } +    } +} + +static uint64_t pic_ioport_read(void *opaque, hwaddr addr, +                                unsigned size) +{ +    PICCommonState *s = opaque; +    int ret; + +    if (s->poll) { +        ret = pic_get_irq(s); +        if (ret >= 0) { +            pic_intack(s, ret); +            ret |= 0x80; +        } else { +            ret = 0; +        } +        s->poll = 0; +    } else { +        if (addr == 0) { +            if (s->read_reg_select) { +                ret = s->isr; +            } else { +                ret = s->irr; +            } +        } else { +            ret = s->imr; +        } +    } +    DPRINTF("read: addr=0x%02" HWADDR_PRIx " val=0x%02x\n", addr, ret); +    return ret; +} + +int pic_get_output(DeviceState *d) +{ +    PICCommonState *s = PIC_COMMON(d); + +    return (pic_get_irq(s) >= 0); +} + +static void elcr_ioport_write(void *opaque, hwaddr addr, +                              uint64_t val, unsigned size) +{ +    PICCommonState *s = opaque; +    s->elcr = val & s->elcr_mask; +} + +static uint64_t elcr_ioport_read(void *opaque, hwaddr addr, +                                 unsigned size) +{ +    PICCommonState *s = opaque; +    return s->elcr; +} + +static const MemoryRegionOps pic_base_ioport_ops = { +    .read = pic_ioport_read, +    .write = pic_ioport_write, +    .impl = { +        .min_access_size = 1, +        .max_access_size = 1, +    }, +}; + +static const MemoryRegionOps 
pic_elcr_ioport_ops = { +    .read = elcr_ioport_read, +    .write = elcr_ioport_write, +    .impl = { +        .min_access_size = 1, +        .max_access_size = 1, +    }, +}; + +static void pic_realize(DeviceState *dev, Error **errp) +{ +    PICCommonState *s = PIC_COMMON(dev); +    PICClass *pc = PIC_GET_CLASS(dev); + +    memory_region_init_io(&s->base_io, OBJECT(s), &pic_base_ioport_ops, s, +                          "pic", 2); +    memory_region_init_io(&s->elcr_io, OBJECT(s), &pic_elcr_ioport_ops, s, +                          "elcr", 1); + +    qdev_init_gpio_out(dev, s->int_out, ARRAY_SIZE(s->int_out)); +    qdev_init_gpio_in(dev, pic_set_irq, 8); + +    pc->parent_realize(dev, errp); +} + +void hmp_info_pic(Monitor *mon, const QDict *qdict) +{ +    int i; +    PICCommonState *s; + +    if (!isa_pic) { +        return; +    } +    for (i = 0; i < 2; i++) { +        s = i == 0 ? PIC_COMMON(isa_pic) : slave_pic; +        monitor_printf(mon, "pic%d: irr=%02x imr=%02x isr=%02x hprio=%d " +                       "irq_base=%02x rr_sel=%d elcr=%02x fnm=%d\n", +                       i, s->irr, s->imr, s->isr, s->priority_add, +                       s->irq_base, s->read_reg_select, s->elcr, +                       s->special_fully_nested_mode); +    } +} + +void hmp_info_irq(Monitor *mon, const QDict *qdict) +{ +#ifndef DEBUG_IRQ_COUNT +    monitor_printf(mon, "irq statistic code not compiled.\n"); +#else +    int i; +    int64_t count; + +    monitor_printf(mon, "IRQ statistics:\n"); +    for (i = 0; i < 16; i++) { +        count = irq_count[i]; +        if (count > 0) { +            monitor_printf(mon, "%2d: %" PRId64 "\n", i, count); +        } +    } +#endif +} + +qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq) +{ +    qemu_irq *irq_set; +    DeviceState *dev; +    ISADevice *isadev; +    int i; + +    irq_set = g_new0(qemu_irq, ISA_NUM_IRQS); + +    isadev = i8259_init_chip(TYPE_I8259, bus, true); +    dev = DEVICE(isadev); + +    qdev_connect_gpio_out(dev, 0, parent_irq); +    for (i = 0 ; i < 8; i++) { +        irq_set[i] = qdev_get_gpio_in(dev, i); +    } + +    isa_pic = dev; + +    isadev = i8259_init_chip(TYPE_I8259, bus, false); +    dev = DEVICE(isadev); + +    qdev_connect_gpio_out(dev, 0, irq_set[2]); +    for (i = 0 ; i < 8; i++) { +        irq_set[i + 8] = qdev_get_gpio_in(dev, i); +    } + +    slave_pic = PIC_COMMON(dev); + +    return irq_set; +} + +static void i8259_class_init(ObjectClass *klass, void *data) +{ +    PICClass *k = PIC_CLASS(klass); +    DeviceClass *dc = DEVICE_CLASS(klass); + +    k->parent_realize = dc->realize; +    dc->realize = pic_realize; +    dc->reset = pic_reset; +} + +static const TypeInfo i8259_info = { +    .name       = TYPE_I8259, +    .instance_size = sizeof(PICCommonState), +    .parent     = TYPE_PIC_COMMON, +    .class_init = i8259_class_init, +    .class_size = sizeof(PICClass), +}; + +static void pic_register_types(void) +{ +    type_register_static(&i8259_info); +} + +type_init(pic_register_types) diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c new file mode 100644 index 00000000..fbf26e55 --- /dev/null +++ b/hw/intc/i8259_common.c @@ -0,0 +1,162 @@ +/* + * QEMU 8259 - common bits of emulated and KVM kernel model + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2011      Jan Kiszka, Siemens AG + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without 
restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "hw/i386/pc.h" +#include "hw/isa/i8259_internal.h" + +void pic_reset_common(PICCommonState *s) +{ +    s->last_irr = 0; +    s->irr &= s->elcr; +    s->imr = 0; +    s->isr = 0; +    s->priority_add = 0; +    s->irq_base = 0; +    s->read_reg_select = 0; +    s->poll = 0; +    s->special_mask = 0; +    s->init_state = 0; +    s->auto_eoi = 0; +    s->rotate_on_auto_eoi = 0; +    s->special_fully_nested_mode = 0; +    s->init4 = 0; +    s->single_mode = 0; +    /* Note: ELCR is not reset */ +} + +static void pic_dispatch_pre_save(void *opaque) +{ +    PICCommonState *s = opaque; +    PICCommonClass *info = PIC_COMMON_GET_CLASS(s); + +    if (info->pre_save) { +        info->pre_save(s); +    } +} + +static int pic_dispatch_post_load(void *opaque, int version_id) +{ +    PICCommonState *s = opaque; +    PICCommonClass *info = PIC_COMMON_GET_CLASS(s); + +    if (info->post_load) { +        info->post_load(s); +    } +    return 0; +} + +static void pic_common_realize(DeviceState *dev, Error **errp) +{ +    PICCommonState *s = PIC_COMMON(dev); + +    isa_register_ioport(NULL, &s->base_io, s->iobase); +    if (s->elcr_addr != -1) { +        isa_register_ioport(NULL, &s->elcr_io, s->elcr_addr); +    } + +    qdev_set_legacy_instance_id(dev, s->iobase, 1); +} + +ISADevice *i8259_init_chip(const char *name, ISABus *bus, bool master) +{ +    DeviceState *dev; +    ISADevice *isadev; + +    isadev = isa_create(bus, name); +    dev = DEVICE(isadev); +    qdev_prop_set_uint32(dev, "iobase", master ? 0x20 : 0xa0); +    qdev_prop_set_uint32(dev, "elcr_addr", master ? 0x4d0 : 0x4d1); +    qdev_prop_set_uint8(dev, "elcr_mask", master ? 
0xf8 : 0xde); +    qdev_prop_set_bit(dev, "master", master); +    qdev_init_nofail(dev); + +    return isadev; +} + +static const VMStateDescription vmstate_pic_common = { +    .name = "i8259", +    .version_id = 1, +    .minimum_version_id = 1, +    .pre_save = pic_dispatch_pre_save, +    .post_load = pic_dispatch_post_load, +    .fields = (VMStateField[]) { +        VMSTATE_UINT8(last_irr, PICCommonState), +        VMSTATE_UINT8(irr, PICCommonState), +        VMSTATE_UINT8(imr, PICCommonState), +        VMSTATE_UINT8(isr, PICCommonState), +        VMSTATE_UINT8(priority_add, PICCommonState), +        VMSTATE_UINT8(irq_base, PICCommonState), +        VMSTATE_UINT8(read_reg_select, PICCommonState), +        VMSTATE_UINT8(poll, PICCommonState), +        VMSTATE_UINT8(special_mask, PICCommonState), +        VMSTATE_UINT8(init_state, PICCommonState), +        VMSTATE_UINT8(auto_eoi, PICCommonState), +        VMSTATE_UINT8(rotate_on_auto_eoi, PICCommonState), +        VMSTATE_UINT8(special_fully_nested_mode, PICCommonState), +        VMSTATE_UINT8(init4, PICCommonState), +        VMSTATE_UINT8(single_mode, PICCommonState), +        VMSTATE_UINT8(elcr, PICCommonState), +        VMSTATE_END_OF_LIST() +    } +}; + +static Property pic_properties_common[] = { +    DEFINE_PROP_UINT32("iobase", PICCommonState, iobase,  -1), +    DEFINE_PROP_UINT32("elcr_addr", PICCommonState, elcr_addr,  -1), +    DEFINE_PROP_UINT8("elcr_mask", PICCommonState, elcr_mask,  -1), +    DEFINE_PROP_BIT("master", PICCommonState, master,  0, false), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void pic_common_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); + +    dc->vmsd = &vmstate_pic_common; +    dc->props = pic_properties_common; +    dc->realize = pic_common_realize; +    /* +     * Reason: unlike ordinary ISA devices, the PICs need additional +     * wiring: its IRQ input lines are set up by board code, and the +     * wiring of the slave to the master is hard-coded in device model +     * code. +     */ +    dc->cannot_instantiate_with_device_add_yet = true; +} + +static const TypeInfo pic_common_type = { +    .name = TYPE_PIC_COMMON, +    .parent = TYPE_ISA_DEVICE, +    .instance_size = sizeof(PICCommonState), +    .class_size = sizeof(PICCommonClass), +    .class_init = pic_common_class_init, +    .abstract = true, +}; + +static void pic_common_register_types(void) +{ +    type_register_static(&pic_common_type); +} + +type_init(pic_common_register_types) diff --git a/hw/intc/imx_avic.c b/hw/intc/imx_avic.c new file mode 100644 index 00000000..e48f66c8 --- /dev/null +++ b/hw/intc/imx_avic.c @@ -0,0 +1,406 @@ +/* + * i.MX31 Vectored Interrupt Controller + * + * Note this is NOT the PL192 provided by ARM, but + * a custom implementation by Freescale. + * + * Copyright (c) 2008 OKL + * Copyright (c) 2011 NICTA Pty Ltd + * Originally written by Hans Jiang + * + * This code is licensed under the GPL version 2 or later.  See + * the COPYING file in the top-level directory. + * + * TODO: implement vectors. + */ + +#include "hw/hw.h" +#include "hw/sysbus.h" +#include "qemu/host-utils.h" + +#define DEBUG_INT 1 +#undef DEBUG_INT /* comment out for debugging */ + +#ifdef DEBUG_INT +#define DPRINTF(fmt, args...) \ +do { printf("imx_avic: " fmt , ##args); } while (0) +#else +#define DPRINTF(fmt, args...) do {} while (0) +#endif + +/* + * Define to 1 for messages about attempts to + * access unimplemented registers or similar. 
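+ * Unlike the DPRINTF tracing above, these messages are emitted on stderr
+ * and are enabled by default.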
+ */ +#define DEBUG_IMPLEMENTATION 1 +#if DEBUG_IMPLEMENTATION +#  define IPRINTF(fmt, args...) \ +    do  { fprintf(stderr, "imx_avic: " fmt, ##args); } while (0) +#else +#  define IPRINTF(fmt, args...) do {} while (0) +#endif + +#define IMX_AVIC_NUM_IRQS 64 + +/* Interrupt Control Bits */ +#define ABFLAG (1<<25) +#define ABFEN (1<<24) +#define NIDIS (1<<22) /* Normal Interrupt disable */ +#define FIDIS (1<<21) /* Fast interrupt disable */ +#define NIAD  (1<<20) /* Normal Interrupt Arbiter Rise ARM level */ +#define FIAD  (1<<19) /* Fast Interrupt Arbiter Rise ARM level */ +#define NM    (1<<18) /* Normal interrupt mode */ + + +#define PRIO_PER_WORD (sizeof(uint32_t) * 8 / 4) +#define PRIO_WORDS (IMX_AVIC_NUM_IRQS/PRIO_PER_WORD) + +#define TYPE_IMX_AVIC "imx_avic" +#define IMX_AVIC(obj) \ +    OBJECT_CHECK(IMXAVICState, (obj), TYPE_IMX_AVIC) + +typedef struct IMXAVICState { +    SysBusDevice parent_obj; + +    MemoryRegion iomem; +    uint64_t pending; +    uint64_t enabled; +    uint64_t is_fiq; +    uint32_t intcntl; +    uint32_t intmask; +    qemu_irq irq; +    qemu_irq fiq; +    uint32_t prio[PRIO_WORDS]; /* Priorities are 4-bits each */ +} IMXAVICState; + +static const VMStateDescription vmstate_imx_avic = { +    .name = "imx-avic", +    .version_id = 1, +    .minimum_version_id = 1, +    .fields = (VMStateField[]) { +        VMSTATE_UINT64(pending, IMXAVICState), +        VMSTATE_UINT64(enabled, IMXAVICState), +        VMSTATE_UINT64(is_fiq, IMXAVICState), +        VMSTATE_UINT32(intcntl, IMXAVICState), +        VMSTATE_UINT32(intmask, IMXAVICState), +        VMSTATE_UINT32_ARRAY(prio, IMXAVICState, PRIO_WORDS), +        VMSTATE_END_OF_LIST() +    }, +}; + + + +static inline int imx_avic_prio(IMXAVICState *s, int irq) +{ +    uint32_t word = irq / PRIO_PER_WORD; +    uint32_t part = 4 * (irq % PRIO_PER_WORD); +    return 0xf & (s->prio[word] >> part); +} + +/* Update interrupts.  
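+ * FIQ is asserted whenever any enabled pending interrupt is routed to FIQ.
+ * The normal IRQ line is asserted when an enabled pending normal interrupt
+ * has a priority above the current NIMASK value; a NIMASK of 0x1f (the
+ * reset value) bypasses the priority check.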
*/ +static void imx_avic_update(IMXAVICState *s) +{ +    int i; +    uint64_t new = s->pending & s->enabled; +    uint64_t flags; + +    flags = new & s->is_fiq; +    qemu_set_irq(s->fiq, !!flags); + +    flags = new & ~s->is_fiq; +    if (!flags || (s->intmask == 0x1f)) { +        qemu_set_irq(s->irq, !!flags); +        return; +    } + +    /* +     * Take interrupt if there's a pending interrupt with +     * priority higher than the value of intmask +     */ +    for (i = 0; i < IMX_AVIC_NUM_IRQS; i++) { +        if (flags & (1UL << i)) { +            if (imx_avic_prio(s, i) > s->intmask) { +                qemu_set_irq(s->irq, 1); +                return; +            } +        } +    } +    qemu_set_irq(s->irq, 0); +} + +static void imx_avic_set_irq(void *opaque, int irq, int level) +{ +    IMXAVICState *s = (IMXAVICState *)opaque; + +    if (level) { +        DPRINTF("Raising IRQ %d, prio %d\n", +                irq, imx_avic_prio(s, irq)); +        s->pending |= (1ULL << irq); +    } else { +        DPRINTF("Clearing IRQ %d, prio %d\n", +                irq, imx_avic_prio(s, irq)); +        s->pending &= ~(1ULL << irq); +    } + +    imx_avic_update(s); +} + + +static uint64_t imx_avic_read(void *opaque, +                             hwaddr offset, unsigned size) +{ +    IMXAVICState *s = (IMXAVICState *)opaque; + + +    DPRINTF("read(offset = 0x%x)\n", offset >> 2); +    switch (offset >> 2) { +    case 0: /* INTCNTL */ +        return s->intcntl; + +    case 1: /* Normal Interrupt Mask Register, NIMASK */ +        return s->intmask; + +    case 2: /* Interrupt Enable Number Register, INTENNUM */ +    case 3: /* Interrupt Disable Number Register, INTDISNUM */ +        return 0; + +    case 4: /* Interrupt Enabled Number Register High */ +        return s->enabled >> 32; + +    case 5: /* Interrupt Enabled Number Register Low */ +        return s->enabled & 0xffffffffULL; + +    case 6: /* Interrupt Type Register High */ +        return s->is_fiq >> 32; + +    case 7: /* Interrupt Type Register Low */ +        return s->is_fiq & 0xffffffffULL; + +    case 8: /* Normal Interrupt Priority Register 7 */ +    case 9: /* Normal Interrupt Priority Register 6 */ +    case 10:/* Normal Interrupt Priority Register 5 */ +    case 11:/* Normal Interrupt Priority Register 4 */ +    case 12:/* Normal Interrupt Priority Register 3 */ +    case 13:/* Normal Interrupt Priority Register 2 */ +    case 14:/* Normal Interrupt Priority Register 1 */ +    case 15:/* Normal Interrupt Priority Register 0 */ +        return s->prio[15-(offset>>2)]; + +    case 16: /* Normal interrupt vector and status register */ +    { +        /* +         * This returns the highest priority +         * outstanding interrupt.  Where there is more than +         * one pending IRQ with the same priority, +         * take the highest numbered one. 
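+         * Reading the register also acknowledges the interrupt: the
+         * selected source is cleared from the pending set before its
+         * number and priority are returned.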
+         */ +        uint64_t flags = s->pending & s->enabled & ~s->is_fiq; +        int i; +        int prio = -1; +        int irq = -1; +        for (i = 63; i >= 0; --i) { +            if (flags & (1ULL<<i)) { +                int irq_prio = imx_avic_prio(s, i); +                if (irq_prio > prio) { +                    irq = i; +                    prio = irq_prio; +                } +            } +        } +        if (irq >= 0) { +            imx_avic_set_irq(s, irq, 0); +            return irq << 16 | prio; +        } +        return 0xffffffffULL; +    } +    case 17:/* Fast Interrupt vector and status register */ +    { +        uint64_t flags = s->pending & s->enabled & s->is_fiq; +        int i = ctz64(flags); +        if (i < 64) { +            imx_avic_set_irq(opaque, i, 0); +            return i; +        } +        return 0xffffffffULL; +    } +    case 18:/* Interrupt source register high */ +        return s->pending >> 32; + +    case 19:/* Interrupt source register low */ +        return s->pending & 0xffffffffULL; + +    case 20:/* Interrupt Force Register high */ +    case 21:/* Interrupt Force Register low */ +        return 0; + +    case 22:/* Normal Interrupt Pending Register High */ +        return (s->pending & s->enabled & ~s->is_fiq) >> 32; + +    case 23:/* Normal Interrupt Pending Register Low */ +        return (s->pending & s->enabled & ~s->is_fiq) & 0xffffffffULL; + +    case 24: /* Fast Interrupt Pending Register High  */ +        return (s->pending & s->enabled & s->is_fiq) >> 32; + +    case 25: /* Fast Interrupt Pending Register Low  */ +        return (s->pending & s->enabled & s->is_fiq) & 0xffffffffULL; + +    case 0x40:            /* AVIC vector 0, use for WFI WAR */ +        return 0x4; + +    default: +        IPRINTF("imx_avic_read: Bad offset 0x%x\n", (int)offset); +        return 0; +    } +} + +static void imx_avic_write(void *opaque, hwaddr offset, +                          uint64_t val, unsigned size) +{ +    IMXAVICState *s = (IMXAVICState *)opaque; + +    /* Vector Registers not yet supported */ +    if (offset >= 0x100 && offset <= 0x2fc) { +        IPRINTF("imx_avic_write to vector register %d ignored\n", +                (unsigned int)((offset - 0x100) >> 2)); +        return; +    } + +    DPRINTF("imx_avic_write(0x%x) = %x\n", +            (unsigned int)offset>>2, (unsigned int)val); +    switch (offset >> 2) { +    case 0: /* Interrupt Control Register, INTCNTL */ +        s->intcntl = val & (ABFEN | NIDIS | FIDIS | NIAD | FIAD | NM); +        if (s->intcntl & ABFEN) { +            s->intcntl &= ~(val & ABFLAG); +        } +        break; + +    case 1: /* Normal Interrupt Mask Register, NIMASK */ +        s->intmask = val & 0x1f; +        break; + +    case 2: /* Interrupt Enable Number Register, INTENNUM */ +        DPRINTF("enable(%d)\n", (int)val); +        val &= 0x3f; +        s->enabled |= (1ULL << val); +        break; + +    case 3: /* Interrupt Disable Number Register, INTDISNUM */ +        DPRINTF("disable(%d)\n", (int)val); +        val &= 0x3f; +        s->enabled &= ~(1ULL << val); +        break; + +    case 4: /* Interrupt Enable Number Register High */ +        s->enabled = (s->enabled & 0xffffffffULL) | (val << 32); +        break; + +    case 5: /* Interrupt Enable Number Register Low */ +        s->enabled = (s->enabled & 0xffffffff00000000ULL) | val; +        break; + +    case 6: /* Interrupt Type Register High */ +        s->is_fiq = (s->is_fiq & 0xffffffffULL) | (val << 32); +        break; + +    case 
7: /* Interrupt Type Register Low */ +        s->is_fiq = (s->is_fiq & 0xffffffff00000000ULL) | val; +        break; + +    case 8: /* Normal Interrupt Priority Register 7 */ +    case 9: /* Normal Interrupt Priority Register 6 */ +    case 10:/* Normal Interrupt Priority Register 5 */ +    case 11:/* Normal Interrupt Priority Register 4 */ +    case 12:/* Normal Interrupt Priority Register 3 */ +    case 13:/* Normal Interrupt Priority Register 2 */ +    case 14:/* Normal Interrupt Priority Register 1 */ +    case 15:/* Normal Interrupt Priority Register 0 */ +        s->prio[15-(offset>>2)] = val; +        break; + +        /* Read-only registers, writes ignored */ +    case 16:/* Normal Interrupt Vector and Status register */ +    case 17:/* Fast Interrupt vector and status register */ +    case 18:/* Interrupt source register high */ +    case 19:/* Interrupt source register low */ +        return; + +    case 20:/* Interrupt Force Register high */ +        s->pending = (s->pending & 0xffffffffULL) | (val << 32); +        break; + +    case 21:/* Interrupt Force Register low */ +        s->pending = (s->pending & 0xffffffff00000000ULL) | val; +        break; + +    case 22:/* Normal Interrupt Pending Register High */ +    case 23:/* Normal Interrupt Pending Register Low */ +    case 24: /* Fast Interrupt Pending Register High  */ +    case 25: /* Fast Interrupt Pending Register Low  */ +        return; + +    default: +        IPRINTF("imx_avic_write: Bad offset %x\n", (int)offset); +    } +    imx_avic_update(s); +} + +static const MemoryRegionOps imx_avic_ops = { +    .read = imx_avic_read, +    .write = imx_avic_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void imx_avic_reset(DeviceState *dev) +{ +    IMXAVICState *s = IMX_AVIC(dev); + +    s->pending = 0; +    s->enabled = 0; +    s->is_fiq = 0; +    s->intmask = 0x1f; +    s->intcntl = 0; +    memset(s->prio, 0, sizeof s->prio); +} + +static int imx_avic_init(SysBusDevice *sbd) +{ +    DeviceState *dev = DEVICE(sbd); +    IMXAVICState *s = IMX_AVIC(dev); + +    memory_region_init_io(&s->iomem, OBJECT(s), &imx_avic_ops, s, +                          "imx_avic", 0x1000); +    sysbus_init_mmio(sbd, &s->iomem); + +    qdev_init_gpio_in(dev, imx_avic_set_irq, IMX_AVIC_NUM_IRQS); +    sysbus_init_irq(sbd, &s->irq); +    sysbus_init_irq(sbd, &s->fiq); + +    return 0; +} + + +static void imx_avic_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); +    k->init = imx_avic_init; +    dc->vmsd = &vmstate_imx_avic; +    dc->reset = imx_avic_reset; +    dc->desc = "i.MX Advanced Vector Interrupt Controller"; +} + +static const TypeInfo imx_avic_info = { +    .name = TYPE_IMX_AVIC, +    .parent = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(IMXAVICState), +    .class_init = imx_avic_class_init, +}; + +static void imx_avic_register_types(void) +{ +    type_register_static(&imx_avic_info); +} + +type_init(imx_avic_register_types) diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c new file mode 100644 index 00000000..b5279323 --- /dev/null +++ b/hw/intc/ioapic.c @@ -0,0 +1,261 @@ +/* + *  ioapic.c IOAPIC emulation logic + * + *  Copyright (c) 2004-2005 Fabrice Bellard + * + *  Split the ioapic logic from apic.c + *  Xiantao Zhang <xiantao.zhang@intel.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software 
Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "hw/hw.h" +#include "hw/i386/pc.h" +#include "hw/i386/ioapic.h" +#include "hw/i386/ioapic_internal.h" + +//#define DEBUG_IOAPIC + +#ifdef DEBUG_IOAPIC +#define DPRINTF(fmt, ...)                                       \ +    do { printf("ioapic: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) +#endif + +static IOAPICCommonState *ioapics[MAX_IOAPICS]; + +/* global variable from ioapic_common.c */ +extern int ioapic_no; + +static void ioapic_service(IOAPICCommonState *s) +{ +    uint8_t i; +    uint8_t trig_mode; +    uint8_t vector; +    uint8_t delivery_mode; +    uint32_t mask; +    uint64_t entry; +    uint8_t dest; +    uint8_t dest_mode; + +    for (i = 0; i < IOAPIC_NUM_PINS; i++) { +        mask = 1 << i; +        if (s->irr & mask) { +            entry = s->ioredtbl[i]; +            if (!(entry & IOAPIC_LVT_MASKED)) { +                trig_mode = ((entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1); +                dest = entry >> IOAPIC_LVT_DEST_SHIFT; +                dest_mode = (entry >> IOAPIC_LVT_DEST_MODE_SHIFT) & 1; +                delivery_mode = +                    (entry >> IOAPIC_LVT_DELIV_MODE_SHIFT) & IOAPIC_DM_MASK; +                if (trig_mode == IOAPIC_TRIGGER_EDGE) { +                    s->irr &= ~mask; +                } else { +                    s->ioredtbl[i] |= IOAPIC_LVT_REMOTE_IRR; +                } +                if (delivery_mode == IOAPIC_DM_EXTINT) { +                    vector = pic_read_irq(isa_pic); +                } else { +                    vector = entry & IOAPIC_VECTOR_MASK; +                } +                apic_deliver_irq(dest, dest_mode, delivery_mode, +                                 vector, trig_mode); +            } +        } +    } +} + +static void ioapic_set_irq(void *opaque, int vector, int level) +{ +    IOAPICCommonState *s = opaque; + +    /* ISA IRQs map to GSI 1-1 except for IRQ0 which maps +     * to GSI 2.  GSI maps to ioapic 1-1.  This is not +     * the cleanest way of doing it but it should work. */ + +    DPRINTF("%s: %s vec %x\n", __func__, level ? "raise" : "lower", vector); +    if (vector == 0) { +        vector = 2; +    } +    if (vector >= 0 && vector < IOAPIC_NUM_PINS) { +        uint32_t mask = 1 << vector; +        uint64_t entry = s->ioredtbl[vector]; + +        if (((entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1) == +            IOAPIC_TRIGGER_LEVEL) { +            /* level triggered */ +            if (level) { +                s->irr |= mask; +                ioapic_service(s); +            } else { +                s->irr &= ~mask; +            } +        } else { +            /* According to the 82093AA manual, we must ignore edge requests +             * if the input pin is masked. 
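+             * Level requests, in contrast, are latched in IRR above even
+             * while masked and get delivered once the entry is unmasked.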
*/ +            if (level && !(entry & IOAPIC_LVT_MASKED)) { +                s->irr |= mask; +                ioapic_service(s); +            } +        } +    } +} + +void ioapic_eoi_broadcast(int vector) +{ +    IOAPICCommonState *s; +    uint64_t entry; +    int i, n; + +    for (i = 0; i < MAX_IOAPICS; i++) { +        s = ioapics[i]; +        if (!s) { +            continue; +        } +        for (n = 0; n < IOAPIC_NUM_PINS; n++) { +            entry = s->ioredtbl[n]; +            if ((entry & IOAPIC_LVT_REMOTE_IRR) +                && (entry & IOAPIC_VECTOR_MASK) == vector) { +                s->ioredtbl[n] = entry & ~IOAPIC_LVT_REMOTE_IRR; +                if (!(entry & IOAPIC_LVT_MASKED) && (s->irr & (1 << n))) { +                    ioapic_service(s); +                } +            } +        } +    } +} + +static uint64_t +ioapic_mem_read(void *opaque, hwaddr addr, unsigned int size) +{ +    IOAPICCommonState *s = opaque; +    int index; +    uint32_t val = 0; + +    switch (addr & 0xff) { +    case IOAPIC_IOREGSEL: +        val = s->ioregsel; +        break; +    case IOAPIC_IOWIN: +        if (size != 4) { +            break; +        } +        switch (s->ioregsel) { +        case IOAPIC_REG_ID: +            val = s->id << IOAPIC_ID_SHIFT; +            break; +        case IOAPIC_REG_VER: +            val = IOAPIC_VERSION | +                ((IOAPIC_NUM_PINS - 1) << IOAPIC_VER_ENTRIES_SHIFT); +            break; +        case IOAPIC_REG_ARB: +            val = 0; +            break; +        default: +            index = (s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1; +            if (index >= 0 && index < IOAPIC_NUM_PINS) { +                if (s->ioregsel & 1) { +                    val = s->ioredtbl[index] >> 32; +                } else { +                    val = s->ioredtbl[index] & 0xffffffff; +                } +            } +        } +        DPRINTF("read: %08x = %08x\n", s->ioregsel, val); +        break; +    } +    return val; +} + +static void +ioapic_mem_write(void *opaque, hwaddr addr, uint64_t val, +                 unsigned int size) +{ +    IOAPICCommonState *s = opaque; +    int index; + +    switch (addr & 0xff) { +    case IOAPIC_IOREGSEL: +        s->ioregsel = val; +        break; +    case IOAPIC_IOWIN: +        if (size != 4) { +            break; +        } +        DPRINTF("write: %08x = %08" PRIx64 "\n", s->ioregsel, val); +        switch (s->ioregsel) { +        case IOAPIC_REG_ID: +            s->id = (val >> IOAPIC_ID_SHIFT) & IOAPIC_ID_MASK; +            break; +        case IOAPIC_REG_VER: +        case IOAPIC_REG_ARB: +            break; +        default: +            index = (s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1; +            if (index >= 0 && index < IOAPIC_NUM_PINS) { +                if (s->ioregsel & 1) { +                    s->ioredtbl[index] &= 0xffffffff; +                    s->ioredtbl[index] |= (uint64_t)val << 32; +                } else { +                    s->ioredtbl[index] &= ~0xffffffffULL; +                    s->ioredtbl[index] |= val; +                } +                ioapic_service(s); +            } +        } +        break; +    } +} + +static const MemoryRegionOps ioapic_io_ops = { +    .read = ioapic_mem_read, +    .write = ioapic_mem_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void ioapic_realize(DeviceState *dev, Error **errp) +{ +    IOAPICCommonState *s = IOAPIC_COMMON(dev); + +    memory_region_init_io(&s->io_memory, OBJECT(s), &ioapic_io_ops, s, +                          
"ioapic", 0x1000); + +    qdev_init_gpio_in(dev, ioapic_set_irq, IOAPIC_NUM_PINS); + +    ioapics[ioapic_no] = s; +} + +static void ioapic_class_init(ObjectClass *klass, void *data) +{ +    IOAPICCommonClass *k = IOAPIC_COMMON_CLASS(klass); +    DeviceClass *dc = DEVICE_CLASS(klass); + +    k->realize = ioapic_realize; +    dc->reset = ioapic_reset_common; +} + +static const TypeInfo ioapic_info = { +    .name          = "ioapic", +    .parent        = TYPE_IOAPIC_COMMON, +    .instance_size = sizeof(IOAPICCommonState), +    .class_init    = ioapic_class_init, +}; + +static void ioapic_register_types(void) +{ +    type_register_static(&ioapic_info); +} + +type_init(ioapic_register_types) diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c new file mode 100644 index 00000000..8b7d1180 --- /dev/null +++ b/hw/intc/ioapic_common.c @@ -0,0 +1,123 @@ +/* + *  IOAPIC emulation logic - common bits of emulated and KVM kernel model + * + *  Copyright (c) 2004-2005 Fabrice Bellard + *  Copyright (c) 2009      Xiantao Zhang, Intel + *  Copyright (c) 2011      Jan Kiszka, Siemens AG + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "hw/i386/ioapic.h" +#include "hw/i386/ioapic_internal.h" +#include "hw/sysbus.h" + +/* ioapic_no count start from 0 to MAX_IOAPICS, + * remove as static variable from ioapic_common_init. 
+ * now as a global variable, let child to increase the counter + * then we can drop the 'instance_no' argument + * and convert to our QOM's realize function + */ +int ioapic_no; + +void ioapic_reset_common(DeviceState *dev) +{ +    IOAPICCommonState *s = IOAPIC_COMMON(dev); +    int i; + +    s->id = 0; +    s->ioregsel = 0; +    s->irr = 0; +    for (i = 0; i < IOAPIC_NUM_PINS; i++) { +        s->ioredtbl[i] = 1 << IOAPIC_LVT_MASKED_SHIFT; +    } +} + +static void ioapic_dispatch_pre_save(void *opaque) +{ +    IOAPICCommonState *s = IOAPIC_COMMON(opaque); +    IOAPICCommonClass *info = IOAPIC_COMMON_GET_CLASS(s); + +    if (info->pre_save) { +        info->pre_save(s); +    } +} + +static int ioapic_dispatch_post_load(void *opaque, int version_id) +{ +    IOAPICCommonState *s = IOAPIC_COMMON(opaque); +    IOAPICCommonClass *info = IOAPIC_COMMON_GET_CLASS(s); + +    if (info->post_load) { +        info->post_load(s); +    } +    return 0; +} + +static void ioapic_common_realize(DeviceState *dev, Error **errp) +{ +    IOAPICCommonState *s = IOAPIC_COMMON(dev); +    IOAPICCommonClass *info; + +    if (ioapic_no >= MAX_IOAPICS) { +        error_setg(errp, "Only %d ioapics allowed", MAX_IOAPICS); +        return; +    } + +    info = IOAPIC_COMMON_GET_CLASS(s); +    info->realize(dev, errp); + +    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io_memory); +    ioapic_no++; +} + +static const VMStateDescription vmstate_ioapic_common = { +    .name = "ioapic", +    .version_id = 3, +    .minimum_version_id = 1, +    .pre_save = ioapic_dispatch_pre_save, +    .post_load = ioapic_dispatch_post_load, +    .fields = (VMStateField[]) { +        VMSTATE_UINT8(id, IOAPICCommonState), +        VMSTATE_UINT8(ioregsel, IOAPICCommonState), +        VMSTATE_UNUSED_V(2, 8), /* to account for qemu-kvm's v2 format */ +        VMSTATE_UINT32_V(irr, IOAPICCommonState, 2), +        VMSTATE_UINT64_ARRAY(ioredtbl, IOAPICCommonState, IOAPIC_NUM_PINS), +        VMSTATE_END_OF_LIST() +    } +}; + +static void ioapic_common_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); + +    dc->realize = ioapic_common_realize; +    dc->vmsd = &vmstate_ioapic_common; +} + +static const TypeInfo ioapic_common_type = { +    .name = TYPE_IOAPIC_COMMON, +    .parent = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(IOAPICCommonState), +    .class_size = sizeof(IOAPICCommonClass), +    .class_init = ioapic_common_class_init, +    .abstract = true, +}; + +static void ioapic_common_register_types(void) +{ +    type_register_static(&ioapic_common_type); +} + +type_init(ioapic_common_register_types) diff --git a/hw/intc/lm32_pic.c b/hw/intc/lm32_pic.c new file mode 100644 index 00000000..641ee472 --- /dev/null +++ b/hw/intc/lm32_pic.c @@ -0,0 +1,203 @@ +/* + *  LatticeMico32 CPU interrupt controller logic. + * + *  Copyright (c) 2010 Michael Walle <michael@walle.cc> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include <assert.h> + +#include "hw/hw.h" +#include "hw/i386/pc.h" +#include "monitor/monitor.h" +#include "hw/sysbus.h" +#include "trace.h" +#include "hw/lm32/lm32_pic.h" + +#define TYPE_LM32_PIC "lm32-pic" +#define LM32_PIC(obj) OBJECT_CHECK(LM32PicState, (obj), TYPE_LM32_PIC) + +struct LM32PicState { +    SysBusDevice parent_obj; + +    qemu_irq parent_irq; +    uint32_t im;        /* interrupt mask */ +    uint32_t ip;        /* interrupt pending */ +    uint32_t irq_state; + +    /* statistics */ +    uint32_t stats_irq_count[32]; +}; +typedef struct LM32PicState LM32PicState; + +static LM32PicState *pic; +void lm32_hmp_info_pic(Monitor *mon, const QDict *qdict) +{ +    if (pic == NULL) { +        return; +    } + +    monitor_printf(mon, "lm32-pic: im=%08x ip=%08x irq_state=%08x\n", +            pic->im, pic->ip, pic->irq_state); +} + +void lm32_hmp_info_irq(Monitor *mon, const QDict *qdict) +{ +    int i; +    uint32_t count; + +    if (pic == NULL) { +        return; +    } + +    monitor_printf(mon, "IRQ statistics:\n"); +    for (i = 0; i < 32; i++) { +        count = pic->stats_irq_count[i]; +        if (count > 0) { +            monitor_printf(mon, "%2d: %u\n", i, count); +        } +    } +} + +static void update_irq(LM32PicState *s) +{ +    s->ip |= s->irq_state; + +    if (s->ip & s->im) { +        trace_lm32_pic_raise_irq(); +        qemu_irq_raise(s->parent_irq); +    } else { +        trace_lm32_pic_lower_irq(); +        qemu_irq_lower(s->parent_irq); +    } +} + +static void irq_handler(void *opaque, int irq, int level) +{ +    LM32PicState *s = opaque; + +    assert(irq < 32); +    trace_lm32_pic_interrupt(irq, level); + +    if (level) { +        s->irq_state |= (1 << irq); +        s->stats_irq_count[irq]++; +    } else { +        s->irq_state &= ~(1 << irq); +    } + +    update_irq(s); +} + +void lm32_pic_set_im(DeviceState *d, uint32_t im) +{ +    LM32PicState *s = LM32_PIC(d); + +    trace_lm32_pic_set_im(im); +    s->im = im; + +    update_irq(s); +} + +void lm32_pic_set_ip(DeviceState *d, uint32_t ip) +{ +    LM32PicState *s = LM32_PIC(d); + +    trace_lm32_pic_set_ip(ip); + +    /* ack interrupt */ +    s->ip &= ~ip; + +    update_irq(s); +} + +uint32_t lm32_pic_get_im(DeviceState *d) +{ +    LM32PicState *s = LM32_PIC(d); + +    trace_lm32_pic_get_im(s->im); +    return s->im; +} + +uint32_t lm32_pic_get_ip(DeviceState *d) +{ +    LM32PicState *s = LM32_PIC(d); + +    trace_lm32_pic_get_ip(s->ip); +    return s->ip; +} + +static void pic_reset(DeviceState *d) +{ +    LM32PicState *s = LM32_PIC(d); +    int i; + +    s->im = 0; +    s->ip = 0; +    s->irq_state = 0; +    for (i = 0; i < 32; i++) { +        s->stats_irq_count[i] = 0; +    } +} + +static int lm32_pic_init(SysBusDevice *sbd) +{ +    DeviceState *dev = DEVICE(sbd); +    LM32PicState *s = LM32_PIC(dev); + +    qdev_init_gpio_in(dev, irq_handler, 32); +    sysbus_init_irq(sbd, &s->parent_irq); + +    pic = s; + +    return 0; +} + +static const VMStateDescription vmstate_lm32_pic = { +    .name = "lm32-pic", +    .version_id = 1, +    .minimum_version_id = 1, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(im, LM32PicState), +        VMSTATE_UINT32(ip, LM32PicState), +        VMSTATE_UINT32(irq_state, LM32PicState), +        VMSTATE_UINT32_ARRAY(stats_irq_count, LM32PicState, 32), +        VMSTATE_END_OF_LIST() +    } +}; + +static 
void lm32_pic_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + +    k->init = lm32_pic_init; +    dc->reset = pic_reset; +    dc->vmsd = &vmstate_lm32_pic; +} + +static const TypeInfo lm32_pic_info = { +    .name          = TYPE_LM32_PIC, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(LM32PicState), +    .class_init    = lm32_pic_class_init, +}; + +static void lm32_pic_register_types(void) +{ +    type_register_static(&lm32_pic_info); +} + +type_init(lm32_pic_register_types) diff --git a/hw/intc/omap_intc.c b/hw/intc/omap_intc.c new file mode 100644 index 00000000..e9b38a3c --- /dev/null +++ b/hw/intc/omap_intc.c @@ -0,0 +1,667 @@ +/* + * TI OMAP interrupt controller emulation. + * + * Copyright (C) 2006-2008 Andrzej Zaborowski  <balrog@zabor.org> + * Copyright (C) 2007-2008 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ +#include "hw/hw.h" +#include "hw/arm/omap.h" +#include "hw/sysbus.h" + +/* Interrupt Handlers */ +struct omap_intr_handler_bank_s { +    uint32_t irqs; +    uint32_t inputs; +    uint32_t mask; +    uint32_t fiq; +    uint32_t sens_edge; +    uint32_t swi; +    unsigned char priority[32]; +}; + +#define TYPE_OMAP_INTC "common-omap-intc" +#define OMAP_INTC(obj) \ +    OBJECT_CHECK(struct omap_intr_handler_s, (obj), TYPE_OMAP_INTC) + +struct omap_intr_handler_s { +    SysBusDevice parent_obj; + +    qemu_irq *pins; +    qemu_irq parent_intr[2]; +    MemoryRegion mmio; +    void *iclk; +    void *fclk; +    unsigned char nbanks; +    int level_only; +    uint32_t size; + +    uint8_t revision; + +    /* state */ +    uint32_t new_agr[2]; +    int sir_intr[2]; +    int autoidle; +    uint32_t mask; +    struct omap_intr_handler_bank_s bank[3]; +}; + +static void omap_inth_sir_update(struct omap_intr_handler_s *s, int is_fiq) +{ +    int i, j, sir_intr, p_intr, p; +    uint32_t level; +    sir_intr = 0; +    p_intr = 255; + +    /* Find the interrupt line with the highest dynamic priority. +     * Note: 0 denotes the hightest priority. +     * If all interrupts have the same priority, the default order is IRQ_N, +     * IRQ_N-1,...,IRQ_0. */ +    for (j = 0; j < s->nbanks; ++j) { +        level = s->bank[j].irqs & ~s->bank[j].mask & +                (is_fiq ? 
s->bank[j].fiq : ~s->bank[j].fiq); + +        while (level != 0) { +            i = ctz32(level); +            p = s->bank[j].priority[i]; +            if (p <= p_intr) { +                p_intr = p; +                sir_intr = 32 * j + i; +            } +            level &= level - 1; +        } +    } +    s->sir_intr[is_fiq] = sir_intr; +} + +static inline void omap_inth_update(struct omap_intr_handler_s *s, int is_fiq) +{ +    int i; +    uint32_t has_intr = 0; + +    for (i = 0; i < s->nbanks; ++i) +        has_intr |= s->bank[i].irqs & ~s->bank[i].mask & +                (is_fiq ? s->bank[i].fiq : ~s->bank[i].fiq); + +    if (s->new_agr[is_fiq] & has_intr & s->mask) { +        s->new_agr[is_fiq] = 0; +        omap_inth_sir_update(s, is_fiq); +        qemu_set_irq(s->parent_intr[is_fiq], 1); +    } +} + +#define INT_FALLING_EDGE	0 +#define INT_LOW_LEVEL		1 + +static void omap_set_intr(void *opaque, int irq, int req) +{ +    struct omap_intr_handler_s *ih = (struct omap_intr_handler_s *) opaque; +    uint32_t rise; + +    struct omap_intr_handler_bank_s *bank = &ih->bank[irq >> 5]; +    int n = irq & 31; + +    if (req) { +        rise = ~bank->irqs & (1 << n); +        if (~bank->sens_edge & (1 << n)) +            rise &= ~bank->inputs; + +        bank->inputs |= (1 << n); +        if (rise) { +            bank->irqs |= rise; +            omap_inth_update(ih, 0); +            omap_inth_update(ih, 1); +        } +    } else { +        rise = bank->sens_edge & bank->irqs & (1 << n); +        bank->irqs &= ~rise; +        bank->inputs &= ~(1 << n); +    } +} + +/* Simplified version with no edge detection */ +static void omap_set_intr_noedge(void *opaque, int irq, int req) +{ +    struct omap_intr_handler_s *ih = (struct omap_intr_handler_s *) opaque; +    uint32_t rise; + +    struct omap_intr_handler_bank_s *bank = &ih->bank[irq >> 5]; +    int n = irq & 31; + +    if (req) { +        rise = ~bank->inputs & (1 << n); +        if (rise) { +            bank->irqs |= bank->inputs |= rise; +            omap_inth_update(ih, 0); +            omap_inth_update(ih, 1); +        } +    } else +        bank->irqs = (bank->inputs &= ~(1 << n)) | bank->swi; +} + +static uint64_t omap_inth_read(void *opaque, hwaddr addr, +                               unsigned size) +{ +    struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque; +    int i, offset = addr; +    int bank_no = offset >> 8; +    int line_no; +    struct omap_intr_handler_bank_s *bank = &s->bank[bank_no]; +    offset &= 0xff; + +    switch (offset) { +    case 0x00:	/* ITR */ +        return bank->irqs; + +    case 0x04:	/* MIR */ +        return bank->mask; + +    case 0x10:	/* SIR_IRQ_CODE */ +    case 0x14:  /* SIR_FIQ_CODE */ +        if (bank_no != 0) +            break; +        line_no = s->sir_intr[(offset - 0x10) >> 2]; +        bank = &s->bank[line_no >> 5]; +        i = line_no & 31; +        if (((bank->sens_edge >> i) & 1) == INT_FALLING_EDGE) +            bank->irqs &= ~(1 << i); +        return line_no; + +    case 0x18:	/* CONTROL_REG */ +        if (bank_no != 0) +            break; +        return 0; + +    case 0x1c:	/* ILR0 */ +    case 0x20:	/* ILR1 */ +    case 0x24:	/* ILR2 */ +    case 0x28:	/* ILR3 */ +    case 0x2c:	/* ILR4 */ +    case 0x30:	/* ILR5 */ +    case 0x34:	/* ILR6 */ +    case 0x38:	/* ILR7 */ +    case 0x3c:	/* ILR8 */ +    case 0x40:	/* ILR9 */ +    case 0x44:	/* ILR10 */ +    case 0x48:	/* ILR11 */ +    case 0x4c:	/* ILR12 */ +    case 0x50:	/* ILR13 */ +    case 0x54:	/* ILR14 
*/ +    case 0x58:	/* ILR15 */ +    case 0x5c:	/* ILR16 */ +    case 0x60:	/* ILR17 */ +    case 0x64:	/* ILR18 */ +    case 0x68:	/* ILR19 */ +    case 0x6c:	/* ILR20 */ +    case 0x70:	/* ILR21 */ +    case 0x74:	/* ILR22 */ +    case 0x78:	/* ILR23 */ +    case 0x7c:	/* ILR24 */ +    case 0x80:	/* ILR25 */ +    case 0x84:	/* ILR26 */ +    case 0x88:	/* ILR27 */ +    case 0x8c:	/* ILR28 */ +    case 0x90:	/* ILR29 */ +    case 0x94:	/* ILR30 */ +    case 0x98:	/* ILR31 */ +        i = (offset - 0x1c) >> 2; +        return (bank->priority[i] << 2) | +                (((bank->sens_edge >> i) & 1) << 1) | +                ((bank->fiq >> i) & 1); + +    case 0x9c:	/* ISR */ +        return 0x00000000; + +    } +    OMAP_BAD_REG(addr); +    return 0; +} + +static void omap_inth_write(void *opaque, hwaddr addr, +                            uint64_t value, unsigned size) +{ +    struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque; +    int i, offset = addr; +    int bank_no = offset >> 8; +    struct omap_intr_handler_bank_s *bank = &s->bank[bank_no]; +    offset &= 0xff; + +    switch (offset) { +    case 0x00:	/* ITR */ +        /* Important: ignore the clearing if the IRQ is level-triggered and +           the input bit is 1 */ +        bank->irqs &= value | (bank->inputs & bank->sens_edge); +        return; + +    case 0x04:	/* MIR */ +        bank->mask = value; +        omap_inth_update(s, 0); +        omap_inth_update(s, 1); +        return; + +    case 0x10:	/* SIR_IRQ_CODE */ +    case 0x14:	/* SIR_FIQ_CODE */ +        OMAP_RO_REG(addr); +        break; + +    case 0x18:	/* CONTROL_REG */ +        if (bank_no != 0) +            break; +        if (value & 2) { +            qemu_set_irq(s->parent_intr[1], 0); +            s->new_agr[1] = ~0; +            omap_inth_update(s, 1); +        } +        if (value & 1) { +            qemu_set_irq(s->parent_intr[0], 0); +            s->new_agr[0] = ~0; +            omap_inth_update(s, 0); +        } +        return; + +    case 0x1c:	/* ILR0 */ +    case 0x20:	/* ILR1 */ +    case 0x24:	/* ILR2 */ +    case 0x28:	/* ILR3 */ +    case 0x2c:	/* ILR4 */ +    case 0x30:	/* ILR5 */ +    case 0x34:	/* ILR6 */ +    case 0x38:	/* ILR7 */ +    case 0x3c:	/* ILR8 */ +    case 0x40:	/* ILR9 */ +    case 0x44:	/* ILR10 */ +    case 0x48:	/* ILR11 */ +    case 0x4c:	/* ILR12 */ +    case 0x50:	/* ILR13 */ +    case 0x54:	/* ILR14 */ +    case 0x58:	/* ILR15 */ +    case 0x5c:	/* ILR16 */ +    case 0x60:	/* ILR17 */ +    case 0x64:	/* ILR18 */ +    case 0x68:	/* ILR19 */ +    case 0x6c:	/* ILR20 */ +    case 0x70:	/* ILR21 */ +    case 0x74:	/* ILR22 */ +    case 0x78:	/* ILR23 */ +    case 0x7c:	/* ILR24 */ +    case 0x80:	/* ILR25 */ +    case 0x84:	/* ILR26 */ +    case 0x88:	/* ILR27 */ +    case 0x8c:	/* ILR28 */ +    case 0x90:	/* ILR29 */ +    case 0x94:	/* ILR30 */ +    case 0x98:	/* ILR31 */ +        i = (offset - 0x1c) >> 2; +        bank->priority[i] = (value >> 2) & 0x1f; +        bank->sens_edge &= ~(1 << i); +        bank->sens_edge |= ((value >> 1) & 1) << i; +        bank->fiq &= ~(1 << i); +        bank->fiq |= (value & 1) << i; +        return; + +    case 0x9c:	/* ISR */ +        for (i = 0; i < 32; i ++) +            if (value & (1 << i)) { +                omap_set_intr(s, 32 * bank_no + i, 1); +                return; +            } +        return; +    } +    OMAP_BAD_REG(addr); +} + +static const MemoryRegionOps omap_inth_mem_ops = { +    .read = omap_inth_read, +    .write = omap_inth_write, +    .endianness = 
DEVICE_NATIVE_ENDIAN, +    .valid = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static void omap_inth_reset(DeviceState *dev) +{ +    struct omap_intr_handler_s *s = OMAP_INTC(dev); +    int i; + +    for (i = 0; i < s->nbanks; ++i){ +        s->bank[i].irqs = 0x00000000; +        s->bank[i].mask = 0xffffffff; +        s->bank[i].sens_edge = 0x00000000; +        s->bank[i].fiq = 0x00000000; +        s->bank[i].inputs = 0x00000000; +        s->bank[i].swi = 0x00000000; +        memset(s->bank[i].priority, 0, sizeof(s->bank[i].priority)); + +        if (s->level_only) +            s->bank[i].sens_edge = 0xffffffff; +    } + +    s->new_agr[0] = ~0; +    s->new_agr[1] = ~0; +    s->sir_intr[0] = 0; +    s->sir_intr[1] = 0; +    s->autoidle = 0; +    s->mask = ~0; + +    qemu_set_irq(s->parent_intr[0], 0); +    qemu_set_irq(s->parent_intr[1], 0); +} + +static int omap_intc_init(SysBusDevice *sbd) +{ +    DeviceState *dev = DEVICE(sbd); +    struct omap_intr_handler_s *s = OMAP_INTC(dev); + +    if (!s->iclk) { +        hw_error("omap-intc: clk not connected\n"); +    } +    s->nbanks = 1; +    sysbus_init_irq(sbd, &s->parent_intr[0]); +    sysbus_init_irq(sbd, &s->parent_intr[1]); +    qdev_init_gpio_in(dev, omap_set_intr, s->nbanks * 32); +    memory_region_init_io(&s->mmio, OBJECT(s), &omap_inth_mem_ops, s, +                          "omap-intc", s->size); +    sysbus_init_mmio(sbd, &s->mmio); +    return 0; +} + +static Property omap_intc_properties[] = { +    DEFINE_PROP_UINT32("size", struct omap_intr_handler_s, size, 0x100), +    DEFINE_PROP_PTR("clk", struct omap_intr_handler_s, iclk), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void omap_intc_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + +    k->init = omap_intc_init; +    dc->reset = omap_inth_reset; +    dc->props = omap_intc_properties; +    /* Reason: pointer property "clk" */ +    dc->cannot_instantiate_with_device_add_yet = true; +} + +static const TypeInfo omap_intc_info = { +    .name          = "omap-intc", +    .parent        = TYPE_OMAP_INTC, +    .class_init    = omap_intc_class_init, +}; + +static uint64_t omap2_inth_read(void *opaque, hwaddr addr, +                                unsigned size) +{ +    struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque; +    int offset = addr; +    int bank_no, line_no; +    struct omap_intr_handler_bank_s *bank = NULL; + +    if ((offset & 0xf80) == 0x80) { +        bank_no = (offset & 0x60) >> 5; +        if (bank_no < s->nbanks) { +            offset &= ~0x60; +            bank = &s->bank[bank_no]; +        } else { +            OMAP_BAD_REG(addr); +            return 0; +        } +    } + +    switch (offset) { +    case 0x00:	/* INTC_REVISION */ +        return s->revision; + +    case 0x10:	/* INTC_SYSCONFIG */ +        return (s->autoidle >> 2) & 1; + +    case 0x14:	/* INTC_SYSSTATUS */ +        return 1;						/* RESETDONE */ + +    case 0x40:	/* INTC_SIR_IRQ */ +        return s->sir_intr[0]; + +    case 0x44:	/* INTC_SIR_FIQ */ +        return s->sir_intr[1]; + +    case 0x48:	/* INTC_CONTROL */ +        return (!s->mask) << 2;					/* GLOBALMASK */ + +    case 0x4c:	/* INTC_PROTECTION */ +        return 0; + +    case 0x50:	/* INTC_IDLE */ +        return s->autoidle & 3; + +    /* Per-bank registers */ +    case 0x80:	/* INTC_ITR */ +        return bank->inputs; + +    case 0x84:	/* INTC_MIR */ +        return 
bank->mask; + +    case 0x88:	/* INTC_MIR_CLEAR */ +    case 0x8c:	/* INTC_MIR_SET */ +        return 0; + +    case 0x90:	/* INTC_ISR_SET */ +        return bank->swi; + +    case 0x94:	/* INTC_ISR_CLEAR */ +        return 0; + +    case 0x98:	/* INTC_PENDING_IRQ */ +        return bank->irqs & ~bank->mask & ~bank->fiq; + +    case 0x9c:	/* INTC_PENDING_FIQ */ +        return bank->irqs & ~bank->mask & bank->fiq; + +    /* Per-line registers */ +    case 0x100 ... 0x300:	/* INTC_ILR */ +        bank_no = (offset - 0x100) >> 7; +        if (bank_no > s->nbanks) +            break; +        bank = &s->bank[bank_no]; +        line_no = (offset & 0x7f) >> 2; +        return (bank->priority[line_no] << 2) | +                ((bank->fiq >> line_no) & 1); +    } +    OMAP_BAD_REG(addr); +    return 0; +} + +static void omap2_inth_write(void *opaque, hwaddr addr, +                             uint64_t value, unsigned size) +{ +    struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque; +    int offset = addr; +    int bank_no, line_no; +    struct omap_intr_handler_bank_s *bank = NULL; + +    if ((offset & 0xf80) == 0x80) { +        bank_no = (offset & 0x60) >> 5; +        if (bank_no < s->nbanks) { +            offset &= ~0x60; +            bank = &s->bank[bank_no]; +        } else { +            OMAP_BAD_REG(addr); +            return; +        } +    } + +    switch (offset) { +    case 0x10:	/* INTC_SYSCONFIG */ +        s->autoidle &= 4; +        s->autoidle |= (value & 1) << 2; +        if (value & 2) {                                        /* SOFTRESET */ +            omap_inth_reset(DEVICE(s)); +        } +        return; + +    case 0x48:	/* INTC_CONTROL */ +        s->mask = (value & 4) ? 0 : ~0;				/* GLOBALMASK */ +        if (value & 2) {					/* NEWFIQAGR */ +            qemu_set_irq(s->parent_intr[1], 0); +            s->new_agr[1] = ~0; +            omap_inth_update(s, 1); +        } +        if (value & 1) {					/* NEWIRQAGR */ +            qemu_set_irq(s->parent_intr[0], 0); +            s->new_agr[0] = ~0; +            omap_inth_update(s, 0); +        } +        return; + +    case 0x4c:	/* INTC_PROTECTION */ +        /* TODO: Make a bitmap (or sizeof(char)map) of access privileges +         * for every register, see Chapter 3 and 4 for privileged mode.  */ +        if (value & 1) +            fprintf(stderr, "%s: protection mode enable attempt\n", +                            __FUNCTION__); +        return; + +    case 0x50:	/* INTC_IDLE */ +        s->autoidle &= ~3; +        s->autoidle |= value & 3; +        return; + +    /* Per-bank registers */ +    case 0x84:	/* INTC_MIR */ +        bank->mask = value; +        omap_inth_update(s, 0); +        omap_inth_update(s, 1); +        return; + +    case 0x88:	/* INTC_MIR_CLEAR */ +        bank->mask &= ~value; +        omap_inth_update(s, 0); +        omap_inth_update(s, 1); +        return; + +    case 0x8c:	/* INTC_MIR_SET */ +        bank->mask |= value; +        return; + +    case 0x90:	/* INTC_ISR_SET */ +        bank->irqs |= bank->swi |= value; +        omap_inth_update(s, 0); +        omap_inth_update(s, 1); +        return; + +    case 0x94:	/* INTC_ISR_CLEAR */ +        bank->swi &= ~value; +        bank->irqs = bank->swi & bank->inputs; +        return; + +    /* Per-line registers */ +    case 0x100 ... 
0x300:	/* INTC_ILR */ +        bank_no = (offset - 0x100) >> 7; +        if (bank_no > s->nbanks) +            break; +        bank = &s->bank[bank_no]; +        line_no = (offset & 0x7f) >> 2; +        bank->priority[line_no] = (value >> 2) & 0x3f; +        bank->fiq &= ~(1 << line_no); +        bank->fiq |= (value & 1) << line_no; +        return; + +    case 0x00:	/* INTC_REVISION */ +    case 0x14:	/* INTC_SYSSTATUS */ +    case 0x40:	/* INTC_SIR_IRQ */ +    case 0x44:	/* INTC_SIR_FIQ */ +    case 0x80:	/* INTC_ITR */ +    case 0x98:	/* INTC_PENDING_IRQ */ +    case 0x9c:	/* INTC_PENDING_FIQ */ +        OMAP_RO_REG(addr); +        return; +    } +    OMAP_BAD_REG(addr); +} + +static const MemoryRegionOps omap2_inth_mem_ops = { +    .read = omap2_inth_read, +    .write = omap2_inth_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +    .valid = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static int omap2_intc_init(SysBusDevice *sbd) +{ +    DeviceState *dev = DEVICE(sbd); +    struct omap_intr_handler_s *s = OMAP_INTC(dev); + +    if (!s->iclk) { +        hw_error("omap2-intc: iclk not connected\n"); +    } +    if (!s->fclk) { +        hw_error("omap2-intc: fclk not connected\n"); +    } +    s->level_only = 1; +    s->nbanks = 3; +    sysbus_init_irq(sbd, &s->parent_intr[0]); +    sysbus_init_irq(sbd, &s->parent_intr[1]); +    qdev_init_gpio_in(dev, omap_set_intr_noedge, s->nbanks * 32); +    memory_region_init_io(&s->mmio, OBJECT(s), &omap2_inth_mem_ops, s, +                          "omap2-intc", 0x1000); +    sysbus_init_mmio(sbd, &s->mmio); +    return 0; +} + +static Property omap2_intc_properties[] = { +    DEFINE_PROP_UINT8("revision", struct omap_intr_handler_s, +    revision, 0x21), +    DEFINE_PROP_PTR("iclk", struct omap_intr_handler_s, iclk), +    DEFINE_PROP_PTR("fclk", struct omap_intr_handler_s, fclk), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void omap2_intc_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + +    k->init = omap2_intc_init; +    dc->reset = omap_inth_reset; +    dc->props = omap2_intc_properties; +    /* Reason: pointer property "iclk", "fclk" */ +    dc->cannot_instantiate_with_device_add_yet = true; +} + +static const TypeInfo omap2_intc_info = { +    .name          = "omap2-intc", +    .parent        = TYPE_OMAP_INTC, +    .class_init    = omap2_intc_class_init, +}; + +static const TypeInfo omap_intc_type_info = { +    .name          = TYPE_OMAP_INTC, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(struct omap_intr_handler_s), +    .abstract      = true, +}; + +static void omap_intc_register_types(void) +{ +    type_register_static(&omap_intc_type_info); +    type_register_static(&omap_intc_info); +    type_register_static(&omap2_intc_info); +} + +type_init(omap_intc_register_types) diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c new file mode 100644 index 00000000..14ab0e31 --- /dev/null +++ b/hw/intc/openpic.c @@ -0,0 +1,1661 @@ +/* + * OpenPIC emulation + * + * Copyright (c) 2004 Jocelyn Mayer + *               2011 Alexander Graf + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to 
permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +/* + * + * Based on OpenPic implementations: + * - Intel GW80314 I/O companion chip developer's manual + * - Motorola MPC8245 & MPC8540 user manuals. + * - Motorola MCP750 (aka Raven) programmer manual. + * - Motorola Harrier programmer manuel + * + * Serial interrupts, as implemented in Raven chipset are not supported yet. + * + */ +#include "hw/hw.h" +#include "hw/ppc/mac.h" +#include "hw/pci/pci.h" +#include "hw/ppc/openpic.h" +#include "hw/ppc/ppc_e500.h" +#include "hw/sysbus.h" +#include "hw/pci/msi.h" +#include "qemu/bitops.h" +#include "qapi/qmp/qerror.h" + +//#define DEBUG_OPENPIC + +#ifdef DEBUG_OPENPIC +static const int debug_openpic = 1; +#else +static const int debug_openpic = 0; +#endif + +#define DPRINTF(fmt, ...) do { \ +        if (debug_openpic) { \ +            printf(fmt , ## __VA_ARGS__); \ +        } \ +    } while (0) + +#define MAX_CPU     32 +#define MAX_MSI     8 +#define VID         0x03 /* MPIC version ID */ + +/* OpenPIC capability flags */ +#define OPENPIC_FLAG_IDR_CRIT     (1 << 0) +#define OPENPIC_FLAG_ILR          (2 << 0) + +/* OpenPIC address map */ +#define OPENPIC_GLB_REG_START        0x0 +#define OPENPIC_GLB_REG_SIZE         0x10F0 +#define OPENPIC_TMR_REG_START        0x10F0 +#define OPENPIC_TMR_REG_SIZE         0x220 +#define OPENPIC_MSI_REG_START        0x1600 +#define OPENPIC_MSI_REG_SIZE         0x200 +#define OPENPIC_SUMMARY_REG_START   0x3800 +#define OPENPIC_SUMMARY_REG_SIZE    0x800 +#define OPENPIC_SRC_REG_START        0x10000 +#define OPENPIC_SRC_REG_SIZE         (OPENPIC_MAX_SRC * 0x20) +#define OPENPIC_CPU_REG_START        0x20000 +#define OPENPIC_CPU_REG_SIZE         0x100 + ((MAX_CPU - 1) * 0x1000) + +/* Raven */ +#define RAVEN_MAX_CPU      2 +#define RAVEN_MAX_EXT     48 +#define RAVEN_MAX_IRQ     64 +#define RAVEN_MAX_TMR      OPENPIC_MAX_TMR +#define RAVEN_MAX_IPI      OPENPIC_MAX_IPI + +/* Interrupt definitions */ +#define RAVEN_FE_IRQ     (RAVEN_MAX_EXT)     /* Internal functional IRQ */ +#define RAVEN_ERR_IRQ    (RAVEN_MAX_EXT + 1) /* Error IRQ */ +#define RAVEN_TMR_IRQ    (RAVEN_MAX_EXT + 2) /* First timer IRQ */ +#define RAVEN_IPI_IRQ    (RAVEN_TMR_IRQ + RAVEN_MAX_TMR) /* First IPI IRQ */ +/* First doorbell IRQ */ +#define RAVEN_DBL_IRQ    (RAVEN_IPI_IRQ + (RAVEN_MAX_CPU * RAVEN_MAX_IPI)) + +typedef struct FslMpicInfo { +    int max_ext; +} FslMpicInfo; + +static FslMpicInfo fsl_mpic_20 = { +    .max_ext = 12, +}; + +static FslMpicInfo fsl_mpic_42 = { +    .max_ext = 12, +}; + +#define FRR_NIRQ_SHIFT    16 +#define FRR_NCPU_SHIFT     8 +#define FRR_VID_SHIFT      0 + +#define VID_REVISION_1_2   2 +#define VID_REVISION_1_3   3 + +#define VIR_GENERIC      0x00000000 /* Generic Vendor ID */ + +#define GCR_RESET        0x80000000 +#define GCR_MODE_PASS    0x00000000 +#define GCR_MODE_MIXED   0x20000000 +#define 
GCR_MODE_PROXY   0x60000000 + +#define TBCR_CI           0x80000000 /* count inhibit */ +#define TCCR_TOG          0x80000000 /* toggles when decrement to zero */ + +#define IDR_EP_SHIFT      31 +#define IDR_EP_MASK       (1U << IDR_EP_SHIFT) +#define IDR_CI0_SHIFT     30 +#define IDR_CI1_SHIFT     29 +#define IDR_P1_SHIFT      1 +#define IDR_P0_SHIFT      0 + +#define ILR_INTTGT_MASK   0x000000ff +#define ILR_INTTGT_INT    0x00 +#define ILR_INTTGT_CINT   0x01 /* critical */ +#define ILR_INTTGT_MCP    0x02 /* machine check */ + +/* The currently supported INTTGT values happen to be the same as QEMU's + * openpic output codes, but don't depend on this.  The output codes + * could change (unlikely, but...) or support could be added for + * more INTTGT values. + */ +static const int inttgt_output[][2] = { +    { ILR_INTTGT_INT, OPENPIC_OUTPUT_INT }, +    { ILR_INTTGT_CINT, OPENPIC_OUTPUT_CINT }, +    { ILR_INTTGT_MCP, OPENPIC_OUTPUT_MCK }, +}; + +static int inttgt_to_output(int inttgt) +{ +    int i; + +    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) { +        if (inttgt_output[i][0] == inttgt) { +            return inttgt_output[i][1]; +        } +    } + +    fprintf(stderr, "%s: unsupported inttgt %d\n", __func__, inttgt); +    return OPENPIC_OUTPUT_INT; +} + +static int output_to_inttgt(int output) +{ +    int i; + +    for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) { +        if (inttgt_output[i][1] == output) { +            return inttgt_output[i][0]; +        } +    } + +    abort(); +} + +#define MSIIR_OFFSET       0x140 +#define MSIIR_SRS_SHIFT    29 +#define MSIIR_SRS_MASK     (0x7 << MSIIR_SRS_SHIFT) +#define MSIIR_IBS_SHIFT    24 +#define MSIIR_IBS_MASK     (0x1f << MSIIR_IBS_SHIFT) + +static int get_current_cpu(void) +{ +    if (!current_cpu) { +        return -1; +    } + +    return current_cpu->cpu_index; +} + +static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr, +                                          int idx); +static void openpic_cpu_write_internal(void *opaque, hwaddr addr, +                                       uint32_t val, int idx); +static void openpic_reset(DeviceState *d); + +typedef enum IRQType { +    IRQ_TYPE_NORMAL = 0, +    IRQ_TYPE_FSLINT,        /* FSL internal interrupt -- level only */ +    IRQ_TYPE_FSLSPECIAL,    /* FSL timer/IPI interrupt, edge, no polarity */ +} IRQType; + +/* Round up to the nearest 64 IRQs so that the queue length + * won't change when moving between 32 and 64 bit hosts. + */ +#define IRQQUEUE_SIZE_BITS ((OPENPIC_MAX_IRQ + 63) & ~63) + +typedef struct IRQQueue { +    unsigned long *queue; +    int32_t queue_size; /* Only used for VMSTATE_BITMAP */ +    int next; +    int priority; +} IRQQueue; + +typedef struct IRQSource { +    uint32_t ivpr;  /* IRQ vector/priority register */ +    uint32_t idr;   /* IRQ destination register */ +    uint32_t destmask; /* bitmap of CPU destinations */ +    int last_cpu; +    int output;     /* IRQ level, e.g. 
OPENPIC_OUTPUT_INT */ +    int pending;    /* TRUE if IRQ is pending */ +    IRQType type; +    bool level:1;   /* level-triggered */ +    bool nomask:1;  /* critical interrupts ignore mask on some FSL MPICs */ +} IRQSource; + +#define IVPR_MASK_SHIFT       31 +#define IVPR_MASK_MASK        (1U << IVPR_MASK_SHIFT) +#define IVPR_ACTIVITY_SHIFT   30 +#define IVPR_ACTIVITY_MASK    (1U << IVPR_ACTIVITY_SHIFT) +#define IVPR_MODE_SHIFT       29 +#define IVPR_MODE_MASK        (1U << IVPR_MODE_SHIFT) +#define IVPR_POLARITY_SHIFT   23 +#define IVPR_POLARITY_MASK    (1U << IVPR_POLARITY_SHIFT) +#define IVPR_SENSE_SHIFT      22 +#define IVPR_SENSE_MASK       (1U << IVPR_SENSE_SHIFT) + +#define IVPR_PRIORITY_MASK     (0xFU << 16) +#define IVPR_PRIORITY(_ivprr_) ((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16)) +#define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask) + +/* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */ +#define IDR_EP      0x80000000  /* external pin */ +#define IDR_CI      0x40000000  /* critical interrupt */ + +typedef struct OpenPICTimer { +    uint32_t tccr;  /* Global timer current count register */ +    uint32_t tbcr;  /* Global timer base count register */ +} OpenPICTimer; + +typedef struct OpenPICMSI { +    uint32_t msir;   /* Shared Message Signaled Interrupt Register */ +} OpenPICMSI; + +typedef struct IRQDest { +    int32_t ctpr; /* CPU current task priority */ +    IRQQueue raised; +    IRQQueue servicing; +    qemu_irq *irqs; + +    /* Count of IRQ sources asserting on non-INT outputs */ +    uint32_t outputs_active[OPENPIC_OUTPUT_NB]; +} IRQDest; + +#define OPENPIC(obj) OBJECT_CHECK(OpenPICState, (obj), TYPE_OPENPIC) + +typedef struct OpenPICState { +    /*< private >*/ +    SysBusDevice parent_obj; +    /*< public >*/ + +    MemoryRegion mem; + +    /* Behavior control */ +    FslMpicInfo *fsl; +    uint32_t model; +    uint32_t flags; +    uint32_t nb_irqs; +    uint32_t vid; +    uint32_t vir; /* Vendor identification register */ +    uint32_t vector_mask; +    uint32_t tfrr_reset; +    uint32_t ivpr_reset; +    uint32_t idr_reset; +    uint32_t brr1; +    uint32_t mpic_mode_mask; + +    /* Sub-regions */ +    MemoryRegion sub_io_mem[6]; + +    /* Global registers */ +    uint32_t frr; /* Feature reporting register */ +    uint32_t gcr; /* Global configuration register  */ +    uint32_t pir; /* Processor initialization register */ +    uint32_t spve; /* Spurious vector register */ +    uint32_t tfrr; /* Timer frequency reporting register */ +    /* Source registers */ +    IRQSource src[OPENPIC_MAX_IRQ]; +    /* Local registers per output pin */ +    IRQDest dst[MAX_CPU]; +    uint32_t nb_cpus; +    /* Timer registers */ +    OpenPICTimer timers[OPENPIC_MAX_TMR]; +    /* Shared MSI registers */ +    OpenPICMSI msi[MAX_MSI]; +    uint32_t max_irq; +    uint32_t irq_ipi0; +    uint32_t irq_tim0; +    uint32_t irq_msi; +} OpenPICState; + +static inline void IRQ_setbit(IRQQueue *q, int n_IRQ) +{ +    set_bit(n_IRQ, q->queue); +} + +static inline void IRQ_resetbit(IRQQueue *q, int n_IRQ) +{ +    clear_bit(n_IRQ, q->queue); +} + +static void IRQ_check(OpenPICState *opp, IRQQueue *q) +{ +    int irq = -1; +    int next = -1; +    int priority = -1; + +    for (;;) { +        irq = find_next_bit(q->queue, opp->max_irq, irq + 1); +        if (irq == opp->max_irq) { +            break; +        } + +        DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n", +                irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority); + +        if 
(IVPR_PRIORITY(opp->src[irq].ivpr) > priority) { +            next = irq; +            priority = IVPR_PRIORITY(opp->src[irq].ivpr); +        } +    } + +    q->next = next; +    q->priority = priority; +} + +static int IRQ_get_next(OpenPICState *opp, IRQQueue *q) +{ +    /* XXX: optimize */ +    IRQ_check(opp, q); + +    return q->next; +} + +static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ, +                           bool active, bool was_active) +{ +    IRQDest *dst; +    IRQSource *src; +    int priority; + +    dst = &opp->dst[n_CPU]; +    src = &opp->src[n_IRQ]; + +    DPRINTF("%s: IRQ %d active %d was %d\n", +            __func__, n_IRQ, active, was_active); + +    if (src->output != OPENPIC_OUTPUT_INT) { +        DPRINTF("%s: output %d irq %d active %d was %d count %d\n", +                __func__, src->output, n_IRQ, active, was_active, +                dst->outputs_active[src->output]); + +        /* On Freescale MPIC, critical interrupts ignore priority, +         * IACK, EOI, etc.  Before MPIC v4.1 they also ignore +         * masking. +         */ +        if (active) { +            if (!was_active && dst->outputs_active[src->output]++ == 0) { +                DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d\n", +                        __func__, src->output, n_CPU, n_IRQ); +                qemu_irq_raise(dst->irqs[src->output]); +            } +        } else { +            if (was_active && --dst->outputs_active[src->output] == 0) { +                DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d\n", +                        __func__, src->output, n_CPU, n_IRQ); +                qemu_irq_lower(dst->irqs[src->output]); +            } +        } + +        return; +    } + +    priority = IVPR_PRIORITY(src->ivpr); + +    /* Even if the interrupt doesn't have enough priority, +     * it is still raised, in case ctpr is lowered later. 
+     */ +    if (active) { +        IRQ_setbit(&dst->raised, n_IRQ); +    } else { +        IRQ_resetbit(&dst->raised, n_IRQ); +    } + +    IRQ_check(opp, &dst->raised); + +    if (active && priority <= dst->ctpr) { +        DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n", +                __func__, n_IRQ, priority, dst->ctpr, n_CPU); +        active = 0; +    } + +    if (active) { +        if (IRQ_get_next(opp, &dst->servicing) >= 0 && +                priority <= dst->servicing.priority) { +            DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n", +                    __func__, n_IRQ, dst->servicing.next, n_CPU); +        } else { +            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n", +                    __func__, n_CPU, n_IRQ, dst->raised.next); +            qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]); +        } +    } else { +        IRQ_get_next(opp, &dst->servicing); +        if (dst->raised.priority > dst->ctpr && +                dst->raised.priority > dst->servicing.priority) { +            DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n", +                    __func__, n_IRQ, dst->raised.next, dst->raised.priority, +                    dst->ctpr, dst->servicing.priority, n_CPU); +            /* IRQ line stays asserted */ +        } else { +            DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n", +                    __func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU); +            qemu_irq_lower(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]); +        } +    } +} + +/* update pic state because registers for n_IRQ have changed value */ +static void openpic_update_irq(OpenPICState *opp, int n_IRQ) +{ +    IRQSource *src; +    bool active, was_active; +    int i; + +    src = &opp->src[n_IRQ]; +    active = src->pending; + +    if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) { +        /* Interrupt source is disabled */ +        DPRINTF("%s: IRQ %d is disabled\n", __func__, n_IRQ); +        active = false; +    } + +    was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK); + +    /* +     * We don't have a similar check for already-active because +     * ctpr may have changed and we need to withdraw the interrupt. 
+     */ +    if (!active && !was_active) { +        DPRINTF("%s: IRQ %d is already inactive\n", __func__, n_IRQ); +        return; +    } + +    if (active) { +        src->ivpr |= IVPR_ACTIVITY_MASK; +    } else { +        src->ivpr &= ~IVPR_ACTIVITY_MASK; +    } + +    if (src->destmask == 0) { +        /* No target */ +        DPRINTF("%s: IRQ %d has no target\n", __func__, n_IRQ); +        return; +    } + +    if (src->destmask == (1 << src->last_cpu)) { +        /* Only one CPU is allowed to receive this IRQ */ +        IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active); +    } else if (!(src->ivpr & IVPR_MODE_MASK)) { +        /* Directed delivery mode */ +        for (i = 0; i < opp->nb_cpus; i++) { +            if (src->destmask & (1 << i)) { +                IRQ_local_pipe(opp, i, n_IRQ, active, was_active); +            } +        } +    } else { +        /* Distributed delivery mode */ +        for (i = src->last_cpu + 1; i != src->last_cpu; i++) { +            if (i == opp->nb_cpus) { +                i = 0; +            } +            if (src->destmask & (1 << i)) { +                IRQ_local_pipe(opp, i, n_IRQ, active, was_active); +                src->last_cpu = i; +                break; +            } +        } +    } +} + +static void openpic_set_irq(void *opaque, int n_IRQ, int level) +{ +    OpenPICState *opp = opaque; +    IRQSource *src; + +    if (n_IRQ >= OPENPIC_MAX_IRQ) { +        fprintf(stderr, "%s: IRQ %d out of range\n", __func__, n_IRQ); +        abort(); +    } + +    src = &opp->src[n_IRQ]; +    DPRINTF("openpic: set irq %d = %d ivpr=0x%08x\n", +            n_IRQ, level, src->ivpr); +    if (src->level) { +        /* level-sensitive irq */ +        src->pending = level; +        openpic_update_irq(opp, n_IRQ); +    } else { +        /* edge-sensitive irq */ +        if (level) { +            src->pending = 1; +            openpic_update_irq(opp, n_IRQ); +        } + +        if (src->output != OPENPIC_OUTPUT_INT) { +            /* Edge-triggered interrupts shouldn't be used +             * with non-INT delivery, but just in case, +             * try to make it do something sane rather than +             * cause an interrupt storm.  This is close to +             * what you'd probably see happen in real hardware. 
+             */ +            src->pending = 0; +            openpic_update_irq(opp, n_IRQ); +        } +    } +} + +static inline uint32_t read_IRQreg_idr(OpenPICState *opp, int n_IRQ) +{ +    return opp->src[n_IRQ].idr; +} + +static inline uint32_t read_IRQreg_ilr(OpenPICState *opp, int n_IRQ) +{ +    if (opp->flags & OPENPIC_FLAG_ILR) { +        return output_to_inttgt(opp->src[n_IRQ].output); +    } + +    return 0xffffffff; +} + +static inline uint32_t read_IRQreg_ivpr(OpenPICState *opp, int n_IRQ) +{ +    return opp->src[n_IRQ].ivpr; +} + +static inline void write_IRQreg_idr(OpenPICState *opp, int n_IRQ, uint32_t val) +{ +    IRQSource *src = &opp->src[n_IRQ]; +    uint32_t normal_mask = (1UL << opp->nb_cpus) - 1; +    uint32_t crit_mask = 0; +    uint32_t mask = normal_mask; +    int crit_shift = IDR_EP_SHIFT - opp->nb_cpus; +    int i; + +    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) { +        crit_mask = mask << crit_shift; +        mask |= crit_mask | IDR_EP; +    } + +    src->idr = val & mask; +    DPRINTF("Set IDR %d to 0x%08x\n", n_IRQ, src->idr); + +    if (opp->flags & OPENPIC_FLAG_IDR_CRIT) { +        if (src->idr & crit_mask) { +            if (src->idr & normal_mask) { +                DPRINTF("%s: IRQ configured for multiple output types, using " +                        "critical\n", __func__); +            } + +            src->output = OPENPIC_OUTPUT_CINT; +            src->nomask = true; +            src->destmask = 0; + +            for (i = 0; i < opp->nb_cpus; i++) { +                int n_ci = IDR_CI0_SHIFT - i; + +                if (src->idr & (1UL << n_ci)) { +                    src->destmask |= 1UL << i; +                } +            } +        } else { +            src->output = OPENPIC_OUTPUT_INT; +            src->nomask = false; +            src->destmask = src->idr & normal_mask; +        } +    } else { +        src->destmask = src->idr; +    } +} + +static inline void write_IRQreg_ilr(OpenPICState *opp, int n_IRQ, uint32_t val) +{ +    if (opp->flags & OPENPIC_FLAG_ILR) { +        IRQSource *src = &opp->src[n_IRQ]; + +        src->output = inttgt_to_output(val & ILR_INTTGT_MASK); +        DPRINTF("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr, +                src->output); + +        /* TODO: on MPIC v4.0 only, set nomask for non-INT */ +    } +} + +static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val) +{ +    uint32_t mask; + +    /* NOTE when implementing newer FSL MPIC models: starting with v4.0, +     * the polarity bit is read-only on internal interrupts. +     */ +    mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK | +           IVPR_POLARITY_MASK | opp->vector_mask; + +    /* ACTIVITY bit is read-only */ +    opp->src[n_IRQ].ivpr = +        (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask); + +    /* For FSL internal interrupts, The sense bit is reserved and zero, +     * and the interrupt is always level-triggered.  Timers and IPIs +     * have no sense or polarity bits, and are edge-triggered. 
+     */ +    switch (opp->src[n_IRQ].type) { +    case IRQ_TYPE_NORMAL: +        opp->src[n_IRQ].level = !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK); +        break; + +    case IRQ_TYPE_FSLINT: +        opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK; +        break; + +    case IRQ_TYPE_FSLSPECIAL: +        opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK); +        break; +    } + +    openpic_update_irq(opp, n_IRQ); +    DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val, +            opp->src[n_IRQ].ivpr); +} + +static void openpic_gcr_write(OpenPICState *opp, uint64_t val) +{ +    bool mpic_proxy = false; + +    if (val & GCR_RESET) { +        openpic_reset(DEVICE(opp)); +        return; +    } + +    opp->gcr &= ~opp->mpic_mode_mask; +    opp->gcr |= val & opp->mpic_mode_mask; + +    /* Set external proxy mode */ +    if ((val & opp->mpic_mode_mask) == GCR_MODE_PROXY) { +        mpic_proxy = true; +    } + +    ppce500_set_mpic_proxy(mpic_proxy); +} + +static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val, +                              unsigned len) +{ +    OpenPICState *opp = opaque; +    IRQDest *dst; +    int idx; + +    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64 "\n", +            __func__, addr, val); +    if (addr & 0xF) { +        return; +    } +    switch (addr) { +    case 0x00: /* Block Revision Register1 (BRR1) is Readonly */ +        break; +    case 0x40: +    case 0x50: +    case 0x60: +    case 0x70: +    case 0x80: +    case 0x90: +    case 0xA0: +    case 0xB0: +        openpic_cpu_write_internal(opp, addr, val, get_current_cpu()); +        break; +    case 0x1000: /* FRR */ +        break; +    case 0x1020: /* GCR */ +        openpic_gcr_write(opp, val); +        break; +    case 0x1080: /* VIR */ +        break; +    case 0x1090: /* PIR */ +        for (idx = 0; idx < opp->nb_cpus; idx++) { +            if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) { +                DPRINTF("Raise OpenPIC RESET output for CPU %d\n", idx); +                dst = &opp->dst[idx]; +                qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]); +            } else if (!(val & (1 << idx)) && (opp->pir & (1 << idx))) { +                DPRINTF("Lower OpenPIC RESET output for CPU %d\n", idx); +                dst = &opp->dst[idx]; +                qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]); +            } +        } +        opp->pir = val; +        break; +    case 0x10A0: /* IPI_IVPR */ +    case 0x10B0: +    case 0x10C0: +    case 0x10D0: +        { +            int idx; +            idx = (addr - 0x10A0) >> 4; +            write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val); +        } +        break; +    case 0x10E0: /* SPVE */ +        opp->spve = val & opp->vector_mask; +        break; +    default: +        break; +    } +} + +static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len) +{ +    OpenPICState *opp = opaque; +    uint32_t retval; + +    DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr); +    retval = 0xFFFFFFFF; +    if (addr & 0xF) { +        return retval; +    } +    switch (addr) { +    case 0x1000: /* FRR */ +        retval = opp->frr; +        break; +    case 0x1020: /* GCR */ +        retval = opp->gcr; +        break; +    case 0x1080: /* VIR */ +        retval = opp->vir; +        break; +    case 0x1090: /* PIR */ +        retval = 0x00000000; +        break; +    case 0x00: /* Block Revision Register1 (BRR1) */ +        retval = opp->brr1; +        break; +    case 0x40: +    case 0x50: 
+    case 0x60: +    case 0x70: +    case 0x80: +    case 0x90: +    case 0xA0: +    case 0xB0: +        retval = openpic_cpu_read_internal(opp, addr, get_current_cpu()); +        break; +    case 0x10A0: /* IPI_IVPR */ +    case 0x10B0: +    case 0x10C0: +    case 0x10D0: +        { +            int idx; +            idx = (addr - 0x10A0) >> 4; +            retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx); +        } +        break; +    case 0x10E0: /* SPVE */ +        retval = opp->spve; +        break; +    default: +        break; +    } +    DPRINTF("%s: => 0x%08x\n", __func__, retval); + +    return retval; +} + +static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val, +                                unsigned len) +{ +    OpenPICState *opp = opaque; +    int idx; + +    addr += 0x10f0; + +    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64 "\n", +            __func__, addr, val); +    if (addr & 0xF) { +        return; +    } + +    if (addr == 0x10f0) { +        /* TFRR */ +        opp->tfrr = val; +        return; +    } + +    idx = (addr >> 6) & 0x3; +    addr = addr & 0x30; + +    switch (addr & 0x30) { +    case 0x00: /* TCCR */ +        break; +    case 0x10: /* TBCR */ +        if ((opp->timers[idx].tccr & TCCR_TOG) != 0 && +            (val & TBCR_CI) == 0 && +            (opp->timers[idx].tbcr & TBCR_CI) != 0) { +            opp->timers[idx].tccr &= ~TCCR_TOG; +        } +        opp->timers[idx].tbcr = val; +        break; +    case 0x20: /* TVPR */ +        write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val); +        break; +    case 0x30: /* TDR */ +        write_IRQreg_idr(opp, opp->irq_tim0 + idx, val); +        break; +    } +} + +static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len) +{ +    OpenPICState *opp = opaque; +    uint32_t retval = -1; +    int idx; + +    DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr); +    if (addr & 0xF) { +        goto out; +    } +    idx = (addr >> 6) & 0x3; +    if (addr == 0x0) { +        /* TFRR */ +        retval = opp->tfrr; +        goto out; +    } +    switch (addr & 0x30) { +    case 0x00: /* TCCR */ +        retval = opp->timers[idx].tccr; +        break; +    case 0x10: /* TBCR */ +        retval = opp->timers[idx].tbcr; +        break; +    case 0x20: /* TIPV */ +        retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx); +        break; +    case 0x30: /* TIDE (TIDR) */ +        retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx); +        break; +    } + +out: +    DPRINTF("%s: => 0x%08x\n", __func__, retval); + +    return retval; +} + +static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val, +                              unsigned len) +{ +    OpenPICState *opp = opaque; +    int idx; + +    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64 "\n", +            __func__, addr, val); + +    addr = addr & 0xffff; +    idx = addr >> 5; + +    switch (addr & 0x1f) { +    case 0x00: +        write_IRQreg_ivpr(opp, idx, val); +        break; +    case 0x10: +        write_IRQreg_idr(opp, idx, val); +        break; +    case 0x18: +        write_IRQreg_ilr(opp, idx, val); +        break; +    } +} + +static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len) +{ +    OpenPICState *opp = opaque; +    uint32_t retval; +    int idx; + +    DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr); +    retval = 0xFFFFFFFF; + +    addr = addr & 0xffff; +    idx = addr >> 5; + +    switch (addr & 0x1f) { +    case 0x00: +        retval = 
read_IRQreg_ivpr(opp, idx); +        break; +    case 0x10: +        retval = read_IRQreg_idr(opp, idx); +        break; +    case 0x18: +        retval = read_IRQreg_ilr(opp, idx); +        break; +    } + +    DPRINTF("%s: => 0x%08x\n", __func__, retval); +    return retval; +} + +static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val, +                              unsigned size) +{ +    OpenPICState *opp = opaque; +    int idx = opp->irq_msi; +    int srs, ibs; + +    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64 "\n", +            __func__, addr, val); +    if (addr & 0xF) { +        return; +    } + +    switch (addr) { +    case MSIIR_OFFSET: +        srs = val >> MSIIR_SRS_SHIFT; +        idx += srs; +        ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT; +        opp->msi[srs].msir |= 1 << ibs; +        openpic_set_irq(opp, idx, 1); +        break; +    default: +        /* most registers are read-only, thus ignored */ +        break; +    } +} + +static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size) +{ +    OpenPICState *opp = opaque; +    uint64_t r = 0; +    int i, srs; + +    DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr); +    if (addr & 0xF) { +        return -1; +    } + +    srs = addr >> 4; + +    switch (addr) { +    case 0x00: +    case 0x10: +    case 0x20: +    case 0x30: +    case 0x40: +    case 0x50: +    case 0x60: +    case 0x70: /* MSIRs */ +        r = opp->msi[srs].msir; +        /* Clear on read */ +        opp->msi[srs].msir = 0; +        openpic_set_irq(opp, opp->irq_msi + srs, 0); +        break; +    case 0x120: /* MSISR */ +        for (i = 0; i < MAX_MSI; i++) { +            r |= (opp->msi[i].msir ? 1 : 0) << i; +        } +        break; +    } + +    return r; +} + +static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size) +{ +    uint64_t r = 0; + +    DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr); + +    /* TODO: EISR/EIMR */ + +    return r; +} + +static void openpic_summary_write(void *opaque, hwaddr addr, uint64_t val, +                                  unsigned size) +{ +    DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64 "\n", +            __func__, addr, val); + +    /* TODO: EISR/EIMR */ +} + +static void openpic_cpu_write_internal(void *opaque, hwaddr addr, +                                       uint32_t val, int idx) +{ +    OpenPICState *opp = opaque; +    IRQSource *src; +    IRQDest *dst; +    int s_IRQ, n_IRQ; + +    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x\n", __func__, idx, +            addr, val); + +    if (idx < 0 || idx >= opp->nb_cpus) { +        return; +    } + +    if (addr & 0xF) { +        return; +    } +    dst = &opp->dst[idx]; +    addr &= 0xFF0; +    switch (addr) { +    case 0x40: /* IPIDR */ +    case 0x50: +    case 0x60: +    case 0x70: +        idx = (addr - 0x40) >> 4; +        /* we use IDE as mask which CPUs to deliver the IPI to still. 
*/ +        opp->src[opp->irq_ipi0 + idx].destmask |= val; +        openpic_set_irq(opp, opp->irq_ipi0 + idx, 1); +        openpic_set_irq(opp, opp->irq_ipi0 + idx, 0); +        break; +    case 0x80: /* CTPR */ +        dst->ctpr = val & 0x0000000F; + +        DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d\n", +                __func__, idx, dst->ctpr, dst->raised.priority, +                dst->servicing.priority); + +        if (dst->raised.priority <= dst->ctpr) { +            DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr\n", +                    __func__, idx); +            qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]); +        } else if (dst->raised.priority > dst->servicing.priority) { +            DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d\n", +                    __func__, idx, dst->raised.next); +            qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]); +        } + +        break; +    case 0x90: /* WHOAMI */ +        /* Read-only register */ +        break; +    case 0xA0: /* IACK */ +        /* Read-only register */ +        break; +    case 0xB0: /* EOI */ +        DPRINTF("EOI\n"); +        s_IRQ = IRQ_get_next(opp, &dst->servicing); + +        if (s_IRQ < 0) { +            DPRINTF("%s: EOI with no interrupt in service\n", __func__); +            break; +        } + +        IRQ_resetbit(&dst->servicing, s_IRQ); +        /* Set up next servicing IRQ */ +        s_IRQ = IRQ_get_next(opp, &dst->servicing); +        /* Check queued interrupts. */ +        n_IRQ = IRQ_get_next(opp, &dst->raised); +        src = &opp->src[n_IRQ]; +        if (n_IRQ != -1 && +            (s_IRQ == -1 || +             IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) { +            DPRINTF("Raise OpenPIC INT output cpu %d irq %d\n", +                    idx, n_IRQ); +            qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]); +        } +        break; +    default: +        break; +    } +} + +static void openpic_cpu_write(void *opaque, hwaddr addr, uint64_t val, +                              unsigned len) +{ +    openpic_cpu_write_internal(opaque, addr, val, (addr & 0x1f000) >> 12); +} + + +static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu) +{ +    IRQSource *src; +    int retval, irq; + +    DPRINTF("Lower OpenPIC INT output\n"); +    qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]); + +    irq = IRQ_get_next(opp, &dst->raised); +    DPRINTF("IACK: irq=%d\n", irq); + +    if (irq == -1) { +        /* No more interrupt pending */ +        return opp->spve; +    } + +    src = &opp->src[irq]; +    if (!(src->ivpr & IVPR_ACTIVITY_MASK) || +            !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) { +        fprintf(stderr, "%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n", +                __func__, irq, dst->ctpr, src->ivpr); +        openpic_update_irq(opp, irq); +        retval = opp->spve; +    } else { +        /* IRQ enter servicing state */ +        IRQ_setbit(&dst->servicing, irq); +        retval = IVPR_VECTOR(opp, src->ivpr); +    } + +    if (!src->level) { +        /* edge-sensitive IRQ */ +        src->ivpr &= ~IVPR_ACTIVITY_MASK; +        src->pending = 0; +        IRQ_resetbit(&dst->raised, irq); +    } + +    if ((irq >= opp->irq_ipi0) &&  (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) { +        src->destmask &= ~(1 << cpu); +        if (src->destmask && !src->level) { +            /* trigger on CPUs that didn't know about it yet */ +            openpic_set_irq(opp, irq, 1); +            openpic_set_irq(opp, irq, 0); +            /* 
if all CPUs knew about it, set active bit again */ +            src->ivpr |= IVPR_ACTIVITY_MASK; +        } +    } + +    return retval; +} + +static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr, +                                          int idx) +{ +    OpenPICState *opp = opaque; +    IRQDest *dst; +    uint32_t retval; + +    DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx "\n", __func__, idx, addr); +    retval = 0xFFFFFFFF; + +    if (idx < 0 || idx >= opp->nb_cpus) { +        return retval; +    } + +    if (addr & 0xF) { +        return retval; +    } +    dst = &opp->dst[idx]; +    addr &= 0xFF0; +    switch (addr) { +    case 0x80: /* CTPR */ +        retval = dst->ctpr; +        break; +    case 0x90: /* WHOAMI */ +        retval = idx; +        break; +    case 0xA0: /* IACK */ +        retval = openpic_iack(opp, dst, idx); +        break; +    case 0xB0: /* EOI */ +        retval = 0; +        break; +    default: +        break; +    } +    DPRINTF("%s: => 0x%08x\n", __func__, retval); + +    return retval; +} + +static uint64_t openpic_cpu_read(void *opaque, hwaddr addr, unsigned len) +{ +    return openpic_cpu_read_internal(opaque, addr, (addr & 0x1f000) >> 12); +} + +static const MemoryRegionOps openpic_glb_ops_le = { +    .write = openpic_gbl_write, +    .read  = openpic_gbl_read, +    .endianness = DEVICE_LITTLE_ENDIAN, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static const MemoryRegionOps openpic_glb_ops_be = { +    .write = openpic_gbl_write, +    .read  = openpic_gbl_read, +    .endianness = DEVICE_BIG_ENDIAN, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static const MemoryRegionOps openpic_tmr_ops_le = { +    .write = openpic_tmr_write, +    .read  = openpic_tmr_read, +    .endianness = DEVICE_LITTLE_ENDIAN, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static const MemoryRegionOps openpic_tmr_ops_be = { +    .write = openpic_tmr_write, +    .read  = openpic_tmr_read, +    .endianness = DEVICE_BIG_ENDIAN, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static const MemoryRegionOps openpic_cpu_ops_le = { +    .write = openpic_cpu_write, +    .read  = openpic_cpu_read, +    .endianness = DEVICE_LITTLE_ENDIAN, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static const MemoryRegionOps openpic_cpu_ops_be = { +    .write = openpic_cpu_write, +    .read  = openpic_cpu_read, +    .endianness = DEVICE_BIG_ENDIAN, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static const MemoryRegionOps openpic_src_ops_le = { +    .write = openpic_src_write, +    .read  = openpic_src_read, +    .endianness = DEVICE_LITTLE_ENDIAN, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static const MemoryRegionOps openpic_src_ops_be = { +    .write = openpic_src_write, +    .read  = openpic_src_read, +    .endianness = DEVICE_BIG_ENDIAN, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static const MemoryRegionOps openpic_msi_ops_be = { +    .read = openpic_msi_read, +    .write = openpic_msi_write, +    .endianness = DEVICE_BIG_ENDIAN, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static const MemoryRegionOps openpic_summary_ops_be = { +    .read = openpic_summary_read, +    .write = 
openpic_summary_write, +    .endianness = DEVICE_BIG_ENDIAN, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static void openpic_reset(DeviceState *d) +{ +    OpenPICState *opp = OPENPIC(d); +    int i; + +    opp->gcr = GCR_RESET; +    /* Initialise controller registers */ +    opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) | +               ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT) | +               (opp->vid << FRR_VID_SHIFT); + +    opp->pir = 0; +    opp->spve = -1 & opp->vector_mask; +    opp->tfrr = opp->tfrr_reset; +    /* Initialise IRQ sources */ +    for (i = 0; i < opp->max_irq; i++) { +        opp->src[i].ivpr = opp->ivpr_reset; +        switch (opp->src[i].type) { +        case IRQ_TYPE_NORMAL: +            opp->src[i].level = !!(opp->ivpr_reset & IVPR_SENSE_MASK); +            break; + +        case IRQ_TYPE_FSLINT: +            opp->src[i].ivpr |= IVPR_POLARITY_MASK; +            break; + +        case IRQ_TYPE_FSLSPECIAL: +            break; +        } + +        write_IRQreg_idr(opp, i, opp->idr_reset); +    } +    /* Initialise IRQ destinations */ +    for (i = 0; i < opp->nb_cpus; i++) { +        opp->dst[i].ctpr      = 15; +        opp->dst[i].raised.next = -1; +        opp->dst[i].raised.priority = 0; +        bitmap_clear(opp->dst[i].raised.queue, 0, IRQQUEUE_SIZE_BITS); +        opp->dst[i].servicing.next = -1; +        opp->dst[i].servicing.priority = 0; +        bitmap_clear(opp->dst[i].servicing.queue, 0, IRQQUEUE_SIZE_BITS); +    } +    /* Initialise timers */ +    for (i = 0; i < OPENPIC_MAX_TMR; i++) { +        opp->timers[i].tccr = 0; +        opp->timers[i].tbcr = TBCR_CI; +    } +    /* Go out of RESET state */ +    opp->gcr = 0; +} + +typedef struct MemReg { +    const char             *name; +    MemoryRegionOps const  *ops; +    hwaddr      start_addr; +    ram_addr_t              size; +} MemReg; + +static void fsl_common_init(OpenPICState *opp) +{ +    int i; +    int virq = OPENPIC_MAX_SRC; + +    opp->vid = VID_REVISION_1_2; +    opp->vir = VIR_GENERIC; +    opp->vector_mask = 0xFFFF; +    opp->tfrr_reset = 0; +    opp->ivpr_reset = IVPR_MASK_MASK; +    opp->idr_reset = 1 << 0; +    opp->max_irq = OPENPIC_MAX_IRQ; + +    opp->irq_ipi0 = virq; +    virq += OPENPIC_MAX_IPI; +    opp->irq_tim0 = virq; +    virq += OPENPIC_MAX_TMR; + +    assert(virq <= OPENPIC_MAX_IRQ); + +    opp->irq_msi = 224; + +    msi_supported = true; +    for (i = 0; i < opp->fsl->max_ext; i++) { +        opp->src[i].level = false; +    } + +    /* Internal interrupts, including message and MSI */ +    for (i = 16; i < OPENPIC_MAX_SRC; i++) { +        opp->src[i].type = IRQ_TYPE_FSLINT; +        opp->src[i].level = true; +    } + +    /* timers and IPIs */ +    for (i = OPENPIC_MAX_SRC; i < virq; i++) { +        opp->src[i].type = IRQ_TYPE_FSLSPECIAL; +        opp->src[i].level = false; +    } +} + +static void map_list(OpenPICState *opp, const MemReg *list, int *count) +{ +    while (list->name) { +        assert(*count < ARRAY_SIZE(opp->sub_io_mem)); + +        memory_region_init_io(&opp->sub_io_mem[*count], OBJECT(opp), list->ops, +                              opp, list->name, list->size); + +        memory_region_add_subregion(&opp->mem, list->start_addr, +                                    &opp->sub_io_mem[*count]); + +        (*count)++; +        list++; +    } +} + +static const VMStateDescription vmstate_openpic_irq_queue = { +    .name = "openpic_irq_queue", +    .version_id = 0, +    .minimum_version_id = 0, +    .fields = 
(VMStateField[]) { +        VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size), +        VMSTATE_INT32(next, IRQQueue), +        VMSTATE_INT32(priority, IRQQueue), +        VMSTATE_END_OF_LIST() +    } +}; + +static const VMStateDescription vmstate_openpic_irqdest = { +    .name = "openpic_irqdest", +    .version_id = 0, +    .minimum_version_id = 0, +    .fields = (VMStateField[]) { +        VMSTATE_INT32(ctpr, IRQDest), +        VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue, +                       IRQQueue), +        VMSTATE_STRUCT(servicing, IRQDest, 0, vmstate_openpic_irq_queue, +                       IRQQueue), +        VMSTATE_UINT32_ARRAY(outputs_active, IRQDest, OPENPIC_OUTPUT_NB), +        VMSTATE_END_OF_LIST() +    } +}; + +static const VMStateDescription vmstate_openpic_irqsource = { +    .name = "openpic_irqsource", +    .version_id = 0, +    .minimum_version_id = 0, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(ivpr, IRQSource), +        VMSTATE_UINT32(idr, IRQSource), +        VMSTATE_UINT32(destmask, IRQSource), +        VMSTATE_INT32(last_cpu, IRQSource), +        VMSTATE_INT32(pending, IRQSource), +        VMSTATE_END_OF_LIST() +    } +}; + +static const VMStateDescription vmstate_openpic_timer = { +    .name = "openpic_timer", +    .version_id = 0, +    .minimum_version_id = 0, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(tccr, OpenPICTimer), +        VMSTATE_UINT32(tbcr, OpenPICTimer), +        VMSTATE_END_OF_LIST() +    } +}; + +static const VMStateDescription vmstate_openpic_msi = { +    .name = "openpic_msi", +    .version_id = 0, +    .minimum_version_id = 0, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(msir, OpenPICMSI), +        VMSTATE_END_OF_LIST() +    } +}; + +static int openpic_post_load(void *opaque, int version_id) +{ +    OpenPICState *opp = (OpenPICState *)opaque; +    int i; + +    /* Update internal ivpr and idr variables */ +    for (i = 0; i < opp->max_irq; i++) { +        write_IRQreg_idr(opp, i, opp->src[i].idr); +        write_IRQreg_ivpr(opp, i, opp->src[i].ivpr); +    } + +    return 0; +} + +static const VMStateDescription vmstate_openpic = { +    .name = "openpic", +    .version_id = 3, +    .minimum_version_id = 3, +    .post_load = openpic_post_load, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(gcr, OpenPICState), +        VMSTATE_UINT32(vir, OpenPICState), +        VMSTATE_UINT32(pir, OpenPICState), +        VMSTATE_UINT32(spve, OpenPICState), +        VMSTATE_UINT32(tfrr, OpenPICState), +        VMSTATE_UINT32(max_irq, OpenPICState), +        VMSTATE_STRUCT_VARRAY_UINT32(src, OpenPICState, max_irq, 0, +                                     vmstate_openpic_irqsource, IRQSource), +        VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState), +        VMSTATE_STRUCT_VARRAY_UINT32(dst, OpenPICState, nb_cpus, 0, +                                     vmstate_openpic_irqdest, IRQDest), +        VMSTATE_STRUCT_ARRAY(timers, OpenPICState, OPENPIC_MAX_TMR, 0, +                             vmstate_openpic_timer, OpenPICTimer), +        VMSTATE_STRUCT_ARRAY(msi, OpenPICState, MAX_MSI, 0, +                             vmstate_openpic_msi, OpenPICMSI), +        VMSTATE_UINT32(irq_ipi0, OpenPICState), +        VMSTATE_UINT32(irq_tim0, OpenPICState), +        VMSTATE_UINT32(irq_msi, OpenPICState), +        VMSTATE_END_OF_LIST() +    } +}; + +static void openpic_init(Object *obj) +{ +    OpenPICState *opp = OPENPIC(obj); + +    memory_region_init(&opp->mem, obj, "openpic", 0x40000); +} + +static void 
openpic_realize(DeviceState *dev, Error **errp) +{ +    SysBusDevice *d = SYS_BUS_DEVICE(dev); +    OpenPICState *opp = OPENPIC(dev); +    int i, j; +    int list_count = 0; +    static const MemReg list_le[] = { +        {"glb", &openpic_glb_ops_le, +                OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE}, +        {"tmr", &openpic_tmr_ops_le, +                OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE}, +        {"src", &openpic_src_ops_le, +                OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE}, +        {"cpu", &openpic_cpu_ops_le, +                OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE}, +        {NULL} +    }; +    static const MemReg list_be[] = { +        {"glb", &openpic_glb_ops_be, +                OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE}, +        {"tmr", &openpic_tmr_ops_be, +                OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE}, +        {"src", &openpic_src_ops_be, +                OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE}, +        {"cpu", &openpic_cpu_ops_be, +                OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE}, +        {NULL} +    }; +    static const MemReg list_fsl[] = { +        {"msi", &openpic_msi_ops_be, +                OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE}, +        {"summary", &openpic_summary_ops_be, +                OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE}, +        {NULL} +    }; + +    if (opp->nb_cpus > MAX_CPU) { +        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, +                   TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus, +                   (uint64_t)0, (uint64_t)MAX_CPU); +        return; +    } + +    switch (opp->model) { +    case OPENPIC_MODEL_FSL_MPIC_20: +    default: +        opp->fsl = &fsl_mpic_20; +        opp->brr1 = 0x00400200; +        opp->flags |= OPENPIC_FLAG_IDR_CRIT; +        opp->nb_irqs = 80; +        opp->mpic_mode_mask = GCR_MODE_MIXED; + +        fsl_common_init(opp); +        map_list(opp, list_be, &list_count); +        map_list(opp, list_fsl, &list_count); + +        break; + +    case OPENPIC_MODEL_FSL_MPIC_42: +        opp->fsl = &fsl_mpic_42; +        opp->brr1 = 0x00400402; +        opp->flags |= OPENPIC_FLAG_ILR; +        opp->nb_irqs = 196; +        opp->mpic_mode_mask = GCR_MODE_PROXY; + +        fsl_common_init(opp); +        map_list(opp, list_be, &list_count); +        map_list(opp, list_fsl, &list_count); + +        break; + +    case OPENPIC_MODEL_RAVEN: +        opp->nb_irqs = RAVEN_MAX_EXT; +        opp->vid = VID_REVISION_1_3; +        opp->vir = VIR_GENERIC; +        opp->vector_mask = 0xFF; +        opp->tfrr_reset = 4160000; +        opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK; +        opp->idr_reset = 0; +        opp->max_irq = RAVEN_MAX_IRQ; +        opp->irq_ipi0 = RAVEN_IPI_IRQ; +        opp->irq_tim0 = RAVEN_TMR_IRQ; +        opp->brr1 = -1; +        opp->mpic_mode_mask = GCR_MODE_MIXED; + +        if (opp->nb_cpus != 1) { +            error_setg(errp, "Only UP supported today"); +            return; +        } + +        map_list(opp, list_le, &list_count); +        break; +    } + +    for (i = 0; i < opp->nb_cpus; i++) { +        opp->dst[i].irqs = g_new0(qemu_irq, OPENPIC_OUTPUT_NB); +        for (j = 0; j < OPENPIC_OUTPUT_NB; j++) { +            sysbus_init_irq(d, &opp->dst[i].irqs[j]); +        } + +        opp->dst[i].raised.queue_size = IRQQUEUE_SIZE_BITS; +        opp->dst[i].raised.queue = bitmap_new(IRQQUEUE_SIZE_BITS); +        opp->dst[i].servicing.queue_size = IRQQUEUE_SIZE_BITS; +        
opp->dst[i].servicing.queue = bitmap_new(IRQQUEUE_SIZE_BITS); +    } + +    sysbus_init_mmio(d, &opp->mem); +    qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq); +} + +static Property openpic_properties[] = { +    DEFINE_PROP_UINT32("model", OpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20), +    DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void openpic_class_init(ObjectClass *oc, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(oc); + +    dc->realize = openpic_realize; +    dc->props = openpic_properties; +    dc->reset = openpic_reset; +    dc->vmsd = &vmstate_openpic; +} + +static const TypeInfo openpic_info = { +    .name          = TYPE_OPENPIC, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(OpenPICState), +    .instance_init = openpic_init, +    .class_init    = openpic_class_init, +}; + +static void openpic_register_types(void) +{ +    type_register_static(&openpic_info); +} + +type_init(openpic_register_types) diff --git a/hw/intc/openpic_kvm.c b/hw/intc/openpic_kvm.c new file mode 100644 index 00000000..f7cac585 --- /dev/null +++ b/hw/intc/openpic_kvm.c @@ -0,0 +1,293 @@ +/* + * KVM in-kernel OpenPIC + * + * Copyright 2013 Freescale Semiconductor, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include <sys/ioctl.h> +#include "exec/address-spaces.h" +#include "hw/hw.h" +#include "hw/ppc/openpic.h" +#include "hw/pci/msi.h" +#include "hw/sysbus.h" +#include "sysemu/kvm.h" +#include "qemu/log.h" + +#define GCR_RESET        0x80000000 + +#define KVM_OPENPIC(obj) \ +    OBJECT_CHECK(KVMOpenPICState, (obj), TYPE_KVM_OPENPIC) + +typedef struct KVMOpenPICState { +    /*< private >*/ +    SysBusDevice parent_obj; +    /*< public >*/ + +    MemoryRegion mem; +    MemoryListener mem_listener; +    uint32_t fd; +    uint32_t model; +    hwaddr mapped; +} KVMOpenPICState; + +static void kvm_openpic_set_irq(void *opaque, int n_IRQ, int level) +{ +    kvm_set_irq(kvm_state, n_IRQ, level); +} + +static void kvm_openpic_write(void *opaque, hwaddr addr, uint64_t val, +                              unsigned size) +{ +    KVMOpenPICState *opp = opaque; +    struct kvm_device_attr attr; +    uint32_t val32 = val; +    int ret; + +    attr.group = KVM_DEV_MPIC_GRP_REGISTER; +    attr.attr = addr; +    attr.addr = (uint64_t)(unsigned long)&val32; + +    ret = ioctl(opp->fd, KVM_SET_DEVICE_ATTR, &attr); +    if (ret < 0) { +        qemu_log_mask(LOG_UNIMP, "%s: %s %" PRIx64 "\n", __func__, +                      strerror(errno), attr.attr); +    } +} + +static void kvm_openpic_reset(DeviceState *d) +{ +    KVMOpenPICState *opp = KVM_OPENPIC(d); + +    /* Trigger the GCR.RESET bit to reset the PIC */ +    kvm_openpic_write(opp, 0x1020, GCR_RESET, sizeof(uint32_t)); +} + +static uint64_t kvm_openpic_read(void *opaque, hwaddr addr, unsigned size) +{ +    KVMOpenPICState *opp = opaque; +    struct kvm_device_attr attr; +    uint32_t val = 0xdeadbeef; +    int ret; + +    attr.group = KVM_DEV_MPIC_GRP_REGISTER; +    attr.attr = addr; +    attr.addr = (uint64_t)(unsigned long)&val; + +    ret = ioctl(opp->fd, KVM_GET_DEVICE_ATTR, &attr); +    if (ret < 0) { +        qemu_log_mask(LOG_UNIMP, "%s: %s %" PRIx64 "\n", __func__, +                      strerror(errno), attr.attr); +        return 0; +    } + +    return val; +} + +static const MemoryRegionOps kvm_openpic_mem_ops = { +    .write = kvm_openpic_write, +    .read  = kvm_openpic_read, +    .endianness = DEVICE_BIG_ENDIAN, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +static void kvm_openpic_region_add(MemoryListener *listener, +                                   MemoryRegionSection *section) +{ +    KVMOpenPICState *opp = container_of(listener, KVMOpenPICState, +                                        mem_listener); +    struct kvm_device_attr attr; +    uint64_t reg_base; +    int ret; + +    if (section->address_space != &address_space_memory) { +        abort(); +    } + +    /* Ignore events on regions that are not us */ +    if (section->mr != &opp->mem) { +        return; +    } + +    if (opp->mapped) { +        /* +         * We can only map the MPIC once. Since we are already mapped, +         * the best we can do is ignore new maps. 
+         */
+        return;
+    }
+
+    reg_base = section->offset_within_address_space;
+    opp->mapped = reg_base;
+
+    attr.group = KVM_DEV_MPIC_GRP_MISC;
+    attr.attr = KVM_DEV_MPIC_BASE_ADDR;
+    attr.addr = (uint64_t)(unsigned long)&reg_base;
+
+    ret = ioctl(opp->fd, KVM_SET_DEVICE_ATTR, &attr);
+    if (ret < 0) {
+        fprintf(stderr, "%s: %s %" PRIx64 "\n", __func__,
+                strerror(errno), reg_base);
+    }
+}
+
+static void kvm_openpic_region_del(MemoryListener *listener,
+                                   MemoryRegionSection *section)
+{
+    KVMOpenPICState *opp = container_of(listener, KVMOpenPICState,
+                                        mem_listener);
+    struct kvm_device_attr attr;
+    uint64_t reg_base = 0;
+    int ret;
+
+    /* Ignore events on regions that are not us */
+    if (section->mr != &opp->mem) {
+        return;
+    }
+
+    if (section->offset_within_address_space != opp->mapped) {
+        /*
+         * We can only map the MPIC once. This mapping was a secondary
+         * one that we couldn't fulfill. Ignore it.
+         */
+        return;
+    }
+    opp->mapped = 0;
+
+    attr.group = KVM_DEV_MPIC_GRP_MISC;
+    attr.attr = KVM_DEV_MPIC_BASE_ADDR;
+    attr.addr = (uint64_t)(unsigned long)&reg_base;
+
+    ret = ioctl(opp->fd, KVM_SET_DEVICE_ATTR, &attr);
+    if (ret < 0) {
+        fprintf(stderr, "%s: %s %" PRIx64 "\n", __func__,
+                strerror(errno), reg_base);
+    }
+}
+
+static void kvm_openpic_init(Object *obj)
+{
+    KVMOpenPICState *opp = KVM_OPENPIC(obj);
+
+    memory_region_init_io(&opp->mem, OBJECT(opp), &kvm_openpic_mem_ops, opp,
+                          "kvm-openpic", 0x40000);
+}
+
+static void kvm_openpic_realize(DeviceState *dev, Error **errp)
+{
+    SysBusDevice *d = SYS_BUS_DEVICE(dev);
+    KVMOpenPICState *opp = KVM_OPENPIC(dev);
+    KVMState *s = kvm_state;
+    int kvm_openpic_model;
+    struct kvm_create_device cd = {0};
+    int ret, i;
+
+    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
+        error_setg(errp, "Kernel is lacking Device Control API");
+        return;
+    }
+
+    switch (opp->model) {
+    case OPENPIC_MODEL_FSL_MPIC_20:
+        kvm_openpic_model = KVM_DEV_TYPE_FSL_MPIC_20;
+        break;
+
+    case OPENPIC_MODEL_FSL_MPIC_42:
+        kvm_openpic_model = KVM_DEV_TYPE_FSL_MPIC_42;
+        break;
+
+    default:
+        error_setg(errp, "Unsupported OpenPIC model %" PRIu32, opp->model);
+        return;
+    }
+
+    cd.type = kvm_openpic_model;
+    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &cd);
+    if (ret < 0) {
+        error_setg(errp, "Can't create device %d: %s",
+                   cd.type, strerror(errno));
+        return;
+    }
+    opp->fd = cd.fd;
+
+    sysbus_init_mmio(d, &opp->mem);
+    qdev_init_gpio_in(dev, kvm_openpic_set_irq, OPENPIC_MAX_IRQ);
+
+    opp->mem_listener.region_add = kvm_openpic_region_add;
+    opp->mem_listener.region_del = kvm_openpic_region_del;
+    memory_listener_register(&opp->mem_listener, &address_space_memory);
+
+    /* indicate pic capabilities */
+    msi_supported = true;
+    kvm_kernel_irqchip = true;
+    kvm_async_interrupts_allowed = true;
+
+    /* set up irq routing */
+    kvm_init_irq_routing(kvm_state);
+    for (i = 0; i < 256; ++i) {
+        kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
+    }
+
+    kvm_msi_via_irqfd_allowed = true;
+    kvm_gsi_routing_allowed = true;
+
+    kvm_irqchip_commit_routes(s);
+}
+
+int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs)
+{
+    
KVMOpenPICState *opp = KVM_OPENPIC(d); + +    return kvm_vcpu_enable_cap(cs, KVM_CAP_IRQ_MPIC, 0, opp->fd, +                               kvm_arch_vcpu_id(cs)); +} + +static Property kvm_openpic_properties[] = { +    DEFINE_PROP_UINT32("model", KVMOpenPICState, model, +                       OPENPIC_MODEL_FSL_MPIC_20), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void kvm_openpic_class_init(ObjectClass *oc, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(oc); + +    dc->realize = kvm_openpic_realize; +    dc->props = kvm_openpic_properties; +    dc->reset = kvm_openpic_reset; +} + +static const TypeInfo kvm_openpic_info = { +    .name          = TYPE_KVM_OPENPIC, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(KVMOpenPICState), +    .instance_init = kvm_openpic_init, +    .class_init    = kvm_openpic_class_init, +}; + +static void kvm_openpic_register_types(void) +{ +    type_register_static(&kvm_openpic_info); +} + +type_init(kvm_openpic_register_types) diff --git a/hw/intc/pl190.c b/hw/intc/pl190.c new file mode 100644 index 00000000..2bf359a7 --- /dev/null +++ b/hw/intc/pl190.c @@ -0,0 +1,292 @@ +/* + * Arm PrimeCell PL190 Vector Interrupt Controller + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "hw/sysbus.h" + +/* The number of virtual priority levels.  16 user vectors plus the +   unvectored IRQ.  Chained interrupts would require an additional level +   if implemented.  */ + +#define PL190_NUM_PRIO 17 + +#define TYPE_PL190 "pl190" +#define PL190(obj) OBJECT_CHECK(PL190State, (obj), TYPE_PL190) + +typedef struct PL190State { +    SysBusDevice parent_obj; + +    MemoryRegion iomem; +    uint32_t level; +    uint32_t soft_level; +    uint32_t irq_enable; +    uint32_t fiq_select; +    uint8_t vect_control[16]; +    uint32_t vect_addr[PL190_NUM_PRIO]; +    /* Mask containing interrupts with higher priority than this one.  */ +    uint32_t prio_mask[PL190_NUM_PRIO + 1]; +    int protected; +    /* Current priority level.  */ +    int priority; +    int prev_prio[PL190_NUM_PRIO]; +    qemu_irq irq; +    qemu_irq fiq; +} PL190State; + +static const unsigned char pl190_id[] = +{ 0x90, 0x11, 0x04, 0x00, 0x0D, 0xf0, 0x05, 0xb1 }; + +static inline uint32_t pl190_irq_level(PL190State *s) +{ +    return (s->level | s->soft_level) & s->irq_enable & ~s->fiq_select; +} + +/* Update interrupts.  
*/ +static void pl190_update(PL190State *s) +{ +    uint32_t level = pl190_irq_level(s); +    int set; + +    set = (level & s->prio_mask[s->priority]) != 0; +    qemu_set_irq(s->irq, set); +    set = ((s->level | s->soft_level) & s->fiq_select) != 0; +    qemu_set_irq(s->fiq, set); +} + +static void pl190_set_irq(void *opaque, int irq, int level) +{ +    PL190State *s = (PL190State *)opaque; + +    if (level) +        s->level |= 1u << irq; +    else +        s->level &= ~(1u << irq); +    pl190_update(s); +} + +static void pl190_update_vectors(PL190State *s) +{ +    uint32_t mask; +    int i; +    int n; + +    mask = 0; +    for (i = 0; i < 16; i++) +      { +        s->prio_mask[i] = mask; +        if (s->vect_control[i] & 0x20) +          { +            n = s->vect_control[i] & 0x1f; +            mask |= 1 << n; +          } +      } +    s->prio_mask[16] = mask; +    pl190_update(s); +} + +static uint64_t pl190_read(void *opaque, hwaddr offset, +                           unsigned size) +{ +    PL190State *s = (PL190State *)opaque; +    int i; + +    if (offset >= 0xfe0 && offset < 0x1000) { +        return pl190_id[(offset - 0xfe0) >> 2]; +    } +    if (offset >= 0x100 && offset < 0x140) { +        return s->vect_addr[(offset - 0x100) >> 2]; +    } +    if (offset >= 0x200 && offset < 0x240) { +        return s->vect_control[(offset - 0x200) >> 2]; +    } +    switch (offset >> 2) { +    case 0: /* IRQSTATUS */ +        return pl190_irq_level(s); +    case 1: /* FIQSATUS */ +        return (s->level | s->soft_level) & s->fiq_select; +    case 2: /* RAWINTR */ +        return s->level | s->soft_level; +    case 3: /* INTSELECT */ +        return s->fiq_select; +    case 4: /* INTENABLE */ +        return s->irq_enable; +    case 6: /* SOFTINT */ +        return s->soft_level; +    case 8: /* PROTECTION */ +        return s->protected; +    case 12: /* VECTADDR */ +        /* Read vector address at the start of an ISR.  Increases the +         * current priority level to that of the current interrupt. +         * +         * Since an enabled interrupt X at priority P causes prio_mask[Y] +         * to have bit X set for all Y > P, this loop will stop with +         * i == the priority of the highest priority set interrupt. +         */ +        for (i = 0; i < s->priority; i++) { +            if ((s->level | s->soft_level) & s->prio_mask[i + 1]) { +                break; +            } +        } + +        /* Reading this value with no pending interrupts is undefined. +           We return the default address.  
*/ +        if (i == PL190_NUM_PRIO) +          return s->vect_addr[16]; +        if (i < s->priority) +          { +            s->prev_prio[i] = s->priority; +            s->priority = i; +            pl190_update(s); +          } +        return s->vect_addr[s->priority]; +    case 13: /* DEFVECTADDR */ +        return s->vect_addr[16]; +    default: +        qemu_log_mask(LOG_GUEST_ERROR, +                      "pl190_read: Bad offset %x\n", (int)offset); +        return 0; +    } +} + +static void pl190_write(void *opaque, hwaddr offset, +                        uint64_t val, unsigned size) +{ +    PL190State *s = (PL190State *)opaque; + +    if (offset >= 0x100 && offset < 0x140) { +        s->vect_addr[(offset - 0x100) >> 2] = val; +        pl190_update_vectors(s); +        return; +    } +    if (offset >= 0x200 && offset < 0x240) { +        s->vect_control[(offset - 0x200) >> 2] = val; +        pl190_update_vectors(s); +        return; +    } +    switch (offset >> 2) { +    case 0: /* SELECT */ +        /* This is a readonly register, but linux tries to write to it +           anyway.  Ignore the write.  */ +        break; +    case 3: /* INTSELECT */ +        s->fiq_select = val; +        break; +    case 4: /* INTENABLE */ +        s->irq_enable |= val; +        break; +    case 5: /* INTENCLEAR */ +        s->irq_enable &= ~val; +        break; +    case 6: /* SOFTINT */ +        s->soft_level |= val; +        break; +    case 7: /* SOFTINTCLEAR */ +        s->soft_level &= ~val; +        break; +    case 8: /* PROTECTION */ +        /* TODO: Protection (supervisor only access) is not implemented.  */ +        s->protected = val & 1; +        break; +    case 12: /* VECTADDR */ +        /* Restore the previous priority level.  The value written is +           ignored.  
*/ +        if (s->priority < PL190_NUM_PRIO) +            s->priority = s->prev_prio[s->priority]; +        break; +    case 13: /* DEFVECTADDR */ +        s->vect_addr[16] = val; +        break; +    case 0xc0: /* ITCR */ +        if (val) { +            qemu_log_mask(LOG_UNIMP, "pl190: Test mode not implemented\n"); +        } +        break; +    default: +        qemu_log_mask(LOG_GUEST_ERROR, +                     "pl190_write: Bad offset %x\n", (int)offset); +        return; +    } +    pl190_update(s); +} + +static const MemoryRegionOps pl190_ops = { +    .read = pl190_read, +    .write = pl190_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pl190_reset(DeviceState *d) +{ +    PL190State *s = PL190(d); +    int i; + +    for (i = 0; i < 16; i++) { +        s->vect_addr[i] = 0; +        s->vect_control[i] = 0; +    } +    s->vect_addr[16] = 0; +    s->prio_mask[17] = 0xffffffff; +    s->priority = PL190_NUM_PRIO; +    pl190_update_vectors(s); +} + +static int pl190_init(SysBusDevice *sbd) +{ +    DeviceState *dev = DEVICE(sbd); +    PL190State *s = PL190(dev); + +    memory_region_init_io(&s->iomem, OBJECT(s), &pl190_ops, s, "pl190", 0x1000); +    sysbus_init_mmio(sbd, &s->iomem); +    qdev_init_gpio_in(dev, pl190_set_irq, 32); +    sysbus_init_irq(sbd, &s->irq); +    sysbus_init_irq(sbd, &s->fiq); +    return 0; +} + +static const VMStateDescription vmstate_pl190 = { +    .name = "pl190", +    .version_id = 1, +    .minimum_version_id = 1, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(level, PL190State), +        VMSTATE_UINT32(soft_level, PL190State), +        VMSTATE_UINT32(irq_enable, PL190State), +        VMSTATE_UINT32(fiq_select, PL190State), +        VMSTATE_UINT8_ARRAY(vect_control, PL190State, 16), +        VMSTATE_UINT32_ARRAY(vect_addr, PL190State, PL190_NUM_PRIO), +        VMSTATE_UINT32_ARRAY(prio_mask, PL190State, PL190_NUM_PRIO+1), +        VMSTATE_INT32(protected, PL190State), +        VMSTATE_INT32(priority, PL190State), +        VMSTATE_INT32_ARRAY(prev_prio, PL190State, PL190_NUM_PRIO), +        VMSTATE_END_OF_LIST() +    } +}; + +static void pl190_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + +    k->init = pl190_init; +    dc->reset = pl190_reset; +    dc->vmsd = &vmstate_pl190; +} + +static const TypeInfo pl190_info = { +    .name          = TYPE_PL190, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(PL190State), +    .class_init    = pl190_class_init, +}; + +static void pl190_register_types(void) +{ +    type_register_static(&pl190_info); +} + +type_init(pl190_register_types) diff --git a/hw/intc/puv3_intc.c b/hw/intc/puv3_intc.c new file mode 100644 index 00000000..c2803d07 --- /dev/null +++ b/hw/intc/puv3_intc.c @@ -0,0 +1,140 @@ +/* + * INTC device simulation in PKUnity SoC + * + * Copyright (C) 2010-2012 Guan Xuetao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation, or any later version. + * See the COPYING file in the top-level directory. 
+ */ +#include "hw/sysbus.h" + +#undef DEBUG_PUV3 +#include "hw/unicore32/puv3.h" + +#define TYPE_PUV3_INTC "puv3_intc" +#define PUV3_INTC(obj) OBJECT_CHECK(PUV3INTCState, (obj), TYPE_PUV3_INTC) + +typedef struct PUV3INTCState { +    SysBusDevice parent_obj; + +    MemoryRegion iomem; +    qemu_irq parent_irq; + +    uint32_t reg_ICMR; +    uint32_t reg_ICPR; +} PUV3INTCState; + +/* Update interrupt status after enabled or pending bits have been changed.  */ +static void puv3_intc_update(PUV3INTCState *s) +{ +    if (s->reg_ICMR & s->reg_ICPR) { +        qemu_irq_raise(s->parent_irq); +    } else { +        qemu_irq_lower(s->parent_irq); +    } +} + +/* Process a change in an external INTC input. */ +static void puv3_intc_handler(void *opaque, int irq, int level) +{ +    PUV3INTCState *s = opaque; + +    DPRINTF("irq 0x%x, level 0x%x\n", irq, level); +    if (level) { +        s->reg_ICPR |= (1 << irq); +    } else { +        s->reg_ICPR &= ~(1 << irq); +    } +    puv3_intc_update(s); +} + +static uint64_t puv3_intc_read(void *opaque, hwaddr offset, +        unsigned size) +{ +    PUV3INTCState *s = opaque; +    uint32_t ret = 0; + +    switch (offset) { +    case 0x04: /* INTC_ICMR */ +        ret = s->reg_ICMR; +        break; +    case 0x0c: /* INTC_ICIP */ +        ret = s->reg_ICPR; /* the same value with ICPR */ +        break; +    default: +        DPRINTF("Bad offset %x\n", (int)offset); +    } +    DPRINTF("offset 0x%x, value 0x%x\n", offset, ret); +    return ret; +} + +static void puv3_intc_write(void *opaque, hwaddr offset, +        uint64_t value, unsigned size) +{ +    PUV3INTCState *s = opaque; + +    DPRINTF("offset 0x%x, value 0x%x\n", offset, value); +    switch (offset) { +    case 0x00: /* INTC_ICLR */ +    case 0x14: /* INTC_ICCR */ +        break; +    case 0x04: /* INTC_ICMR */ +        s->reg_ICMR = value; +        break; +    default: +        DPRINTF("Bad offset 0x%x\n", (int)offset); +        return; +    } +    puv3_intc_update(s); +} + +static const MemoryRegionOps puv3_intc_ops = { +    .read = puv3_intc_read, +    .write = puv3_intc_write, +    .impl = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int puv3_intc_init(SysBusDevice *sbd) +{ +    DeviceState *dev = DEVICE(sbd); +    PUV3INTCState *s = PUV3_INTC(dev); + +    qdev_init_gpio_in(dev, puv3_intc_handler, PUV3_IRQS_NR); +    sysbus_init_irq(sbd, &s->parent_irq); + +    s->reg_ICMR = 0; +    s->reg_ICPR = 0; + +    memory_region_init_io(&s->iomem, OBJECT(s), &puv3_intc_ops, s, "puv3_intc", +                          PUV3_REGS_OFFSET); +    sysbus_init_mmio(sbd, &s->iomem); + +    return 0; +} + +static void puv3_intc_class_init(ObjectClass *klass, void *data) +{ +    SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass); + +    sdc->init = puv3_intc_init; +} + +static const TypeInfo puv3_intc_info = { +    .name = TYPE_PUV3_INTC, +    .parent = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(PUV3INTCState), +    .class_init = puv3_intc_class_init, +}; + +static void puv3_intc_register_type(void) +{ +    type_register_static(&puv3_intc_info); +} + +type_init(puv3_intc_register_type) diff --git a/hw/intc/realview_gic.c b/hw/intc/realview_gic.c new file mode 100644 index 00000000..6c812961 --- /dev/null +++ b/hw/intc/realview_gic.c @@ -0,0 +1,87 @@ +/* + * ARM RealView Emulation Baseboard Interrupt Controller + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. 
+ */ + +#include "hw/intc/realview_gic.h" + +static void realview_gic_set_irq(void *opaque, int irq, int level) +{ +    RealViewGICState *s = (RealViewGICState *)opaque; + +    qemu_set_irq(qdev_get_gpio_in(DEVICE(&s->gic), irq), level); +} + +static void realview_gic_realize(DeviceState *dev, Error **errp) +{ +    SysBusDevice *sbd = SYS_BUS_DEVICE(dev); +    RealViewGICState *s = REALVIEW_GIC(dev); +    SysBusDevice *busdev; +    Error *err = NULL; +    /* The GICs on the RealView boards have a fixed nonconfigurable +     * number of interrupt lines, so we don't need to expose this as +     * a qdev property. +     */ +    int numirq = 96; + +    qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", numirq); +    object_property_set_bool(OBJECT(&s->gic), true, "realized", &err); +    if (err != NULL) { +        error_propagate(errp, err); +        return; +    } +    busdev = SYS_BUS_DEVICE(&s->gic); + +    /* Pass through outbound IRQ lines from the GIC */ +    sysbus_pass_irq(sbd, busdev); + +    /* Pass through inbound GPIO lines to the GIC */ +    qdev_init_gpio_in(dev, realview_gic_set_irq, numirq - 32); + +    memory_region_add_subregion(&s->container, 0, +                                sysbus_mmio_get_region(busdev, 1)); +    memory_region_add_subregion(&s->container, 0x1000, +                                sysbus_mmio_get_region(busdev, 0)); +} + +static void realview_gic_init(Object *obj) +{ +    SysBusDevice *sbd = SYS_BUS_DEVICE(obj); +    RealViewGICState *s = REALVIEW_GIC(obj); +    DeviceState *gicdev; + +    memory_region_init(&s->container, OBJECT(s), +                       "realview-gic-container", 0x2000); +    sysbus_init_mmio(sbd, &s->container); + +    object_initialize(&s->gic, sizeof(s->gic), TYPE_ARM_GIC); +    gicdev = DEVICE(&s->gic); +    qdev_set_parent_bus(gicdev, sysbus_get_default()); +    qdev_prop_set_uint32(gicdev, "num-cpu", 1); +} + +static void realview_gic_class_init(ObjectClass *oc, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(oc); + +    dc->realize = realview_gic_realize; +} + +static const TypeInfo realview_gic_info = { +    .name          = TYPE_REALVIEW_GIC, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(RealViewGICState), +    .instance_init = realview_gic_init, +    .class_init    = realview_gic_class_init, +}; + +static void realview_gic_register_types(void) +{ +    type_register_static(&realview_gic_info); +} + +type_init(realview_gic_register_types) diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c new file mode 100644 index 00000000..02e10b75 --- /dev/null +++ b/hw/intc/s390_flic.c @@ -0,0 +1,99 @@ +/* + * QEMU S390x floating interrupt controller (flic) + * + * Copyright 2014 IBM Corp. + * Author(s): Jens Freimann <jfrei@linux.vnet.ibm.com> + *            Cornelia Huck <cornelia.huck@de.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/error-report.h" +#include "hw/sysbus.h" +#include "migration/qemu-file.h" +#include "hw/s390x/s390_flic.h" +#include "trace.h" + +S390FLICState *s390_get_flic(void) +{ +    S390FLICState *fs; + +    fs = S390_FLIC_COMMON(object_resolve_path(TYPE_KVM_S390_FLIC, NULL)); +    if (!fs) { +        fs = S390_FLIC_COMMON(object_resolve_path(TYPE_QEMU_S390_FLIC, NULL)); +    } +    return fs; +} + +void s390_flic_init(void) +{ +    DeviceState *dev; + +    dev = s390_flic_kvm_create(); +    if (!dev) { +        dev = qdev_create(NULL, TYPE_QEMU_S390_FLIC); +        object_property_add_child(qdev_get_machine(), TYPE_QEMU_S390_FLIC, +                                  OBJECT(dev), NULL); +    } +    qdev_init_nofail(dev); +} + +static int qemu_s390_register_io_adapter(S390FLICState *fs, uint32_t id, +                                         uint8_t isc, bool swap, +                                         bool is_maskable) +{ +    /* nothing to do */ +    return 0; +} + +static int qemu_s390_io_adapter_map(S390FLICState *fs, uint32_t id, +                                    uint64_t map_addr, bool do_map) +{ +    /* nothing to do */ +    return 0; +} + +static int qemu_s390_add_adapter_routes(S390FLICState *fs, +                                        AdapterRoutes *routes) +{ +    return -ENOSYS; +} + +static void qemu_s390_release_adapter_routes(S390FLICState *fs, +                                             AdapterRoutes *routes) +{ +} + +static void qemu_s390_flic_class_init(ObjectClass *oc, void *data) +{ +    S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc); + +    fsc->register_io_adapter = qemu_s390_register_io_adapter; +    fsc->io_adapter_map = qemu_s390_io_adapter_map; +    fsc->add_adapter_routes = qemu_s390_add_adapter_routes; +    fsc->release_adapter_routes = qemu_s390_release_adapter_routes; +} + +static const TypeInfo qemu_s390_flic_info = { +    .name          = TYPE_QEMU_S390_FLIC, +    .parent        = TYPE_S390_FLIC_COMMON, +    .instance_size = sizeof(QEMUS390FLICState), +    .class_init    = qemu_s390_flic_class_init, +}; + +static const TypeInfo s390_flic_common_info = { +    .name          = TYPE_S390_FLIC_COMMON, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(S390FLICState), +    .class_size    = sizeof(S390FLICStateClass), +}; + +static void qemu_s390_flic_register_types(void) +{ +    type_register_static(&s390_flic_common_info); +    type_register_static(&qemu_s390_flic_info); +} + +type_init(qemu_s390_flic_register_types) diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c new file mode 100644 index 00000000..b471e7a4 --- /dev/null +++ b/hw/intc/s390_flic_kvm.c @@ -0,0 +1,432 @@ +/* + * QEMU S390x KVM floating interrupt controller (flic) + * + * Copyright 2014 IBM Corp. + * Author(s): Jens Freimann <jfrei@linux.vnet.ibm.com> + *            Cornelia Huck <cornelia.huck@de.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include <sys/ioctl.h> +#include "qemu/error-report.h" +#include "hw/sysbus.h" +#include "sysemu/kvm.h" +#include "migration/qemu-file.h" +#include "hw/s390x/s390_flic.h" +#include "hw/s390x/adapter.h" +#include "trace.h" + +#define FLIC_SAVE_INITIAL_SIZE getpagesize() +#define FLIC_FAILED (-1UL) +#define FLIC_SAVEVM_VERSION 1 + +typedef struct KVMS390FLICState { +    S390FLICState parent_obj; + +    uint32_t fd; +} KVMS390FLICState; + +DeviceState *s390_flic_kvm_create(void) +{ +    DeviceState *dev = NULL; + +    if (kvm_enabled()) { +        dev = qdev_create(NULL, TYPE_KVM_S390_FLIC); +        object_property_add_child(qdev_get_machine(), TYPE_KVM_S390_FLIC, +                                  OBJECT(dev), NULL); +    } +    return dev; +} + +/** + * flic_get_all_irqs - store all pending irqs in buffer + * @buf: pointer to buffer which is passed to kernel + * @len: length of buffer + * @flic: pointer to flic device state + * + * Returns: -ENOMEM if buffer is too small, + * -EINVAL if attr.group is invalid, + * -EFAULT if copying to userspace failed, + * on success return number of stored interrupts + */ +static int flic_get_all_irqs(KVMS390FLICState *flic, +                             void *buf, int len) +{ +    struct kvm_device_attr attr = { +        .group = KVM_DEV_FLIC_GET_ALL_IRQS, +        .addr = (uint64_t) buf, +        .attr = len, +    }; +    int rc; + +    rc = ioctl(flic->fd, KVM_GET_DEVICE_ATTR, &attr); + +    return rc == -1 ? -errno : rc; +} + +static void flic_enable_pfault(KVMS390FLICState *flic) +{ +    struct kvm_device_attr attr = { +        .group = KVM_DEV_FLIC_APF_ENABLE, +    }; +    int rc; + +    rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr); + +    if (rc) { +        fprintf(stderr, "flic: couldn't enable pfault\n"); +    } +} + +static void flic_disable_wait_pfault(KVMS390FLICState *flic) +{ +    struct kvm_device_attr attr = { +        .group = KVM_DEV_FLIC_APF_DISABLE_WAIT, +    }; +    int rc; + +    rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr); + +    if (rc) { +        fprintf(stderr, "flic: couldn't disable pfault\n"); +    } +} + +/** flic_enqueue_irqs - returns 0 on success + * @buf: pointer to buffer which is passed to kernel + * @len: length of buffer + * @flic: pointer to flic device state + * + * Returns: -EINVAL if attr.group is unknown + */ +static int flic_enqueue_irqs(void *buf, uint64_t len, +                            KVMS390FLICState *flic) +{ +    int rc; +    struct kvm_device_attr attr = { +        .group = KVM_DEV_FLIC_ENQUEUE, +        .addr = (uint64_t) buf, +        .attr = len, +    }; + +    rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr); + +    return rc ? -errno : 0; +} + +int kvm_s390_inject_flic(struct kvm_s390_irq *irq) +{ +    static KVMS390FLICState *flic; + +    if (unlikely(!flic)) { +        flic = KVM_S390_FLIC(s390_get_flic()); +    } +    return flic_enqueue_irqs(irq, sizeof(*irq), flic); +} + +/** + * __get_all_irqs - store all pending irqs in buffer + * @flic: pointer to flic device state + * @buf: pointer to pointer to a buffer + * @len: length of buffer + * + * Returns: return value of flic_get_all_irqs + * Note: Retry and increase buffer size until flic_get_all_irqs + * either returns a value >= 0 or a negative error code. + * -ENOMEM is an exception, which means the buffer is too small + * and we should try again. 
Other negative error codes can be + * -EFAULT and -EINVAL which we ignore at this point + */ +static int __get_all_irqs(KVMS390FLICState *flic, +                          void **buf, int len) +{ +    int r; + +    do { +        /* returns -ENOMEM if buffer is too small and number +         * of queued interrupts on success */ +        r = flic_get_all_irqs(flic, *buf, len); +        if (r >= 0) { +            break; +        } +        len *= 2; +        *buf = g_try_realloc(*buf, len); +        if (!buf) { +            return -ENOMEM; +        } +    } while (r == -ENOMEM && len <= KVM_S390_FLIC_MAX_BUFFER); + +    return r; +} + +static int kvm_s390_register_io_adapter(S390FLICState *fs, uint32_t id, +                                        uint8_t isc, bool swap, +                                        bool is_maskable) +{ +    struct kvm_s390_io_adapter adapter = { +        .id = id, +        .isc = isc, +        .maskable = is_maskable, +        .swap = swap, +    }; +    KVMS390FLICState *flic = KVM_S390_FLIC(fs); +    int r, ret; +    struct kvm_device_attr attr = { +        .group = KVM_DEV_FLIC_ADAPTER_REGISTER, +        .addr = (uint64_t)&adapter, +    }; + +    if (!kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING)) { +        /* nothing to do */ +        return 0; +    } + +    r = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr); + +    ret = r ? -errno : 0; +    return ret; +} + +static int kvm_s390_io_adapter_map(S390FLICState *fs, uint32_t id, +                                   uint64_t map_addr, bool do_map) +{ +    struct kvm_s390_io_adapter_req req = { +        .id = id, +        .type = do_map ? KVM_S390_IO_ADAPTER_MAP : KVM_S390_IO_ADAPTER_UNMAP, +        .addr = map_addr, +    }; +    struct kvm_device_attr attr = { +        .group = KVM_DEV_FLIC_ADAPTER_MODIFY, +        .addr = (uint64_t)&req, +    }; +    KVMS390FLICState *flic = KVM_S390_FLIC(fs); +    int r; + +    if (!kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING)) { +        /* nothing to do */ +        return 0; +    } + +    r = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr); +    return r ? -errno : 0; +} + +static int kvm_s390_add_adapter_routes(S390FLICState *fs, +                                       AdapterRoutes *routes) +{ +    int ret, i; +    uint64_t ind_offset = routes->adapter.ind_offset; + +    for (i = 0; i < routes->num_routes; i++) { +        ret = kvm_irqchip_add_adapter_route(kvm_state, &routes->adapter); +        if (ret < 0) { +            goto out_undo; +        } +        routes->gsi[i] = ret; +        routes->adapter.ind_offset++; +    } +    /* Restore passed-in structure to original state. */ +    routes->adapter.ind_offset = ind_offset; +    return 0; +out_undo: +    while (--i >= 0) { +        kvm_irqchip_release_virq(kvm_state, routes->gsi[i]); +        routes->gsi[i] = -1; +    } +    routes->adapter.ind_offset = ind_offset; +    return ret; +} + +static void kvm_s390_release_adapter_routes(S390FLICState *fs, +                                            AdapterRoutes *routes) +{ +    int i; + +    for (i = 0; i < routes->num_routes; i++) { +        if (routes->gsi[i] >= 0) { +            kvm_irqchip_release_virq(kvm_state, routes->gsi[i]); +            routes->gsi[i] = -1; +        } +    } +} + +/** + * kvm_flic_save - Save pending floating interrupts + * @f: QEMUFile containing migration state + * @opaque: pointer to flic device state + * + * Note: Pass buf and len to kernel. 
Start with one page and + * increase until buffer is sufficient or maxium size is + * reached + */ +static void kvm_flic_save(QEMUFile *f, void *opaque) +{ +    KVMS390FLICState *flic = opaque; +    int len = FLIC_SAVE_INITIAL_SIZE; +    void *buf; +    int count; + +    flic_disable_wait_pfault((struct KVMS390FLICState *) opaque); + +    buf = g_try_malloc0(len); +    if (!buf) { +        /* Storing FLIC_FAILED into the count field here will cause the +         * target system to fail when attempting to load irqs from the +         * migration state */ +        error_report("flic: couldn't allocate memory"); +        qemu_put_be64(f, FLIC_FAILED); +        return; +    } + +    count = __get_all_irqs(flic, &buf, len); +    if (count < 0) { +        error_report("flic: couldn't retrieve irqs from kernel, rc %d", +                     count); +        /* Storing FLIC_FAILED into the count field here will cause the +         * target system to fail when attempting to load irqs from the +         * migration state */ +        qemu_put_be64(f, FLIC_FAILED); +    } else { +        qemu_put_be64(f, count); +        qemu_put_buffer(f, (uint8_t *) buf, +                        count * sizeof(struct kvm_s390_irq)); +    } +    g_free(buf); +} + +/** + * kvm_flic_load - Load pending floating interrupts + * @f: QEMUFile containing migration state + * @opaque: pointer to flic device state + * @version_id: version id for migration + * + * Returns: value of flic_enqueue_irqs, -EINVAL on error + * Note: Do nothing when no interrupts where stored + * in QEMUFile + */ +static int kvm_flic_load(QEMUFile *f, void *opaque, int version_id) +{ +    uint64_t len = 0; +    uint64_t count = 0; +    void *buf = NULL; +    int r = 0; + +    if (version_id != FLIC_SAVEVM_VERSION) { +        r = -EINVAL; +        goto out; +    } + +    flic_enable_pfault((struct KVMS390FLICState *) opaque); + +    count = qemu_get_be64(f); +    len = count * sizeof(struct kvm_s390_irq); +    if (count == FLIC_FAILED) { +        r = -EINVAL; +        goto out; +    } +    if (count == 0) { +        r = 0; +        goto out; +    } +    buf = g_try_malloc0(len); +    if (!buf) { +        r = -ENOMEM; +        goto out; +    } + +    if (qemu_get_buffer(f, (uint8_t *) buf, len) != len) { +        r = -EINVAL; +        goto out_free; +    } +    r = flic_enqueue_irqs(buf, len, (struct KVMS390FLICState *) opaque); + +out_free: +    g_free(buf); +out: +    return r; +} + +static void kvm_s390_flic_realize(DeviceState *dev, Error **errp) +{ +    KVMS390FLICState *flic_state = KVM_S390_FLIC(dev); +    struct kvm_create_device cd = {0}; +    int ret; + +    flic_state->fd = -1; +    if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) { +        trace_flic_no_device_api(errno); +        return; +    } + +    cd.type = KVM_DEV_TYPE_FLIC; +    ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd); +    if (ret < 0) { +        trace_flic_create_device(errno); +        return; +    } +    flic_state->fd = cd.fd; + +    /* Register savevm handler for floating interrupts */ +    register_savevm(NULL, "s390-flic", 0, 1, kvm_flic_save, +                    kvm_flic_load, (void *) flic_state); +} + +static void kvm_s390_flic_unrealize(DeviceState *dev, Error **errp) +{ +    KVMS390FLICState *flic_state = KVM_S390_FLIC(dev); + +    unregister_savevm(DEVICE(flic_state), "s390-flic", flic_state); +} + +static void kvm_s390_flic_reset(DeviceState *dev) +{ +    KVMS390FLICState *flic = KVM_S390_FLIC(dev); +    struct kvm_device_attr attr = { +        
.group = KVM_DEV_FLIC_CLEAR_IRQS, +    }; +    int rc = 0; + +    if (flic->fd == -1) { +        return; +    } + +    flic_disable_wait_pfault(flic); + +    rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr); +    if (rc) { +        trace_flic_reset_failed(errno); +    } + +    flic_enable_pfault(flic); +} + +static void kvm_s390_flic_class_init(ObjectClass *oc, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(oc); +    S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc); + +    dc->realize = kvm_s390_flic_realize; +    dc->unrealize = kvm_s390_flic_unrealize; +    dc->reset = kvm_s390_flic_reset; +    fsc->register_io_adapter = kvm_s390_register_io_adapter; +    fsc->io_adapter_map = kvm_s390_io_adapter_map; +    fsc->add_adapter_routes = kvm_s390_add_adapter_routes; +    fsc->release_adapter_routes = kvm_s390_release_adapter_routes; +} + +static const TypeInfo kvm_s390_flic_info = { +    .name          = TYPE_KVM_S390_FLIC, +    .parent        = TYPE_S390_FLIC_COMMON, +    .instance_size = sizeof(KVMS390FLICState), +    .class_init    = kvm_s390_flic_class_init, +}; + +static void kvm_s390_flic_register_types(void) +{ +    type_register_static(&kvm_s390_flic_info); +} + +type_init(kvm_s390_flic_register_types) diff --git a/hw/intc/sh_intc.c b/hw/intc/sh_intc.c new file mode 100644 index 00000000..55c76e4a --- /dev/null +++ b/hw/intc/sh_intc.c @@ -0,0 +1,512 @@ +/* + * SuperH interrupt controller module + * + * Copyright (c) 2007 Magnus Damm + * Based on sh_timer.c and arm_timer.c by Paul Brook + * Copyright (c) 2005-2006 CodeSourcery. + * + * This code is licensed under the GPL. + */ + +#include "hw/sh4/sh_intc.h" +#include "hw/hw.h" +#include "hw/sh4/sh.h" + +//#define DEBUG_INTC +//#define DEBUG_INTC_SOURCES + +#define INTC_A7(x) ((x) & 0x1fffffff) + +void sh_intc_toggle_source(struct intc_source *source, +			   int enable_adj, int assert_adj) +{ +    int enable_changed = 0; +    int pending_changed = 0; +    int old_pending; + +    if ((source->enable_count == source->enable_max) && (enable_adj == -1)) +        enable_changed = -1; + +    source->enable_count += enable_adj; + +    if (source->enable_count == source->enable_max) +        enable_changed = 1; + +    source->asserted += assert_adj; + +    old_pending = source->pending; +    source->pending = source->asserted && +      (source->enable_count == source->enable_max); + +    if (old_pending != source->pending) +        pending_changed = 1; + +    if (pending_changed) { +        if (source->pending) { +            source->parent->pending++; +            if (source->parent->pending == 1) { +                cpu_interrupt(first_cpu, CPU_INTERRUPT_HARD); +            } +        } else { +            source->parent->pending--; +            if (source->parent->pending == 0) { +                cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD); +            } +	} +    } + +  if (enable_changed || assert_adj || pending_changed) { +#ifdef DEBUG_INTC_SOURCES +            printf("sh_intc: (%d/%d/%d/%d) interrupt source 0x%x %s%s%s\n", +		   source->parent->pending, +		   source->asserted, +		   source->enable_count, +		   source->enable_max, +		   source->vect, +		   source->asserted ? "asserted " : +		   assert_adj ? "deasserted" : "", +		   enable_changed == 1 ? "enabled " : +		   enable_changed == -1 ? "disabled " : "", +		   source->pending ? 
"pending" : ""); +#endif +  } +} + +static void sh_intc_set_irq (void *opaque, int n, int level) +{ +  struct intc_desc *desc = opaque; +  struct intc_source *source = &(desc->sources[n]); + +  if (level && !source->asserted) +    sh_intc_toggle_source(source, 0, 1); +  else if (!level && source->asserted) +    sh_intc_toggle_source(source, 0, -1); +} + +int sh_intc_get_pending_vector(struct intc_desc *desc, int imask) +{ +    unsigned int i; + +    /* slow: use a linked lists of pending sources instead */ +    /* wrong: take interrupt priority into account (one list per priority) */ + +    if (imask == 0x0f) { +        return -1; /* FIXME, update code to include priority per source */ +    } + +    for (i = 0; i < desc->nr_sources; i++) { +        struct intc_source *source = desc->sources + i; + +	if (source->pending) { +#ifdef DEBUG_INTC_SOURCES +            printf("sh_intc: (%d) returning interrupt source 0x%x\n", +		   desc->pending, source->vect); +#endif +            return source->vect; +	} +    } + +    abort(); +} + +#define INTC_MODE_NONE       0 +#define INTC_MODE_DUAL_SET   1 +#define INTC_MODE_DUAL_CLR   2 +#define INTC_MODE_ENABLE_REG 3 +#define INTC_MODE_MASK_REG   4 +#define INTC_MODE_IS_PRIO    8 + +static unsigned int sh_intc_mode(unsigned long address, +				 unsigned long set_reg, unsigned long clr_reg) +{ +    if ((address != INTC_A7(set_reg)) && +	(address != INTC_A7(clr_reg))) +        return INTC_MODE_NONE; + +    if (set_reg && clr_reg) { +        if (address == INTC_A7(set_reg)) +            return INTC_MODE_DUAL_SET; +	else +            return INTC_MODE_DUAL_CLR; +    } + +    if (set_reg) +        return INTC_MODE_ENABLE_REG; +    else +        return INTC_MODE_MASK_REG; +} + +static void sh_intc_locate(struct intc_desc *desc, +			   unsigned long address, +			   unsigned long **datap, +			   intc_enum **enums, +			   unsigned int *first, +			   unsigned int *width, +			   unsigned int *modep) +{ +    unsigned int i, mode; + +    /* this is slow but works for now */ + +    if (desc->mask_regs) { +        for (i = 0; i < desc->nr_mask_regs; i++) { +	    struct intc_mask_reg *mr = desc->mask_regs + i; + +	    mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg); +	    if (mode == INTC_MODE_NONE) +                continue; + +	    *modep = mode; +	    *datap = &mr->value; +	    *enums = mr->enum_ids; +	    *first = mr->reg_width - 1; +	    *width = 1; +	    return; +	} +    } + +    if (desc->prio_regs) { +        for (i = 0; i < desc->nr_prio_regs; i++) { +	    struct intc_prio_reg *pr = desc->prio_regs + i; + +	    mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg); +	    if (mode == INTC_MODE_NONE) +                continue; + +	    *modep = mode | INTC_MODE_IS_PRIO; +	    *datap = &pr->value; +	    *enums = pr->enum_ids; +	    *first = (pr->reg_width / pr->field_width) - 1; +	    *width = pr->field_width; +	    return; +	} +    } + +    abort(); +} + +static void sh_intc_toggle_mask(struct intc_desc *desc, intc_enum id, +				int enable, int is_group) +{ +    struct intc_source *source = desc->sources + id; + +    if (!id) +	return; + +    if (!source->next_enum_id && (!source->enable_max || !source->vect)) { +#ifdef DEBUG_INTC_SOURCES +        printf("sh_intc: reserved interrupt source %d modified\n", id); +#endif +	return; +    } + +    if (source->vect) +        sh_intc_toggle_source(source, enable ? 
1 : -1, 0); + +#ifdef DEBUG_INTC +    else { +        printf("setting interrupt group %d to %d\n", id, !!enable); +    } +#endif + +    if ((is_group || !source->vect) && source->next_enum_id) { +        sh_intc_toggle_mask(desc, source->next_enum_id, enable, 1); +    } + +#ifdef DEBUG_INTC +    if (!source->vect) { +        printf("setting interrupt group %d to %d - done\n", id, !!enable); +    } +#endif +} + +static uint64_t sh_intc_read(void *opaque, hwaddr offset, +                             unsigned size) +{ +    struct intc_desc *desc = opaque; +    intc_enum *enum_ids = NULL; +    unsigned int first = 0; +    unsigned int width = 0; +    unsigned int mode = 0; +    unsigned long *valuep; + +#ifdef DEBUG_INTC +    printf("sh_intc_read 0x%lx\n", (unsigned long) offset); +#endif + +    sh_intc_locate(desc, (unsigned long)offset, &valuep,  +		   &enum_ids, &first, &width, &mode); +    return *valuep; +} + +static void sh_intc_write(void *opaque, hwaddr offset, +                          uint64_t value, unsigned size) +{ +    struct intc_desc *desc = opaque; +    intc_enum *enum_ids = NULL; +    unsigned int first = 0; +    unsigned int width = 0; +    unsigned int mode = 0; +    unsigned int k; +    unsigned long *valuep; +    unsigned long mask; + +#ifdef DEBUG_INTC +    printf("sh_intc_write 0x%lx 0x%08x\n", (unsigned long) offset, value); +#endif + +    sh_intc_locate(desc, (unsigned long)offset, &valuep,  +		   &enum_ids, &first, &width, &mode); + +    switch (mode) { +    case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO: break; +    case INTC_MODE_DUAL_SET: value |= *valuep; break; +    case INTC_MODE_DUAL_CLR: value = *valuep & ~value; break; +    default: abort(); +    } + +    for (k = 0; k <= first; k++) { +        mask = ((1 << width) - 1) << ((first - k) * width); + +	if ((*valuep & mask) == (value & mask)) +            continue; +#if 0 +	printf("k = %d, first = %d, enum = %d, mask = 0x%08x\n",  +	       k, first, enum_ids[k], (unsigned int)mask); +#endif +        sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0); +    } + +    *valuep = value; + +#ifdef DEBUG_INTC +    printf("sh_intc_write 0x%lx -> 0x%08x\n", (unsigned long) offset, value); +#endif +} + +static const MemoryRegionOps sh_intc_ops = { +    .read = sh_intc_read, +    .write = sh_intc_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +}; + +struct intc_source *sh_intc_source(struct intc_desc *desc, intc_enum id) +{ +    if (id) +        return desc->sources + id; + +    return NULL; +} + +static unsigned int sh_intc_register(MemoryRegion *sysmem, +                             struct intc_desc *desc, +                             const unsigned long address, +                             const char *type, +                             const char *action, +                             const unsigned int index) +{ +    char name[60]; +    MemoryRegion *iomem, *iomem_p4, *iomem_a7; + +    if (!address) { +        return 0; +    } + +    iomem = &desc->iomem; +    iomem_p4 = desc->iomem_aliases + index; +    iomem_a7 = iomem_p4 + 1; + +#define SH_INTC_IOMEM_FORMAT "interrupt-controller-%s-%s-%s" +    snprintf(name, sizeof(name), SH_INTC_IOMEM_FORMAT, type, action, "p4"); +    memory_region_init_alias(iomem_p4, NULL, name, iomem, INTC_A7(address), 4); +    memory_region_add_subregion(sysmem, P4ADDR(address), iomem_p4); + +    snprintf(name, sizeof(name), SH_INTC_IOMEM_FORMAT, type, action, "a7"); +    memory_region_init_alias(iomem_a7, NULL, name, iomem, INTC_A7(address), 4); +    
memory_region_add_subregion(sysmem, A7ADDR(address), iomem_a7); +#undef SH_INTC_IOMEM_FORMAT + +    /* used to increment aliases index */ +    return 2; +} + +static void sh_intc_register_source(struct intc_desc *desc, +				    intc_enum source, +				    struct intc_group *groups, +				    int nr_groups) +{ +    unsigned int i, k; +    struct intc_source *s; + +    if (desc->mask_regs) { +        for (i = 0; i < desc->nr_mask_regs; i++) { +	    struct intc_mask_reg *mr = desc->mask_regs + i; + +	    for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) { +                if (mr->enum_ids[k] != source) +                    continue; + +		s = sh_intc_source(desc, mr->enum_ids[k]); +		if (s) +                    s->enable_max++; +	    } +	} +    } + +    if (desc->prio_regs) { +        for (i = 0; i < desc->nr_prio_regs; i++) { +	    struct intc_prio_reg *pr = desc->prio_regs + i; + +	    for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) { +                if (pr->enum_ids[k] != source) +                    continue; + +		s = sh_intc_source(desc, pr->enum_ids[k]); +		if (s) +                    s->enable_max++; +	    } +	} +    } + +    if (groups) { +        for (i = 0; i < nr_groups; i++) { +	    struct intc_group *gr = groups + i; + +	    for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) { +                if (gr->enum_ids[k] != source) +                    continue; + +		s = sh_intc_source(desc, gr->enum_ids[k]); +		if (s) +                    s->enable_max++; +	    } +	} +    } + +} + +void sh_intc_register_sources(struct intc_desc *desc, +			      struct intc_vect *vectors, +			      int nr_vectors, +			      struct intc_group *groups, +			      int nr_groups) +{ +    unsigned int i, k; +    struct intc_source *s; + +    for (i = 0; i < nr_vectors; i++) { +	struct intc_vect *vect = vectors + i; + +	sh_intc_register_source(desc, vect->enum_id, groups, nr_groups); +	s = sh_intc_source(desc, vect->enum_id); +        if (s) { +            s->vect = vect->vect; + +#ifdef DEBUG_INTC_SOURCES +            printf("sh_intc: registered source %d -> 0x%04x (%d/%d)\n", +                   vect->enum_id, s->vect, s->enable_count, s->enable_max); +#endif +        } +    } + +    if (groups) { +        for (i = 0; i < nr_groups; i++) { +	    struct intc_group *gr = groups + i; + +	    s = sh_intc_source(desc, gr->enum_id); +	    s->next_enum_id = gr->enum_ids[0]; + +	    for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) { +                if (!gr->enum_ids[k]) +                    continue; + +		s = sh_intc_source(desc, gr->enum_ids[k - 1]); +		s->next_enum_id = gr->enum_ids[k]; +	    } + +#ifdef DEBUG_INTC_SOURCES +	    printf("sh_intc: registered group %d (%d/%d)\n", +		   gr->enum_id, s->enable_count, s->enable_max); +#endif +	} +    } +} + +int sh_intc_init(MemoryRegion *sysmem, +         struct intc_desc *desc, +		 int nr_sources, +		 struct intc_mask_reg *mask_regs, +		 int nr_mask_regs, +		 struct intc_prio_reg *prio_regs, +		 int nr_prio_regs) +{ +    unsigned int i, j; + +    desc->pending = 0; +    desc->nr_sources = nr_sources; +    desc->mask_regs = mask_regs; +    desc->nr_mask_regs = nr_mask_regs; +    desc->prio_regs = prio_regs; +    desc->nr_prio_regs = nr_prio_regs; +    /* Allocate 4 MemoryRegions per register (2 actions * 2 aliases). 
+     **/ +    desc->iomem_aliases = g_new0(MemoryRegion, +                                 (nr_mask_regs + nr_prio_regs) * 4); + +    j = 0; +    i = sizeof(struct intc_source) * nr_sources; +    desc->sources = g_malloc0(i); + +    for (i = 0; i < desc->nr_sources; i++) { +        struct intc_source *source = desc->sources + i; + +        source->parent = desc; +    } + +    desc->irqs = qemu_allocate_irqs(sh_intc_set_irq, desc, nr_sources); +  +    memory_region_init_io(&desc->iomem, NULL, &sh_intc_ops, desc, +                          "interrupt-controller", 0x100000000ULL); + +#define INT_REG_PARAMS(reg_struct, type, action, j) \ +        reg_struct->action##_reg, #type, #action, j +    if (desc->mask_regs) { +        for (i = 0; i < desc->nr_mask_regs; i++) { +	    struct intc_mask_reg *mr = desc->mask_regs + i; + +            j += sh_intc_register(sysmem, desc, +                                  INT_REG_PARAMS(mr, mask, set, j)); +            j += sh_intc_register(sysmem, desc, +                                  INT_REG_PARAMS(mr, mask, clr, j)); +	} +    } + +    if (desc->prio_regs) { +        for (i = 0; i < desc->nr_prio_regs; i++) { +	    struct intc_prio_reg *pr = desc->prio_regs + i; + +            j += sh_intc_register(sysmem, desc, +                                  INT_REG_PARAMS(pr, prio, set, j)); +            j += sh_intc_register(sysmem, desc, +                                  INT_REG_PARAMS(pr, prio, clr, j)); +	} +    } +#undef INT_REG_PARAMS + +    return 0; +} + +/* Assert level <n> IRL interrupt.  +   0:deassert. 1:lowest priority,... 15:highest priority. */ +void sh_intc_set_irl(void *opaque, int n, int level) +{ +    struct intc_source *s = opaque; +    int i, irl = level ^ 15; +    for (i = 0; (s = sh_intc_source(s->parent, s->next_enum_id)); i++) { +	if (i == irl) +	    sh_intc_toggle_source(s, s->enable_count?0:1, s->asserted?0:1); +	else +	    if (s->asserted) +	        sh_intc_toggle_source(s, 0, -1); +    } +} diff --git a/hw/intc/slavio_intctl.c b/hw/intc/slavio_intctl.c new file mode 100644 index 00000000..f22aba03 --- /dev/null +++ b/hw/intc/slavio_intctl.c @@ -0,0 +1,471 @@ +/* + * QEMU Sparc SLAVIO interrupt controller emulation + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "hw/sparc/sun4m.h" +#include "monitor/monitor.h" +#include "hw/sysbus.h" +#include "trace.h" + +//#define DEBUG_IRQ_COUNT + +/* + * Registers of interrupt controller in sun4m. 
+ * + * This is the interrupt controller part of chip STP2001 (Slave I/O), also + * produced as NCR89C105. See + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C105.txt + * + * There is a system master controller and one for each cpu. + * + */ + +#define MAX_CPUS 16 +#define MAX_PILS 16 + +struct SLAVIO_INTCTLState; + +typedef struct SLAVIO_CPUINTCTLState { +    MemoryRegion iomem; +    struct SLAVIO_INTCTLState *master; +    uint32_t intreg_pending; +    uint32_t cpu; +    uint32_t irl_out; +} SLAVIO_CPUINTCTLState; + +#define TYPE_SLAVIO_INTCTL "slavio_intctl" +#define SLAVIO_INTCTL(obj) \ +    OBJECT_CHECK(SLAVIO_INTCTLState, (obj), TYPE_SLAVIO_INTCTL) + +typedef struct SLAVIO_INTCTLState { +    SysBusDevice parent_obj; + +    MemoryRegion iomem; +#ifdef DEBUG_IRQ_COUNT +    uint64_t irq_count[32]; +#endif +    qemu_irq cpu_irqs[MAX_CPUS][MAX_PILS]; +    SLAVIO_CPUINTCTLState slaves[MAX_CPUS]; +    uint32_t intregm_pending; +    uint32_t intregm_disabled; +    uint32_t target_cpu; +} SLAVIO_INTCTLState; + +#define INTCTL_MAXADDR 0xf +#define INTCTL_SIZE (INTCTL_MAXADDR + 1) +#define INTCTLM_SIZE 0x14 +#define MASTER_IRQ_MASK ~0x0fa2007f +#define MASTER_DISABLE 0x80000000 +#define CPU_SOFTIRQ_MASK 0xfffe0000 +#define CPU_IRQ_INT15_IN (1 << 15) +#define CPU_IRQ_TIMER_IN (1 << 14) + +static void slavio_check_interrupts(SLAVIO_INTCTLState *s, int set_irqs); + +// per-cpu interrupt controller +static uint64_t slavio_intctl_mem_readl(void *opaque, hwaddr addr, +                                        unsigned size) +{ +    SLAVIO_CPUINTCTLState *s = opaque; +    uint32_t saddr, ret; + +    saddr = addr >> 2; +    switch (saddr) { +    case 0: +        ret = s->intreg_pending; +        break; +    default: +        ret = 0; +        break; +    } +    trace_slavio_intctl_mem_readl(s->cpu, addr, ret); + +    return ret; +} + +static void slavio_intctl_mem_writel(void *opaque, hwaddr addr, +                                     uint64_t val, unsigned size) +{ +    SLAVIO_CPUINTCTLState *s = opaque; +    uint32_t saddr; + +    saddr = addr >> 2; +    trace_slavio_intctl_mem_writel(s->cpu, addr, val); +    switch (saddr) { +    case 1: // clear pending softints +        val &= CPU_SOFTIRQ_MASK | CPU_IRQ_INT15_IN; +        s->intreg_pending &= ~val; +        slavio_check_interrupts(s->master, 1); +        trace_slavio_intctl_mem_writel_clear(s->cpu, val, s->intreg_pending); +        break; +    case 2: // set softint +        val &= CPU_SOFTIRQ_MASK; +        s->intreg_pending |= val; +        slavio_check_interrupts(s->master, 1); +        trace_slavio_intctl_mem_writel_set(s->cpu, val, s->intreg_pending); +        break; +    default: +        break; +    } +} + +static const MemoryRegionOps slavio_intctl_mem_ops = { +    .read = slavio_intctl_mem_readl, +    .write = slavio_intctl_mem_writel, +    .endianness = DEVICE_NATIVE_ENDIAN, +    .valid = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +// master system interrupt controller +static uint64_t slavio_intctlm_mem_readl(void *opaque, hwaddr addr, +                                         unsigned size) +{ +    SLAVIO_INTCTLState *s = opaque; +    uint32_t saddr, ret; + +    saddr = addr >> 2; +    switch (saddr) { +    case 0: +        ret = s->intregm_pending & ~MASTER_DISABLE; +        break; +    case 1: +        ret = s->intregm_disabled & MASTER_IRQ_MASK; +        break; +    case 4: +        ret = s->target_cpu; +        break; +    default: +        ret = 0; +        break; +    } +  
  trace_slavio_intctlm_mem_readl(addr, ret); + +    return ret; +} + +static void slavio_intctlm_mem_writel(void *opaque, hwaddr addr, +                                      uint64_t val, unsigned size) +{ +    SLAVIO_INTCTLState *s = opaque; +    uint32_t saddr; + +    saddr = addr >> 2; +    trace_slavio_intctlm_mem_writel(addr, val); +    switch (saddr) { +    case 2: // clear (enable) +        // Force clear unused bits +        val &= MASTER_IRQ_MASK; +        s->intregm_disabled &= ~val; +        trace_slavio_intctlm_mem_writel_enable(val, s->intregm_disabled); +        slavio_check_interrupts(s, 1); +        break; +    case 3: // set (disable; doesn't affect pending) +        // Force clear unused bits +        val &= MASTER_IRQ_MASK; +        s->intregm_disabled |= val; +        slavio_check_interrupts(s, 1); +        trace_slavio_intctlm_mem_writel_disable(val, s->intregm_disabled); +        break; +    case 4: +        s->target_cpu = val & (MAX_CPUS - 1); +        slavio_check_interrupts(s, 1); +        trace_slavio_intctlm_mem_writel_target(s->target_cpu); +        break; +    default: +        break; +    } +} + +static const MemoryRegionOps slavio_intctlm_mem_ops = { +    .read = slavio_intctlm_mem_readl, +    .write = slavio_intctlm_mem_writel, +    .endianness = DEVICE_NATIVE_ENDIAN, +    .valid = { +        .min_access_size = 4, +        .max_access_size = 4, +    }, +}; + +void slavio_pic_info(Monitor *mon, DeviceState *dev) +{ +    SLAVIO_INTCTLState *s = SLAVIO_INTCTL(dev); +    int i; + +    for (i = 0; i < MAX_CPUS; i++) { +        monitor_printf(mon, "per-cpu %d: pending 0x%08x\n", i, +                       s->slaves[i].intreg_pending); +    } +    monitor_printf(mon, "master: pending 0x%08x, disabled 0x%08x\n", +                   s->intregm_pending, s->intregm_disabled); +} + +void slavio_irq_info(Monitor *mon, DeviceState *dev) +{ +#ifndef DEBUG_IRQ_COUNT +    monitor_printf(mon, "irq statistic code not compiled.\n"); +#else +    SLAVIO_INTCTLState *s = SLAVIO_INTCTL(dev); +    int i; +    int64_t count; + +    s = SLAVIO_INTCTL(dev); +    monitor_printf(mon, "IRQ statistics:\n"); +    for (i = 0; i < 32; i++) { +        count = s->irq_count[i]; +        if (count > 0) +            monitor_printf(mon, "%2d: %" PRId64 "\n", i, count); +    } +#endif +} + +static const uint32_t intbit_to_level[] = { +    2, 3, 5, 7, 9, 11, 13, 2,   3, 5, 7, 9, 11, 13, 12, 12, +    6, 13, 4, 10, 8, 9, 11, 0,  0, 0, 0, 15, 15, 15, 15, 0, +}; + +static void slavio_check_interrupts(SLAVIO_INTCTLState *s, int set_irqs) +{ +    uint32_t pending = s->intregm_pending, pil_pending; +    unsigned int i, j; + +    pending &= ~s->intregm_disabled; + +    trace_slavio_check_interrupts(pending, s->intregm_disabled); +    for (i = 0; i < MAX_CPUS; i++) { +        pil_pending = 0; + +        /* If we are the current interrupt target, get hard interrupts */ +        if (pending && !(s->intregm_disabled & MASTER_DISABLE) && +            (i == s->target_cpu)) { +            for (j = 0; j < 32; j++) { +                if ((pending & (1 << j)) && intbit_to_level[j]) { +                    pil_pending |= 1 << intbit_to_level[j]; +                } +            } +        } + +        /* Calculate current pending hard interrupts for display */ +        s->slaves[i].intreg_pending &= CPU_SOFTIRQ_MASK | CPU_IRQ_INT15_IN | +            CPU_IRQ_TIMER_IN; +        if (i == s->target_cpu) { +            for (j = 0; j < 32; j++) { +                if ((s->intregm_pending & (1U << j)) && intbit_to_level[j]) { + 
                   s->slaves[i].intreg_pending |= 1 << intbit_to_level[j]; +                } +            } +        } + +        /* Level 15 and CPU timer interrupts are only masked when +           the MASTER_DISABLE bit is set */ +        if (!(s->intregm_disabled & MASTER_DISABLE)) { +            pil_pending |= s->slaves[i].intreg_pending & +                (CPU_IRQ_INT15_IN | CPU_IRQ_TIMER_IN); +        } + +        /* Add soft interrupts */ +        pil_pending |= (s->slaves[i].intreg_pending & CPU_SOFTIRQ_MASK) >> 16; + +        if (set_irqs) { +            /* Since there is not really an interrupt 0 (and pil_pending +             * and irl_out bit zero are thus always zero) there is no need +             * to do anything with cpu_irqs[i][0] and it is OK not to do +             * the j=0 iteration of this loop. +             */ +            for (j = MAX_PILS-1; j > 0; j--) { +                if (pil_pending & (1 << j)) { +                    if (!(s->slaves[i].irl_out & (1 << j))) { +                        qemu_irq_raise(s->cpu_irqs[i][j]); +                    } +                } else { +                    if (s->slaves[i].irl_out & (1 << j)) { +                        qemu_irq_lower(s->cpu_irqs[i][j]); +                    } +                } +            } +        } +        s->slaves[i].irl_out = pil_pending; +    } +} + +/* + * "irq" here is the bit number in the system interrupt register to + * separate serial and keyboard interrupts sharing a level. + */ +static void slavio_set_irq(void *opaque, int irq, int level) +{ +    SLAVIO_INTCTLState *s = opaque; +    uint32_t mask = 1 << irq; +    uint32_t pil = intbit_to_level[irq]; +    unsigned int i; + +    trace_slavio_set_irq(s->target_cpu, irq, pil, level); +    if (pil > 0) { +        if (level) { +#ifdef DEBUG_IRQ_COUNT +            s->irq_count[pil]++; +#endif +            s->intregm_pending |= mask; +            if (pil == 15) { +                for (i = 0; i < MAX_CPUS; i++) { +                    s->slaves[i].intreg_pending |= 1 << pil; +                } +            } +        } else { +            s->intregm_pending &= ~mask; +            if (pil == 15) { +                for (i = 0; i < MAX_CPUS; i++) { +                    s->slaves[i].intreg_pending &= ~(1 << pil); +                } +            } +        } +        slavio_check_interrupts(s, 1); +    } +} + +static void slavio_set_timer_irq_cpu(void *opaque, int cpu, int level) +{ +    SLAVIO_INTCTLState *s = opaque; + +    trace_slavio_set_timer_irq_cpu(cpu, level); + +    if (level) { +        s->slaves[cpu].intreg_pending |= CPU_IRQ_TIMER_IN; +    } else { +        s->slaves[cpu].intreg_pending &= ~CPU_IRQ_TIMER_IN; +    } + +    slavio_check_interrupts(s, 1); +} + +static void slavio_set_irq_all(void *opaque, int irq, int level) +{ +    if (irq < 32) { +        slavio_set_irq(opaque, irq, level); +    } else { +        slavio_set_timer_irq_cpu(opaque, irq - 32, level); +    } +} + +static int vmstate_intctl_post_load(void *opaque, int version_id) +{ +    SLAVIO_INTCTLState *s = opaque; + +    slavio_check_interrupts(s, 0); +    return 0; +} + +static const VMStateDescription vmstate_intctl_cpu = { +    .name ="slavio_intctl_cpu", +    .version_id = 1, +    .minimum_version_id = 1, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(intreg_pending, SLAVIO_CPUINTCTLState), +        VMSTATE_END_OF_LIST() +    } +}; + +static const VMStateDescription vmstate_intctl = { +    .name ="slavio_intctl", +    .version_id = 1, +    .minimum_version_id = 1, + 
   .post_load = vmstate_intctl_post_load, +    .fields = (VMStateField[]) { +        VMSTATE_STRUCT_ARRAY(slaves, SLAVIO_INTCTLState, MAX_CPUS, 1, +                             vmstate_intctl_cpu, SLAVIO_CPUINTCTLState), +        VMSTATE_UINT32(intregm_pending, SLAVIO_INTCTLState), +        VMSTATE_UINT32(intregm_disabled, SLAVIO_INTCTLState), +        VMSTATE_UINT32(target_cpu, SLAVIO_INTCTLState), +        VMSTATE_END_OF_LIST() +    } +}; + +static void slavio_intctl_reset(DeviceState *d) +{ +    SLAVIO_INTCTLState *s = SLAVIO_INTCTL(d); +    int i; + +    for (i = 0; i < MAX_CPUS; i++) { +        s->slaves[i].intreg_pending = 0; +        s->slaves[i].irl_out = 0; +    } +    s->intregm_disabled = ~MASTER_IRQ_MASK; +    s->intregm_pending = 0; +    s->target_cpu = 0; +    slavio_check_interrupts(s, 0); +} + +static int slavio_intctl_init1(SysBusDevice *sbd) +{ +    DeviceState *dev = DEVICE(sbd); +    SLAVIO_INTCTLState *s = SLAVIO_INTCTL(dev); +    unsigned int i, j; +    char slave_name[45]; + +    qdev_init_gpio_in(dev, slavio_set_irq_all, 32 + MAX_CPUS); +    memory_region_init_io(&s->iomem, OBJECT(s), &slavio_intctlm_mem_ops, s, +                          "master-interrupt-controller", INTCTLM_SIZE); +    sysbus_init_mmio(sbd, &s->iomem); + +    for (i = 0; i < MAX_CPUS; i++) { +        snprintf(slave_name, sizeof(slave_name), +                 "slave-interrupt-controller-%i", i); +        for (j = 0; j < MAX_PILS; j++) { +            sysbus_init_irq(sbd, &s->cpu_irqs[i][j]); +        } +        memory_region_init_io(&s->slaves[i].iomem, OBJECT(s), +                              &slavio_intctl_mem_ops, +                              &s->slaves[i], slave_name, INTCTL_SIZE); +        sysbus_init_mmio(sbd, &s->slaves[i].iomem); +        s->slaves[i].cpu = i; +        s->slaves[i].master = s; +    } + +    return 0; +} + +static void slavio_intctl_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + +    k->init = slavio_intctl_init1; +    dc->reset = slavio_intctl_reset; +    dc->vmsd = &vmstate_intctl; +} + +static const TypeInfo slavio_intctl_info = { +    .name          = TYPE_SLAVIO_INTCTL, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(SLAVIO_INTCTLState), +    .class_init    = slavio_intctl_class_init, +}; + +static void slavio_intctl_register_types(void) +{ +    type_register_static(&slavio_intctl_info); +} + +type_init(slavio_intctl_register_types) diff --git a/hw/intc/xics.c b/hw/intc/xics.c new file mode 100644 index 00000000..924b1ae3 --- /dev/null +++ b/hw/intc/xics.c @@ -0,0 +1,1084 @@ +/* + * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator + * + * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics + * + * Copyright (c) 2010,2011 David Gibson, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ + +#include "hw/hw.h" +#include "trace.h" +#include "qemu/timer.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/xics.h" +#include "qemu/error-report.h" +#include "qapi/visitor.h" + +static int get_cpu_index_by_dt_id(int cpu_dt_id) +{ +    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id); + +    if (cpu) { +        return cpu->parent_obj.cpu_index; +    } + +    return -1; +} + +void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu) +{ +    CPUState *cs = CPU(cpu); +    CPUPPCState *env = &cpu->env; +    ICPState *ss = &icp->ss[cs->cpu_index]; +    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp); + +    assert(cs->cpu_index < icp->nr_servers); + +    if (info->cpu_setup) { +        info->cpu_setup(icp, cpu); +    } + +    switch (PPC_INPUT(env)) { +    case PPC_FLAGS_INPUT_POWER7: +        ss->output = env->irq_inputs[POWER7_INPUT_INT]; +        break; + +    case PPC_FLAGS_INPUT_970: +        ss->output = env->irq_inputs[PPC970_INPUT_INT]; +        break; + +    default: +        error_report("XICS interrupt controller does not support this CPU " +                     "bus model"); +        abort(); +    } +} + +/* + * XICS Common class - parent for emulated XICS and KVM-XICS + */ +static void xics_common_reset(DeviceState *d) +{ +    XICSState *icp = XICS_COMMON(d); +    int i; + +    for (i = 0; i < icp->nr_servers; i++) { +        device_reset(DEVICE(&icp->ss[i])); +    } + +    device_reset(DEVICE(icp->ics)); +} + +static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, +                                  void *opaque, const char *name, Error **errp) +{ +    XICSState *icp = XICS_COMMON(obj); +    int64_t value = icp->nr_irqs; + +    visit_type_int(v, &value, name, errp); +} + +static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, +                                  void *opaque, const char *name, Error **errp) +{ +    XICSState *icp = XICS_COMMON(obj); +    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp); +    Error *error = NULL; +    int64_t value; + +    visit_type_int(v, &value, name, &error); +    if (error) { +        error_propagate(errp, error); +        return; +    } +    if (icp->nr_irqs) { +        error_setg(errp, "Number of interrupts is already set to %u", +                   icp->nr_irqs); +        return; +    } + +    assert(info->set_nr_irqs); +    assert(icp->ics); +    info->set_nr_irqs(icp, value, errp); +} + +static void xics_prop_get_nr_servers(Object *obj, Visitor *v, +                                     void *opaque, const char *name, +                                     Error **errp) +{ +    XICSState *icp = XICS_COMMON(obj); +    int64_t value = icp->nr_servers; + +    visit_type_int(v, &value, name, errp); +} + +static void xics_prop_set_nr_servers(Object *obj, Visitor *v, +                                     void *opaque, const char *name, +                                     Error **errp) +{ +    XICSState *icp = XICS_COMMON(obj); +    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp); +    Error *error = NULL; +    int64_t value; + +    
visit_type_int(v, &value, name, &error); +    if (error) { +        error_propagate(errp, error); +        return; +    } +    if (icp->nr_servers) { +        error_setg(errp, "Number of servers is already set to %u", +                   icp->nr_servers); +        return; +    } + +    assert(info->set_nr_servers); +    info->set_nr_servers(icp, value, errp); +} + +static void xics_common_initfn(Object *obj) +{ +    object_property_add(obj, "nr_irqs", "int", +                        xics_prop_get_nr_irqs, xics_prop_set_nr_irqs, +                        NULL, NULL, NULL); +    object_property_add(obj, "nr_servers", "int", +                        xics_prop_get_nr_servers, xics_prop_set_nr_servers, +                        NULL, NULL, NULL); +} + +static void xics_common_class_init(ObjectClass *oc, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(oc); + +    dc->reset = xics_common_reset; +} + +static const TypeInfo xics_common_info = { +    .name          = TYPE_XICS_COMMON, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(XICSState), +    .class_size    = sizeof(XICSStateClass), +    .instance_init = xics_common_initfn, +    .class_init    = xics_common_class_init, +}; + +/* + * ICP: Presentation layer + */ + +#define XISR_MASK  0x00ffffff +#define CPPR_MASK  0xff000000 + +#define XISR(ss)   (((ss)->xirr) & XISR_MASK) +#define CPPR(ss)   (((ss)->xirr) >> 24) + +static void ics_reject(ICSState *ics, int nr); +static void ics_resend(ICSState *ics); +static void ics_eoi(ICSState *ics, int nr); + +static void icp_check_ipi(XICSState *icp, int server) +{ +    ICPState *ss = icp->ss + server; + +    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) { +        return; +    } + +    trace_xics_icp_check_ipi(server, ss->mfrr); + +    if (XISR(ss)) { +        ics_reject(icp->ics, XISR(ss)); +    } + +    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI; +    ss->pending_priority = ss->mfrr; +    qemu_irq_raise(ss->output); +} + +static void icp_resend(XICSState *icp, int server) +{ +    ICPState *ss = icp->ss + server; + +    if (ss->mfrr < CPPR(ss)) { +        icp_check_ipi(icp, server); +    } +    ics_resend(icp->ics); +} + +static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr) +{ +    ICPState *ss = icp->ss + server; +    uint8_t old_cppr; +    uint32_t old_xisr; + +    old_cppr = CPPR(ss); +    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24); + +    if (cppr < old_cppr) { +        if (XISR(ss) && (cppr <= ss->pending_priority)) { +            old_xisr = XISR(ss); +            ss->xirr &= ~XISR_MASK; /* Clear XISR */ +            ss->pending_priority = 0xff; +            qemu_irq_lower(ss->output); +            ics_reject(icp->ics, old_xisr); +        } +    } else { +        if (!XISR(ss)) { +            icp_resend(icp, server); +        } +    } +} + +static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr) +{ +    ICPState *ss = icp->ss + server; + +    ss->mfrr = mfrr; +    if (mfrr < CPPR(ss)) { +        icp_check_ipi(icp, server); +    } +} + +static uint32_t icp_accept(ICPState *ss) +{ +    uint32_t xirr = ss->xirr; + +    qemu_irq_lower(ss->output); +    ss->xirr = ss->pending_priority << 24; +    ss->pending_priority = 0xff; + +    trace_xics_icp_accept(xirr, ss->xirr); + +    return xirr; +} + +static void icp_eoi(XICSState *icp, int server, uint32_t xirr) +{ +    ICPState *ss = icp->ss + server; + +    /* Send EOI -> ICS */ +    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK); +    trace_xics_icp_eoi(server, xirr, ss->xirr); +   
 ics_eoi(icp->ics, xirr & XISR_MASK); +    if (!XISR(ss)) { +        icp_resend(icp, server); +    } +} + +static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority) +{ +    ICPState *ss = icp->ss + server; + +    trace_xics_icp_irq(server, nr, priority); + +    if ((priority >= CPPR(ss)) +        || (XISR(ss) && (ss->pending_priority <= priority))) { +        ics_reject(icp->ics, nr); +    } else { +        if (XISR(ss)) { +            ics_reject(icp->ics, XISR(ss)); +        } +        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK); +        ss->pending_priority = priority; +        trace_xics_icp_raise(ss->xirr, ss->pending_priority); +        qemu_irq_raise(ss->output); +    } +} + +static void icp_dispatch_pre_save(void *opaque) +{ +    ICPState *ss = opaque; +    ICPStateClass *info = ICP_GET_CLASS(ss); + +    if (info->pre_save) { +        info->pre_save(ss); +    } +} + +static int icp_dispatch_post_load(void *opaque, int version_id) +{ +    ICPState *ss = opaque; +    ICPStateClass *info = ICP_GET_CLASS(ss); + +    if (info->post_load) { +        return info->post_load(ss, version_id); +    } + +    return 0; +} + +static const VMStateDescription vmstate_icp_server = { +    .name = "icp/server", +    .version_id = 1, +    .minimum_version_id = 1, +    .pre_save = icp_dispatch_pre_save, +    .post_load = icp_dispatch_post_load, +    .fields = (VMStateField[]) { +        /* Sanity check */ +        VMSTATE_UINT32(xirr, ICPState), +        VMSTATE_UINT8(pending_priority, ICPState), +        VMSTATE_UINT8(mfrr, ICPState), +        VMSTATE_END_OF_LIST() +    }, +}; + +static void icp_reset(DeviceState *dev) +{ +    ICPState *icp = ICP(dev); + +    icp->xirr = 0; +    icp->pending_priority = 0xff; +    icp->mfrr = 0xff; + +    /* Make all outputs are deasserted */ +    qemu_set_irq(icp->output, 0); +} + +static void icp_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); + +    dc->reset = icp_reset; +    dc->vmsd = &vmstate_icp_server; +} + +static const TypeInfo icp_info = { +    .name = TYPE_ICP, +    .parent = TYPE_DEVICE, +    .instance_size = sizeof(ICPState), +    .class_init = icp_class_init, +    .class_size = sizeof(ICPStateClass), +}; + +/* + * ICS: Source layer + */ +static int ics_valid_irq(ICSState *ics, uint32_t nr) +{ +    return (nr >= ics->offset) +        && (nr < (ics->offset + ics->nr_irqs)); +} + +static void resend_msi(ICSState *ics, int srcno) +{ +    ICSIRQState *irq = ics->irqs + srcno; + +    /* FIXME: filter by server#? 
*/ +    if (irq->status & XICS_STATUS_REJECTED) { +        irq->status &= ~XICS_STATUS_REJECTED; +        if (irq->priority != 0xff) { +            icp_irq(ics->icp, irq->server, srcno + ics->offset, +                    irq->priority); +        } +    } +} + +static void resend_lsi(ICSState *ics, int srcno) +{ +    ICSIRQState *irq = ics->irqs + srcno; + +    if ((irq->priority != 0xff) +        && (irq->status & XICS_STATUS_ASSERTED) +        && !(irq->status & XICS_STATUS_SENT)) { +        irq->status |= XICS_STATUS_SENT; +        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority); +    } +} + +static void set_irq_msi(ICSState *ics, int srcno, int val) +{ +    ICSIRQState *irq = ics->irqs + srcno; + +    trace_xics_set_irq_msi(srcno, srcno + ics->offset); + +    if (val) { +        if (irq->priority == 0xff) { +            irq->status |= XICS_STATUS_MASKED_PENDING; +            trace_xics_masked_pending(); +        } else  { +            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority); +        } +    } +} + +static void set_irq_lsi(ICSState *ics, int srcno, int val) +{ +    ICSIRQState *irq = ics->irqs + srcno; + +    trace_xics_set_irq_lsi(srcno, srcno + ics->offset); +    if (val) { +        irq->status |= XICS_STATUS_ASSERTED; +    } else { +        irq->status &= ~XICS_STATUS_ASSERTED; +    } +    resend_lsi(ics, srcno); +} + +static void ics_set_irq(void *opaque, int srcno, int val) +{ +    ICSState *ics = (ICSState *)opaque; + +    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) { +        set_irq_lsi(ics, srcno, val); +    } else { +        set_irq_msi(ics, srcno, val); +    } +} + +static void write_xive_msi(ICSState *ics, int srcno) +{ +    ICSIRQState *irq = ics->irqs + srcno; + +    if (!(irq->status & XICS_STATUS_MASKED_PENDING) +        || (irq->priority == 0xff)) { +        return; +    } + +    irq->status &= ~XICS_STATUS_MASKED_PENDING; +    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority); +} + +static void write_xive_lsi(ICSState *ics, int srcno) +{ +    resend_lsi(ics, srcno); +} + +static void ics_write_xive(ICSState *ics, int nr, int server, +                           uint8_t priority, uint8_t saved_priority) +{ +    int srcno = nr - ics->offset; +    ICSIRQState *irq = ics->irqs + srcno; + +    irq->server = server; +    irq->priority = priority; +    irq->saved_priority = saved_priority; + +    trace_xics_ics_write_xive(nr, srcno, server, priority); + +    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) { +        write_xive_lsi(ics, srcno); +    } else { +        write_xive_msi(ics, srcno); +    } +} + +static void ics_reject(ICSState *ics, int nr) +{ +    ICSIRQState *irq = ics->irqs + nr - ics->offset; + +    trace_xics_ics_reject(nr, nr - ics->offset); +    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */ +    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */ +} + +static void ics_resend(ICSState *ics) +{ +    int i; + +    for (i = 0; i < ics->nr_irqs; i++) { +        /* FIXME: filter by server#? 
*/ +        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) { +            resend_lsi(ics, i); +        } else { +            resend_msi(ics, i); +        } +    } +} + +static void ics_eoi(ICSState *ics, int nr) +{ +    int srcno = nr - ics->offset; +    ICSIRQState *irq = ics->irqs + srcno; + +    trace_xics_ics_eoi(nr); + +    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) { +        irq->status &= ~XICS_STATUS_SENT; +    } +} + +static void ics_reset(DeviceState *dev) +{ +    ICSState *ics = ICS(dev); +    int i; +    uint8_t flags[ics->nr_irqs]; + +    for (i = 0; i < ics->nr_irqs; i++) { +        flags[i] = ics->irqs[i].flags; +    } + +    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs); + +    for (i = 0; i < ics->nr_irqs; i++) { +        ics->irqs[i].priority = 0xff; +        ics->irqs[i].saved_priority = 0xff; +        ics->irqs[i].flags = flags[i]; +    } +} + +static int ics_post_load(ICSState *ics, int version_id) +{ +    int i; + +    for (i = 0; i < ics->icp->nr_servers; i++) { +        icp_resend(ics->icp, i); +    } + +    return 0; +} + +static void ics_dispatch_pre_save(void *opaque) +{ +    ICSState *ics = opaque; +    ICSStateClass *info = ICS_GET_CLASS(ics); + +    if (info->pre_save) { +        info->pre_save(ics); +    } +} + +static int ics_dispatch_post_load(void *opaque, int version_id) +{ +    ICSState *ics = opaque; +    ICSStateClass *info = ICS_GET_CLASS(ics); + +    if (info->post_load) { +        return info->post_load(ics, version_id); +    } + +    return 0; +} + +static const VMStateDescription vmstate_ics_irq = { +    .name = "ics/irq", +    .version_id = 2, +    .minimum_version_id = 1, +    .fields = (VMStateField[]) { +        VMSTATE_UINT32(server, ICSIRQState), +        VMSTATE_UINT8(priority, ICSIRQState), +        VMSTATE_UINT8(saved_priority, ICSIRQState), +        VMSTATE_UINT8(status, ICSIRQState), +        VMSTATE_UINT8(flags, ICSIRQState), +        VMSTATE_END_OF_LIST() +    }, +}; + +static const VMStateDescription vmstate_ics = { +    .name = "ics", +    .version_id = 1, +    .minimum_version_id = 1, +    .pre_save = ics_dispatch_pre_save, +    .post_load = ics_dispatch_post_load, +    .fields = (VMStateField[]) { +        /* Sanity check */ +        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState), + +        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs, +                                             vmstate_ics_irq, ICSIRQState), +        VMSTATE_END_OF_LIST() +    }, +}; + +static void ics_initfn(Object *obj) +{ +    ICSState *ics = ICS(obj); + +    ics->offset = XICS_IRQ_BASE; +} + +static void ics_realize(DeviceState *dev, Error **errp) +{ +    ICSState *ics = ICS(dev); + +    if (!ics->nr_irqs) { +        error_setg(errp, "Number of interrupts needs to be greater 0"); +        return; +    } +    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState)); +    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs); +} + +static void ics_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    ICSStateClass *isc = ICS_CLASS(klass); + +    dc->realize = ics_realize; +    dc->vmsd = &vmstate_ics; +    dc->reset = ics_reset; +    isc->post_load = ics_post_load; +} + +static const TypeInfo ics_info = { +    .name = TYPE_ICS, +    .parent = TYPE_DEVICE, +    .instance_size = sizeof(ICSState), +    .class_init = ics_class_init, +    .class_size = sizeof(ICSStateClass), +    .instance_init = ics_initfn, +}; + +/* + * Exported functions + */ +static int xics_find_source(XICSState 
*icp, int irq) +{ +    int sources = 1; +    int src; + +    /* FIXME: implement multiple sources */ +    for (src = 0; src < sources; ++src) { +        ICSState *ics = &icp->ics[src]; +        if (ics_valid_irq(ics, irq)) { +            return src; +        } +    } + +    return -1; +} + +qemu_irq xics_get_qirq(XICSState *icp, int irq) +{ +    int src = xics_find_source(icp, irq); + +    if (src >= 0) { +        ICSState *ics = &icp->ics[src]; +        return ics->qirqs[irq - ics->offset]; +    } + +    return NULL; +} + +static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi) +{ +    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK)); + +    ics->irqs[srcno].flags |= +        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI; +} + +void xics_set_irq_type(XICSState *icp, int irq, bool lsi) +{ +    int src = xics_find_source(icp, irq); +    ICSState *ics; + +    assert(src >= 0); + +    ics = &icp->ics[src]; +    ics_set_irq_type(ics, irq - ics->offset, lsi); +} + +#define ICS_IRQ_FREE(ics, srcno)   \ +    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK))) + +static int ics_find_free_block(ICSState *ics, int num, int alignnum) +{ +    int first, i; + +    for (first = 0; first < ics->nr_irqs; first += alignnum) { +        if (num > (ics->nr_irqs - first)) { +            return -1; +        } +        for (i = first; i < first + num; ++i) { +            if (!ICS_IRQ_FREE(ics, i)) { +                break; +            } +        } +        if (i == (first + num)) { +            return first; +        } +    } + +    return -1; +} + +int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi) +{ +    ICSState *ics = &icp->ics[src]; +    int irq; + +    if (irq_hint) { +        assert(src == xics_find_source(icp, irq_hint)); +        if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) { +            trace_xics_alloc_failed_hint(src, irq_hint); +            return -1; +        } +        irq = irq_hint; +    } else { +        irq = ics_find_free_block(ics, 1, 1); +        if (irq < 0) { +            trace_xics_alloc_failed_no_left(src); +            return -1; +        } +        irq += ics->offset; +    } + +    ics_set_irq_type(ics, irq - ics->offset, lsi); +    trace_xics_alloc(src, irq); + +    return irq; +} + +/* + * Allocate block of consequtive IRQs, returns a number of the first. + * If align==true, aligns the first IRQ number to num. + */ +int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align) +{ +    int i, first = -1; +    ICSState *ics = &icp->ics[src]; + +    assert(src == 0); +    /* +     * MSIMesage::data is used for storing VIRQ so +     * it has to be aligned to num to support multiple +     * MSI vectors. MSI-X is not affected by this. +     * The hint is used for the first IRQ, the rest should +     * be allocated continuously. 
+     */ +    if (align) { +        assert((num == 1) || (num == 2) || (num == 4) || +               (num == 8) || (num == 16) || (num == 32)); +        first = ics_find_free_block(ics, num, num); +    } else { +        first = ics_find_free_block(ics, num, 1); +    } + +    if (first >= 0) { +        for (i = first; i < first + num; ++i) { +            ics_set_irq_type(ics, i, lsi); +        } +    } +    first += ics->offset; + +    trace_xics_alloc_block(src, first, num, lsi, align); + +    return first; +} + +static void ics_free(ICSState *ics, int srcno, int num) +{ +    int i; + +    for (i = srcno; i < srcno + num; ++i) { +        if (ICS_IRQ_FREE(ics, i)) { +            trace_xics_ics_free_warn(ics - ics->icp->ics, i + ics->offset); +        } +        memset(&ics->irqs[i], 0, sizeof(ICSIRQState)); +    } +} + +void xics_free(XICSState *icp, int irq, int num) +{ +    int src = xics_find_source(icp, irq); + +    if (src >= 0) { +        ICSState *ics = &icp->ics[src]; + +        /* FIXME: implement multiple sources */ +        assert(src == 0); + +        trace_xics_ics_free(ics - icp->ics, irq, num); +        ics_free(ics, irq - ics->offset, num); +    } +} + +/* + * Guest interfaces + */ + +static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr, +                           target_ulong opcode, target_ulong *args) +{ +    CPUState *cs = CPU(cpu); +    target_ulong cppr = args[0]; + +    icp_set_cppr(spapr->icp, cs->cpu_index, cppr); +    return H_SUCCESS; +} + +static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr, +                          target_ulong opcode, target_ulong *args) +{ +    target_ulong server = get_cpu_index_by_dt_id(args[0]); +    target_ulong mfrr = args[1]; + +    if (server >= spapr->icp->nr_servers) { +        return H_PARAMETER; +    } + +    icp_set_mfrr(spapr->icp, server, mfrr); +    return H_SUCCESS; +} + +static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr, +                           target_ulong opcode, target_ulong *args) +{ +    CPUState *cs = CPU(cpu); +    uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index); + +    args[0] = xirr; +    return H_SUCCESS; +} + +static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr, +                             target_ulong opcode, target_ulong *args) +{ +    CPUState *cs = CPU(cpu); +    ICPState *ss = &spapr->icp->ss[cs->cpu_index]; +    uint32_t xirr = icp_accept(ss); + +    args[0] = xirr; +    args[1] = cpu_get_real_ticks(); +    return H_SUCCESS; +} + +static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr, +                          target_ulong opcode, target_ulong *args) +{ +    CPUState *cs = CPU(cpu); +    target_ulong xirr = args[0]; + +    icp_eoi(spapr->icp, cs->cpu_index, xirr); +    return H_SUCCESS; +} + +static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr, +                            target_ulong opcode, target_ulong *args) +{ +    CPUState *cs = CPU(cpu); +    ICPState *ss = &spapr->icp->ss[cs->cpu_index]; + +    args[0] = ss->xirr; +    args[1] = ss->mfrr; + +    return H_SUCCESS; +} + +static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr, +                          uint32_t token, +                          uint32_t nargs, target_ulong args, +                          uint32_t nret, target_ulong rets) +{ +    ICSState *ics = spapr->icp->ics; +    uint32_t nr, server, priority; + +    if ((nargs != 3) || (nret != 1)) { +        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +        return; 
+    } + +    nr = rtas_ld(args, 0); +    server = get_cpu_index_by_dt_id(rtas_ld(args, 1)); +    priority = rtas_ld(args, 2); + +    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers) +        || (priority > 0xff)) { +        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +        return; +    } + +    ics_write_xive(ics, nr, server, priority, priority); + +    rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr, +                          uint32_t token, +                          uint32_t nargs, target_ulong args, +                          uint32_t nret, target_ulong rets) +{ +    ICSState *ics = spapr->icp->ics; +    uint32_t nr; + +    if ((nargs != 1) || (nret != 3)) { +        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +        return; +    } + +    nr = rtas_ld(args, 0); + +    if (!ics_valid_irq(ics, nr)) { +        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +        return; +    } + +    rtas_st(rets, 0, RTAS_OUT_SUCCESS); +    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server); +    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority); +} + +static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr, +                         uint32_t token, +                         uint32_t nargs, target_ulong args, +                         uint32_t nret, target_ulong rets) +{ +    ICSState *ics = spapr->icp->ics; +    uint32_t nr; + +    if ((nargs != 1) || (nret != 1)) { +        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +        return; +    } + +    nr = rtas_ld(args, 0); + +    if (!ics_valid_irq(ics, nr)) { +        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +        return; +    } + +    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff, +                   ics->irqs[nr - ics->offset].priority); + +    rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr, +                        uint32_t token, +                        uint32_t nargs, target_ulong args, +                        uint32_t nret, target_ulong rets) +{ +    ICSState *ics = spapr->icp->ics; +    uint32_t nr; + +    if ((nargs != 1) || (nret != 1)) { +        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +        return; +    } + +    nr = rtas_ld(args, 0); + +    if (!ics_valid_irq(ics, nr)) { +        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +        return; +    } + +    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, +                   ics->irqs[nr - ics->offset].saved_priority, +                   ics->irqs[nr - ics->offset].saved_priority); + +    rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +/* + * XICS + */ + +static void xics_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp) +{ +    icp->nr_irqs = icp->ics->nr_irqs = nr_irqs; +} + +static void xics_set_nr_servers(XICSState *icp, uint32_t nr_servers, +                                Error **errp) +{ +    int i; + +    icp->nr_servers = nr_servers; + +    icp->ss = g_malloc0(icp->nr_servers*sizeof(ICPState)); +    for (i = 0; i < icp->nr_servers; i++) { +        char buffer[32]; +        object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_ICP); +        snprintf(buffer, sizeof(buffer), "icp[%d]", i); +        object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]), +                                  errp); +    } +} + +static void xics_realize(DeviceState *dev, Error **errp) +{ +    XICSState *icp = XICS(dev); +    Error *error = NULL; +    int i; + +    if (!icp->nr_servers) { +        error_setg(errp, 
"Number of servers needs to be greater 0"); +        return; +    } + +    /* Registration of global state belongs into realize */ +    spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive); +    spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive); +    spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off); +    spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on); + +    spapr_register_hypercall(H_CPPR, h_cppr); +    spapr_register_hypercall(H_IPI, h_ipi); +    spapr_register_hypercall(H_XIRR, h_xirr); +    spapr_register_hypercall(H_XIRR_X, h_xirr_x); +    spapr_register_hypercall(H_EOI, h_eoi); +    spapr_register_hypercall(H_IPOLL, h_ipoll); + +    object_property_set_bool(OBJECT(icp->ics), true, "realized", &error); +    if (error) { +        error_propagate(errp, error); +        return; +    } + +    for (i = 0; i < icp->nr_servers; i++) { +        object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error); +        if (error) { +            error_propagate(errp, error); +            return; +        } +    } +} + +static void xics_initfn(Object *obj) +{ +    XICSState *xics = XICS(obj); + +    xics->ics = ICS(object_new(TYPE_ICS)); +    object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL); +    xics->ics->icp = xics; +} + +static void xics_class_init(ObjectClass *oc, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(oc); +    XICSStateClass *xsc = XICS_CLASS(oc); + +    dc->realize = xics_realize; +    xsc->set_nr_irqs = xics_set_nr_irqs; +    xsc->set_nr_servers = xics_set_nr_servers; +} + +static const TypeInfo xics_info = { +    .name          = TYPE_XICS, +    .parent        = TYPE_XICS_COMMON, +    .instance_size = sizeof(XICSState), +    .class_size = sizeof(XICSStateClass), +    .class_init    = xics_class_init, +    .instance_init = xics_initfn, +}; + +static void xics_register_types(void) +{ +    type_register_static(&xics_common_info); +    type_register_static(&xics_info); +    type_register_static(&ics_info); +    type_register_static(&icp_info); +} + +type_init(xics_register_types) diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c new file mode 100644 index 00000000..d58729cf --- /dev/null +++ b/hw/intc/xics_kvm.c @@ -0,0 +1,508 @@ +/* + * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator + * + * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics, in-kernel emulation + * + * Copyright (c) 2013 David Gibson, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+#include "hw/hw.h"
+#include "trace.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/xics.h"
+#include "kvm_ppc.h"
+#include "qemu/config-file.h"
+#include "qemu/error-report.h"
+
+#include <sys/ioctl.h>
+
+typedef struct KVMXICSState {
+    XICSState parent_obj;
+
+    int kernel_xics_fd;
+} KVMXICSState;
+
+/*
+ * ICP-KVM
+ */
+static void icp_get_kvm_state(ICPState *ss)
+{
+    uint64_t state;
+    struct kvm_one_reg reg = {
+        .id = KVM_REG_PPC_ICP_STATE,
+        .addr = (uintptr_t)&state,
+    };
+    int ret;
+
+    /* ICP for this CPU thread is not in use, exiting */
+    if (!ss->cs) {
+        return;
+    }
+
+    ret = kvm_vcpu_ioctl(ss->cs, KVM_GET_ONE_REG, &reg);
+    if (ret != 0) {
+        error_report("Unable to retrieve KVM interrupt controller state"
+                " for CPU %ld: %s", kvm_arch_vcpu_id(ss->cs), strerror(errno));
+        exit(1);
+    }
+
+    ss->xirr = state >> KVM_REG_PPC_ICP_XISR_SHIFT;
+    ss->mfrr = (state >> KVM_REG_PPC_ICP_MFRR_SHIFT)
+        & KVM_REG_PPC_ICP_MFRR_MASK;
+    ss->pending_priority = (state >> KVM_REG_PPC_ICP_PPRI_SHIFT)
+        & KVM_REG_PPC_ICP_PPRI_MASK;
+}
+
+static int icp_set_kvm_state(ICPState *ss, int version_id)
+{
+    uint64_t state;
+    struct kvm_one_reg reg = {
+        .id = KVM_REG_PPC_ICP_STATE,
+        .addr = (uintptr_t)&state,
+    };
+    int ret;
+
+    /* ICP for this CPU thread is not in use, exiting */
+    if (!ss->cs) {
+        return 0;
+    }
+
+    state = ((uint64_t)ss->xirr << KVM_REG_PPC_ICP_XISR_SHIFT)
+        | ((uint64_t)ss->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT)
+        | ((uint64_t)ss->pending_priority << KVM_REG_PPC_ICP_PPRI_SHIFT);
+
+    ret = kvm_vcpu_ioctl(ss->cs, KVM_SET_ONE_REG, &reg);
+    if (ret != 0) {
+        error_report("Unable to restore KVM interrupt controller state (0x%"
+                PRIx64 ") for CPU %ld: %s", state, kvm_arch_vcpu_id(ss->cs),
+                strerror(errno));
+        return ret;
+    }
+
+    return 0;
+}
+
+static void icp_kvm_reset(DeviceState *dev)
+{
+    ICPState *icp = ICP(dev);
+
+    icp->xirr = 0;
+    icp->pending_priority = 0xff;
+    icp->mfrr = 0xff;
+
+    /* Make sure all outputs are deasserted */
+    qemu_set_irq(icp->output, 0);
+
+    icp_set_kvm_state(icp, 1);
+}
+
+static void icp_kvm_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    ICPStateClass *icpc = ICP_CLASS(klass);
+
+    dc->reset = icp_kvm_reset;
+    icpc->pre_save = icp_get_kvm_state;
+    icpc->post_load = icp_set_kvm_state;
+}
+
+static const TypeInfo icp_kvm_info = {
+    .name = TYPE_KVM_ICP,
+    .parent = TYPE_ICP,
+    .instance_size = sizeof(ICPState),
+    .class_init = icp_kvm_class_init,
+    .class_size = sizeof(ICPStateClass),
+};
+
+/*
+ * ICS-KVM
+ */
+static void ics_get_kvm_state(ICSState *ics)
+{
+    KVMXICSState *icpkvm = KVM_XICS(ics->icp);
+    uint64_t state;
+    struct kvm_device_attr attr = {
+        .flags = 0,
+        .group = KVM_DEV_XICS_GRP_SOURCES,
+        .addr = (uint64_t)(uintptr_t)&state,
+    };
+    int i;
+
+    for (i = 0; i < ics->nr_irqs; i++) {
+        ICSIRQState *irq = &ics->irqs[i];
+        int ret;
+
+        attr.attr = i + ics->offset;
+
+        ret = ioctl(icpkvm->kernel_xics_fd,
KVM_GET_DEVICE_ATTR, &attr); +        if (ret != 0) { +            error_report("Unable to retrieve KVM interrupt controller state" +                    " for IRQ %d: %s", i + ics->offset, strerror(errno)); +            exit(1); +        } + +        irq->server = state & KVM_XICS_DESTINATION_MASK; +        irq->saved_priority = (state >> KVM_XICS_PRIORITY_SHIFT) +            & KVM_XICS_PRIORITY_MASK; +        /* +         * To be consistent with the software emulation in xics.c, we +         * split out the masked state + priority that we get from the +         * kernel into 'current priority' (0xff if masked) and +         * 'saved priority' (if masked, this is the priority the +         * interrupt had before it was masked).  Masking and unmasking +         * are done with the ibm,int-off and ibm,int-on RTAS calls. +         */ +        if (state & KVM_XICS_MASKED) { +            irq->priority = 0xff; +        } else { +            irq->priority = irq->saved_priority; +        } + +        if (state & KVM_XICS_PENDING) { +            if (state & KVM_XICS_LEVEL_SENSITIVE) { +                irq->status |= XICS_STATUS_ASSERTED; +            } else { +                /* +                 * A pending edge-triggered interrupt (or MSI) +                 * must have been rejected previously when we +                 * first detected it and tried to deliver it, +                 * so mark it as pending and previously rejected +                 * for consistency with how xics.c works. +                 */ +                irq->status |= XICS_STATUS_MASKED_PENDING +                    | XICS_STATUS_REJECTED; +            } +        } +    } +} + +static int ics_set_kvm_state(ICSState *ics, int version_id) +{ +    KVMXICSState *icpkvm = KVM_XICS(ics->icp); +    uint64_t state; +    struct kvm_device_attr attr = { +        .flags = 0, +        .group = KVM_DEV_XICS_GRP_SOURCES, +        .addr = (uint64_t)(uintptr_t)&state, +    }; +    int i; + +    for (i = 0; i < ics->nr_irqs; i++) { +        ICSIRQState *irq = &ics->irqs[i]; +        int ret; + +        attr.attr = i + ics->offset; + +        state = irq->server; +        state |= (uint64_t)(irq->saved_priority & KVM_XICS_PRIORITY_MASK) +            << KVM_XICS_PRIORITY_SHIFT; +        if (irq->priority != irq->saved_priority) { +            assert(irq->priority == 0xff); +            state |= KVM_XICS_MASKED; +        } + +        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) { +            state |= KVM_XICS_LEVEL_SENSITIVE; +            if (irq->status & XICS_STATUS_ASSERTED) { +                state |= KVM_XICS_PENDING; +            } +        } else { +            if (irq->status & XICS_STATUS_MASKED_PENDING) { +                state |= KVM_XICS_PENDING; +            } +        } + +        ret = ioctl(icpkvm->kernel_xics_fd, KVM_SET_DEVICE_ATTR, &attr); +        if (ret != 0) { +            error_report("Unable to restore KVM interrupt controller state" +                    " for IRQs %d: %s", i + ics->offset, strerror(errno)); +            return ret; +        } +    } + +    return 0; +} + +static void ics_kvm_set_irq(void *opaque, int srcno, int val) +{ +    ICSState *ics = opaque; +    struct kvm_irq_level args; +    int rc; + +    args.irq = srcno + ics->offset; +    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MSI) { +        if (!val) { +            return; +        } +        args.level = KVM_INTERRUPT_SET; +    } else { +        args.level = val ? 
KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET; +    } +    rc = kvm_vm_ioctl(kvm_state, KVM_IRQ_LINE, &args); +    if (rc < 0) { +        perror("kvm_irq_line"); +    } +} + +static void ics_kvm_reset(DeviceState *dev) +{ +    ICSState *ics = ICS(dev); +    int i; +    uint8_t flags[ics->nr_irqs]; + +    for (i = 0; i < ics->nr_irqs; i++) { +        flags[i] = ics->irqs[i].flags; +    } + +    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs); + +    for (i = 0; i < ics->nr_irqs; i++) { +        ics->irqs[i].priority = 0xff; +        ics->irqs[i].saved_priority = 0xff; +        ics->irqs[i].flags = flags[i]; +    } + +    ics_set_kvm_state(ics, 1); +} + +static void ics_kvm_realize(DeviceState *dev, Error **errp) +{ +    ICSState *ics = ICS(dev); + +    if (!ics->nr_irqs) { +        error_setg(errp, "Number of interrupts needs to be greater 0"); +        return; +    } +    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState)); +    ics->qirqs = qemu_allocate_irqs(ics_kvm_set_irq, ics, ics->nr_irqs); +} + +static void ics_kvm_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); +    ICSStateClass *icsc = ICS_CLASS(klass); + +    dc->realize = ics_kvm_realize; +    dc->reset = ics_kvm_reset; +    icsc->pre_save = ics_get_kvm_state; +    icsc->post_load = ics_set_kvm_state; +} + +static const TypeInfo ics_kvm_info = { +    .name = TYPE_KVM_ICS, +    .parent = TYPE_ICS, +    .instance_size = sizeof(ICSState), +    .class_init = ics_kvm_class_init, +}; + +/* + * XICS-KVM + */ +static void xics_kvm_cpu_setup(XICSState *icp, PowerPCCPU *cpu) +{ +    CPUState *cs; +    ICPState *ss; +    KVMXICSState *icpkvm = KVM_XICS(icp); + +    cs = CPU(cpu); +    ss = &icp->ss[cs->cpu_index]; + +    assert(cs->cpu_index < icp->nr_servers); +    if (icpkvm->kernel_xics_fd == -1) { +        abort(); +    } + +    /* +     * If we are reusing a parked vCPU fd corresponding to the CPU +     * which was hot-removed earlier we don't have to renable +     * KVM_CAP_IRQ_XICS capability again. 
+     */ +    if (ss->cap_irq_xics_enabled) { +        return; +    } + +    if (icpkvm->kernel_xics_fd != -1) { +        int ret; + +        ss->cs = cs; + +        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_IRQ_XICS, 0, +                                  icpkvm->kernel_xics_fd, kvm_arch_vcpu_id(cs)); +        if (ret < 0) { +            error_report("Unable to connect CPU%ld to kernel XICS: %s", +                    kvm_arch_vcpu_id(cs), strerror(errno)); +            exit(1); +        } +        ss->cap_irq_xics_enabled = true; +    } +} + +static void xics_kvm_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp) +{ +    icp->nr_irqs = icp->ics->nr_irqs = nr_irqs; +} + +static void xics_kvm_set_nr_servers(XICSState *icp, uint32_t nr_servers, +                                    Error **errp) +{ +    int i; + +    icp->nr_servers = nr_servers; + +    icp->ss = g_malloc0(icp->nr_servers*sizeof(ICPState)); +    for (i = 0; i < icp->nr_servers; i++) { +        char buffer[32]; +        object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_KVM_ICP); +        snprintf(buffer, sizeof(buffer), "icp[%d]", i); +        object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]), +                                  errp); +    } +} + +static void rtas_dummy(PowerPCCPU *cpu, sPAPRMachineState *spapr, +                       uint32_t token, +                       uint32_t nargs, target_ulong args, +                       uint32_t nret, target_ulong rets) +{ +    error_report("pseries: %s must never be called for in-kernel XICS", +                 __func__); +} + +static void xics_kvm_realize(DeviceState *dev, Error **errp) +{ +    KVMXICSState *icpkvm = KVM_XICS(dev); +    XICSState *icp = XICS_COMMON(dev); +    int i, rc; +    Error *error = NULL; +    struct kvm_create_device xics_create_device = { +        .type = KVM_DEV_TYPE_XICS, +        .flags = 0, +    }; + +    if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_IRQ_XICS)) { +        error_setg(errp, +                   "KVM and IRQ_XICS capability must be present for in-kernel XICS"); +        goto fail; +    } + +    spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_dummy); +    spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_dummy); +    spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_dummy); +    spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_dummy); + +    rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_SET_XIVE, "ibm,set-xive"); +    if (rc < 0) { +        error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,set-xive"); +        goto fail; +    } + +    rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_GET_XIVE, "ibm,get-xive"); +    if (rc < 0) { +        error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,get-xive"); +        goto fail; +    } + +    rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_INT_ON, "ibm,int-on"); +    if (rc < 0) { +        error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,int-on"); +        goto fail; +    } + +    rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_INT_OFF, "ibm,int-off"); +    if (rc < 0) { +        error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,int-off"); +        goto fail; +    } + +    /* Create the kernel ICP */ +    rc = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &xics_create_device); +    if (rc < 0) { +        error_setg_errno(errp, -rc, "Error on KVM_CREATE_DEVICE for XICS"); +        goto fail; +    } + +    icpkvm->kernel_xics_fd = xics_create_device.fd; + +    object_property_set_bool(OBJECT(icp->ics), true, 
"realized", &error); +    if (error) { +        error_propagate(errp, error); +        goto fail; +    } + +    assert(icp->nr_servers); +    for (i = 0; i < icp->nr_servers; i++) { +        object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error); +        if (error) { +            error_propagate(errp, error); +            goto fail; +        } +    } + +    kvm_kernel_irqchip = true; +    kvm_msi_via_irqfd_allowed = true; +    kvm_gsi_direct_mapping = true; + +    return; + +fail: +    kvmppc_define_rtas_kernel_token(0, "ibm,set-xive"); +    kvmppc_define_rtas_kernel_token(0, "ibm,get-xive"); +    kvmppc_define_rtas_kernel_token(0, "ibm,int-on"); +    kvmppc_define_rtas_kernel_token(0, "ibm,int-off"); +} + +static void xics_kvm_initfn(Object *obj) +{ +    XICSState *xics = XICS_COMMON(obj); + +    xics->ics = ICS(object_new(TYPE_KVM_ICS)); +    object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL); +    xics->ics->icp = xics; +} + +static void xics_kvm_class_init(ObjectClass *oc, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(oc); +    XICSStateClass *xsc = XICS_COMMON_CLASS(oc); + +    dc->realize = xics_kvm_realize; +    xsc->cpu_setup = xics_kvm_cpu_setup; +    xsc->set_nr_irqs = xics_kvm_set_nr_irqs; +    xsc->set_nr_servers = xics_kvm_set_nr_servers; +} + +static const TypeInfo xics_kvm_info = { +    .name          = TYPE_KVM_XICS, +    .parent        = TYPE_XICS_COMMON, +    .instance_size = sizeof(KVMXICSState), +    .class_init    = xics_kvm_class_init, +    .instance_init = xics_kvm_initfn, +}; + +static void xics_kvm_register_types(void) +{ +    type_register_static(&xics_kvm_info); +    type_register_static(&ics_kvm_info); +    type_register_static(&icp_kvm_info); +} + +type_init(xics_kvm_register_types) diff --git a/hw/intc/xilinx_intc.c b/hw/intc/xilinx_intc.c new file mode 100644 index 00000000..12804ab7 --- /dev/null +++ b/hw/intc/xilinx_intc.c @@ -0,0 +1,201 @@ +/* + * QEMU Xilinx OPB Interrupt Controller. + * + * Copyright (c) 2009 Edgar E. Iglesias. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "hw/sysbus.h" +#include "hw/hw.h" + +#define D(x) + +#define R_ISR       0 +#define R_IPR       1 +#define R_IER       2 +#define R_IAR       3 +#define R_SIE       4 +#define R_CIE       5 +#define R_IVR       6 +#define R_MER       7 +#define R_MAX       8 + +#define TYPE_XILINX_INTC "xlnx.xps-intc" +#define XILINX_INTC(obj) OBJECT_CHECK(struct xlx_pic, (obj), TYPE_XILINX_INTC) + +struct xlx_pic +{ +    SysBusDevice parent_obj; + +    MemoryRegion mmio; +    qemu_irq parent_irq; + +    /* Configuration reg chosen at synthesis-time. QEMU populates +       the bits at board-setup.  */ +    uint32_t c_kind_of_intr; + +    /* Runtime control registers.  */ +    uint32_t regs[R_MAX]; +    /* state of the interrupt input pins */ +    uint32_t irq_pin_state; +}; + +static void update_irq(struct xlx_pic *p) +{ +    uint32_t i; + +    /* level triggered interrupt */ +    if (p->regs[R_MER] & 2) { +        p->regs[R_ISR] |= p->irq_pin_state & ~p->c_kind_of_intr; +    } + +    /* Update the pending register.  */ +    p->regs[R_IPR] = p->regs[R_ISR] & p->regs[R_IER]; + +    /* Update the vector register.  */ +    for (i = 0; i < 32; i++) { +        if (p->regs[R_IPR] & (1U << i)) { +            break; +        } +    } +    if (i == 32) +        i = ~0; + +    p->regs[R_IVR] = i; +    qemu_set_irq(p->parent_irq, (p->regs[R_MER] & 1) && p->regs[R_IPR]); +} + +static uint64_t +pic_read(void *opaque, hwaddr addr, unsigned int size) +{ +    struct xlx_pic *p = opaque; +    uint32_t r = 0; + +    addr >>= 2; +    switch (addr) +    { +        default: +            if (addr < ARRAY_SIZE(p->regs)) +                r = p->regs[addr]; +            break; + +    } +    D(printf("%s %x=%x\n", __func__, addr * 4, r)); +    return r; +} + +static void +pic_write(void *opaque, hwaddr addr, +          uint64_t val64, unsigned int size) +{ +    struct xlx_pic *p = opaque; +    uint32_t value = val64; + +    addr >>= 2; +    D(qemu_log("%s addr=%x val=%x\n", __func__, addr * 4, value)); +    switch (addr)  +    { +        case R_IAR: +            p->regs[R_ISR] &= ~value; /* ACK.  */ +            break; +        case R_SIE: +            p->regs[R_IER] |= value;  /* Atomic set ie.  */ +            break; +        case R_CIE: +            p->regs[R_IER] &= ~value; /* Atomic clear ie.  
*/ +            break; +        case R_MER: +            p->regs[R_MER] = value & 0x3; +            break; +        case R_ISR: +            if ((p->regs[R_MER] & 2)) { +                break; +            } +            /* fallthrough */ +        default: +            if (addr < ARRAY_SIZE(p->regs)) +                p->regs[addr] = value; +            break; +    } +    update_irq(p); +} + +static const MemoryRegionOps pic_ops = { +    .read = pic_read, +    .write = pic_write, +    .endianness = DEVICE_NATIVE_ENDIAN, +    .valid = { +        .min_access_size = 4, +        .max_access_size = 4 +    } +}; + +static void irq_handler(void *opaque, int irq, int level) +{ +    struct xlx_pic *p = opaque; + +    /* edge triggered interrupt */ +    if (p->c_kind_of_intr & (1 << irq) && p->regs[R_MER] & 2) { +        p->regs[R_ISR] |= (level << irq); +    } + +    p->irq_pin_state &= ~(1 << irq); +    p->irq_pin_state |= level << irq; +    update_irq(p); +} + +static void xilinx_intc_init(Object *obj) +{ +    struct xlx_pic *p = XILINX_INTC(obj); + +    qdev_init_gpio_in(DEVICE(obj), irq_handler, 32); +    sysbus_init_irq(SYS_BUS_DEVICE(obj), &p->parent_irq); + +    memory_region_init_io(&p->mmio, obj, &pic_ops, p, "xlnx.xps-intc", +                          R_MAX * 4); +    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &p->mmio); +} + +static Property xilinx_intc_properties[] = { +    DEFINE_PROP_UINT32("kind-of-intr", struct xlx_pic, c_kind_of_intr, 0), +    DEFINE_PROP_END_OF_LIST(), +}; + +static void xilinx_intc_class_init(ObjectClass *klass, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(klass); + +    dc->props = xilinx_intc_properties; +} + +static const TypeInfo xilinx_intc_info = { +    .name          = TYPE_XILINX_INTC, +    .parent        = TYPE_SYS_BUS_DEVICE, +    .instance_size = sizeof(struct xlx_pic), +    .instance_init = xilinx_intc_init, +    .class_init    = xilinx_intc_class_init, +}; + +static void xilinx_intc_register_types(void) +{ +    type_register_static(&xilinx_intc_info); +} + +type_init(xilinx_intc_register_types)  | 
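Editor's note on the ICP save/restore path in xics_kvm.c above: icp_get_kvm_state() and icp_set_kvm_state() round-trip the per-CPU presentation state (XIRR, MFRR, pending priority) through a single 64-bit value exchanged with the kernel via KVM_GET_ONE_REG/KVM_SET_ONE_REG. The standalone sketch below illustrates only the bit-packing half of that exchange. The ICP_*_SHIFT/_MASK constants, struct icp_regs and the icp_pack/icp_unpack helpers are illustrative placeholders invented for this note; the authoritative field layout is the kernel's KVM_REG_PPC_ICP_* uAPI definitions, and the real QEMU state lives in ICPState.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only; the real positions come from asm/kvm.h. */
#define ICP_XISR_SHIFT 32
#define ICP_MFRR_SHIFT 24
#define ICP_MFRR_MASK  0xffULL
#define ICP_PPRI_SHIFT 16
#define ICP_PPRI_MASK  0xffULL

struct icp_regs {
    uint32_t xirr;             /* 32-bit XIRR as the guest reads it (CPPR in the top byte) */
    uint8_t  mfrr;             /* IPI priority */
    uint8_t  pending_priority; /* priority of the currently pending interrupt */
};

/* Pack the three fields into one 64-bit word, mirroring icp_set_kvm_state(). */
static uint64_t icp_pack(const struct icp_regs *r)
{
    return ((uint64_t)r->xirr << ICP_XISR_SHIFT)
         | ((uint64_t)r->mfrr << ICP_MFRR_SHIFT)
         | ((uint64_t)r->pending_priority << ICP_PPRI_SHIFT);
}

/* Inverse of icp_pack(), mirroring icp_get_kvm_state(). */
static void icp_unpack(uint64_t state, struct icp_regs *r)
{
    r->xirr = (uint32_t)(state >> ICP_XISR_SHIFT);
    r->mfrr = (state >> ICP_MFRR_SHIFT) & ICP_MFRR_MASK;
    r->pending_priority = (state >> ICP_PPRI_SHIFT) & ICP_PPRI_MASK;
}

int main(void)
{
    struct icp_regs in = { .xirr = 0x00ff1005, .mfrr = 0xff,
                           .pending_priority = 0x05 }, out;

    /* The round trip should reproduce the original register values. */
    icp_unpack(icp_pack(&in), &out);
    printf("xirr=%#x mfrr=%#x ppri=%#x\n",
           out.xirr, out.mfrr, out.pending_priority);
    return 0;
}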

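A second illustrative sketch covers the priority/saved_priority convention that the comment in ics_get_kvm_state() describes and that the ibm,set-xive / ibm,int-off / ibm,int-on RTAS handlers in xics.c implement: a masked source is recorded as current priority 0xff, while saved_priority remembers the value to restore on unmask. The struct and function names below are invented for this example and are not the QEMU ICSIRQState fields or helpers.

#include <stdint.h>
#include <stdio.h>

/* One interrupt source; field names are invented for this sketch. */
struct irq_prio {
    uint8_t priority;       /* 0xff means "masked" */
    uint8_t saved_priority; /* value to restore on ibm,int-on */
};

/* ibm,set-xive: program a priority and make it the value to restore later. */
static void set_xive(struct irq_prio *p, uint8_t prio)
{
    p->priority = prio;
    p->saved_priority = prio;
}

/* ibm,int-off: remember the current priority, then mask the source. */
static void int_off(struct irq_prio *p)
{
    p->saved_priority = p->priority;
    p->priority = 0xff;
}

/* ibm,int-on: unmask by restoring the remembered priority. */
static void int_on(struct irq_prio *p)
{
    p->priority = p->saved_priority;
}

int main(void)
{
    struct irq_prio p;

    set_xive(&p, 5);   /* priority 5, saved 5 */
    int_off(&p);       /* priority 0xff (masked), saved 5 */
    int_on(&p);        /* priority 5 again */
    printf("priority=%#x saved=%#x\n", p.priority, p.saved_priority);
    return 0;
}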