Diffstat (limited to 'target-unicore32')
-rw-r--r--  target-unicore32/Makefile.objs       4
-rw-r--r--  target-unicore32/cpu-qom.h          69
-rw-r--r--  target-unicore32/cpu.c             199
-rw-r--r--  target-unicore32/cpu.h             165
-rw-r--r--  target-unicore32/helper.c          261
-rw-r--r--  target-unicore32/helper.h           65
-rw-r--r--  target-unicore32/op_helper.c       259
-rw-r--r--  target-unicore32/softmmu.c         276
-rw-r--r--  target-unicore32/translate.c      2139
-rw-r--r--  target-unicore32/ucf64_helper.c    324
10 files changed, 3761 insertions(+), 0 deletions(-)
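(Note: the fragment below is not part of this commit. It is a minimal sketch of how a board
model of this era would instantiate the new CPU, assuming the uc32_cpu_init() helper and the
model names "UniCore-II" / "any" that the diff introduces, plus QEMU's existing hw_error()
from hw/hw.h; names used only for illustration.)

    /* Hypothetical board-init fragment -- illustration only, not from this patch */
    UniCore32CPU *cpu = uc32_cpu_init("UniCore-II");   /* or "any" for the generic model */
    if (cpu == NULL) {
        hw_error("Unable to find UniCore32 CPU definition\n");
    }
    CPUUniCore32State *env = &cpu->env;                /* env->regs[31] is the PC */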
diff --git a/target-unicore32/Makefile.objs b/target-unicore32/Makefile.objs new file mode 100644 index 00000000..6b41b1e9 --- /dev/null +++ b/target-unicore32/Makefile.objs @@ -0,0 +1,4 @@ +obj-y += translate.o op_helper.o helper.o cpu.o +obj-y += ucf64_helper.o + +obj-$(CONFIG_SOFTMMU) += softmmu.o diff --git a/target-unicore32/cpu-qom.h b/target-unicore32/cpu-qom.h new file mode 100644 index 00000000..ea65b833 --- /dev/null +++ b/target-unicore32/cpu-qom.h @@ -0,0 +1,69 @@ +/* + * QEMU UniCore32 CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation, or (at your option) any + * later version. See the COPYING file in the top-level directory. + */ +#ifndef QEMU_UC32_CPU_QOM_H +#define QEMU_UC32_CPU_QOM_H + +#include "qom/cpu.h" +#include "cpu.h" + +#define TYPE_UNICORE32_CPU "unicore32-cpu" + +#define UNICORE32_CPU_CLASS(klass) \ +    OBJECT_CLASS_CHECK(UniCore32CPUClass, (klass), TYPE_UNICORE32_CPU) +#define UNICORE32_CPU(obj) \ +    OBJECT_CHECK(UniCore32CPU, (obj), TYPE_UNICORE32_CPU) +#define UNICORE32_CPU_GET_CLASS(obj) \ +    OBJECT_GET_CLASS(UniCore32CPUClass, (obj), TYPE_UNICORE32_CPU) + +/** + * UniCore32CPUClass: + * @parent_realize: The parent class' realize handler. + * + * A UniCore32 CPU model. + */ +typedef struct UniCore32CPUClass { +    /*< private >*/ +    CPUClass parent_class; +    /*< public >*/ + +    DeviceRealize parent_realize; +} UniCore32CPUClass; + +/** + * UniCore32CPU: + * @env: #CPUUniCore32State + * + * A UniCore32 CPU. + */ +typedef struct UniCore32CPU { +    /*< private >*/ +    CPUState parent_obj; +    /*< public >*/ + +    CPUUniCore32State env; +} UniCore32CPU; + +static inline UniCore32CPU *uc32_env_get_cpu(CPUUniCore32State *env) +{ +    return container_of(env, UniCore32CPU, env); +} + +#define ENV_GET_CPU(e) CPU(uc32_env_get_cpu(e)) + +#define ENV_OFFSET offsetof(UniCore32CPU, env) + +void uc32_cpu_do_interrupt(CPUState *cpu); +bool uc32_cpu_exec_interrupt(CPUState *cpu, int int_req); +void uc32_cpu_dump_state(CPUState *cpu, FILE *f, +                         fprintf_function cpu_fprintf, int flags); +hwaddr uc32_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); + +#endif diff --git a/target-unicore32/cpu.c b/target-unicore32/cpu.c new file mode 100644 index 00000000..e5252eba --- /dev/null +++ b/target-unicore32/cpu.c @@ -0,0 +1,199 @@ +/* + * QEMU UniCore32 CPU + * + * Copyright (c) 2010-2012 Guan Xuetao + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Contributions from 2012-04-01 on are considered under GPL version 2, + * or (at your option) any later version. 
+ */ + +#include "cpu.h" +#include "qemu-common.h" +#include "migration/vmstate.h" + +static void uc32_cpu_set_pc(CPUState *cs, vaddr value) +{ +    UniCore32CPU *cpu = UNICORE32_CPU(cs); + +    cpu->env.regs[31] = value; +} + +static bool uc32_cpu_has_work(CPUState *cs) +{ +    return cs->interrupt_request & +        (CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB); +} + +static inline void set_feature(CPUUniCore32State *env, int feature) +{ +    env->features |= feature; +} + +/* CPU models */ + +static ObjectClass *uc32_cpu_class_by_name(const char *cpu_model) +{ +    ObjectClass *oc; +    char *typename; + +    if (cpu_model == NULL) { +        return NULL; +    } + +    typename = g_strdup_printf("%s-" TYPE_UNICORE32_CPU, cpu_model); +    oc = object_class_by_name(typename); +    g_free(typename); +    if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_UNICORE32_CPU) || +                       object_class_is_abstract(oc))) { +        oc = NULL; +    } +    return oc; +} + +typedef struct UniCore32CPUInfo { +    const char *name; +    void (*instance_init)(Object *obj); +} UniCore32CPUInfo; + +static void unicore_ii_cpu_initfn(Object *obj) +{ +    UniCore32CPU *cpu = UNICORE32_CPU(obj); +    CPUUniCore32State *env = &cpu->env; + +    env->cp0.c0_cpuid = 0x4d000863; +    env->cp0.c0_cachetype = 0x0d152152; +    env->cp0.c1_sys = 0x2000; +    env->cp0.c2_base = 0x0; +    env->cp0.c3_faultstatus = 0x0; +    env->cp0.c4_faultaddr = 0x0; +    env->ucf64.xregs[UC32_UCF64_FPSCR] = 0; + +    set_feature(env, UC32_HWCAP_CMOV); +    set_feature(env, UC32_HWCAP_UCF64); +} + +static void uc32_any_cpu_initfn(Object *obj) +{ +    UniCore32CPU *cpu = UNICORE32_CPU(obj); +    CPUUniCore32State *env = &cpu->env; + +    env->cp0.c0_cpuid = 0xffffffff; +    env->ucf64.xregs[UC32_UCF64_FPSCR] = 0; + +    set_feature(env, UC32_HWCAP_CMOV); +    set_feature(env, UC32_HWCAP_UCF64); +} + +static const UniCore32CPUInfo uc32_cpus[] = { +    { .name = "UniCore-II", .instance_init = unicore_ii_cpu_initfn }, +    { .name = "any",        .instance_init = uc32_any_cpu_initfn }, +}; + +static void uc32_cpu_realizefn(DeviceState *dev, Error **errp) +{ +    UniCore32CPUClass *ucc = UNICORE32_CPU_GET_CLASS(dev); + +    qemu_init_vcpu(CPU(dev)); + +    ucc->parent_realize(dev, errp); +} + +static void uc32_cpu_initfn(Object *obj) +{ +    CPUState *cs = CPU(obj); +    UniCore32CPU *cpu = UNICORE32_CPU(obj); +    CPUUniCore32State *env = &cpu->env; +    static bool inited; + +    cs->env_ptr = env; +    cpu_exec_init(cs, &error_abort); + +#ifdef CONFIG_USER_ONLY +    env->uncached_asr = ASR_MODE_USER; +    env->regs[31] = 0; +#else +    env->uncached_asr = ASR_MODE_PRIV; +    env->regs[31] = 0x03000000; +#endif + +    tlb_flush(cs, 1); + +    if (tcg_enabled() && !inited) { +        inited = true; +        uc32_translate_init(); +    } +} + +static const VMStateDescription vmstate_uc32_cpu = { +    .name = "cpu", +    .unmigratable = 1, +}; + +static void uc32_cpu_class_init(ObjectClass *oc, void *data) +{ +    DeviceClass *dc = DEVICE_CLASS(oc); +    CPUClass *cc = CPU_CLASS(oc); +    UniCore32CPUClass *ucc = UNICORE32_CPU_CLASS(oc); + +    ucc->parent_realize = dc->realize; +    dc->realize = uc32_cpu_realizefn; + +    cc->class_by_name = uc32_cpu_class_by_name; +    cc->has_work = uc32_cpu_has_work; +    cc->do_interrupt = uc32_cpu_do_interrupt; +    cc->cpu_exec_interrupt = uc32_cpu_exec_interrupt; +    cc->dump_state = uc32_cpu_dump_state; +    cc->set_pc = uc32_cpu_set_pc; +#ifdef CONFIG_USER_ONLY +    
cc->handle_mmu_fault = uc32_cpu_handle_mmu_fault; +#else +    cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug; +#endif +    dc->vmsd = &vmstate_uc32_cpu; + +    /* +     * Reason: uc32_cpu_initfn() calls cpu_exec_init(), which saves +     * the object in cpus -> dangling pointer after final +     * object_unref(). +     */ +    dc->cannot_destroy_with_object_finalize_yet = true; +} + +static void uc32_register_cpu_type(const UniCore32CPUInfo *info) +{ +    TypeInfo type_info = { +        .parent = TYPE_UNICORE32_CPU, +        .instance_init = info->instance_init, +    }; + +    type_info.name = g_strdup_printf("%s-" TYPE_UNICORE32_CPU, info->name); +    type_register(&type_info); +    g_free((void *)type_info.name); +} + +static const TypeInfo uc32_cpu_type_info = { +    .name = TYPE_UNICORE32_CPU, +    .parent = TYPE_CPU, +    .instance_size = sizeof(UniCore32CPU), +    .instance_init = uc32_cpu_initfn, +    .abstract = true, +    .class_size = sizeof(UniCore32CPUClass), +    .class_init = uc32_cpu_class_init, +}; + +static void uc32_cpu_register_types(void) +{ +    int i; + +    type_register_static(&uc32_cpu_type_info); +    for (i = 0; i < ARRAY_SIZE(uc32_cpus); i++) { +        uc32_register_cpu_type(&uc32_cpus[i]); +    } +} + +type_init(uc32_cpu_register_types) diff --git a/target-unicore32/cpu.h b/target-unicore32/cpu.h new file mode 100644 index 00000000..45e31e54 --- /dev/null +++ b/target-unicore32/cpu.h @@ -0,0 +1,165 @@ +/* + * UniCore32 virtual CPU header + * + * Copyright (C) 2010-2012 Guan Xuetao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation, or (at your option) any + * later version. See the COPYING file in the top-level directory. + */ +#ifndef QEMU_UNICORE32_CPU_H +#define QEMU_UNICORE32_CPU_H + +#define TARGET_LONG_BITS                32 +#define TARGET_PAGE_BITS                12 + +#define TARGET_PHYS_ADDR_SPACE_BITS     32 +#define TARGET_VIRT_ADDR_SPACE_BITS     32 + +#define ELF_MACHINE             EM_UNICORE32 + +#define CPUArchState                struct CPUUniCore32State + +#include "config.h" +#include "qemu-common.h" +#include "exec/cpu-defs.h" +#include "fpu/softfloat.h" + +#define NB_MMU_MODES            2 + +typedef struct CPUUniCore32State { +    /* Regs for current mode.  */ +    uint32_t regs[32]; +    /* Frequently accessed ASR bits are stored separately for efficiently. +       This contains all the other bits.  Use asr_{read,write} to access +       the whole ASR.  */ +    uint32_t uncached_asr; +    uint32_t bsr; + +    /* Banked registers.  */ +    uint32_t banked_bsr[6]; +    uint32_t banked_r29[6]; +    uint32_t banked_r30[6]; + +    /* asr flag cache for faster execution */ +    uint32_t CF; /* 0 or 1 */ +    uint32_t VF; /* V is the bit 31. All other bits are undefined */ +    uint32_t NF; /* N is bit 31. All other bits are undefined.  */ +    uint32_t ZF; /* Z set if zero.  */ + +    /* System control coprocessor (cp0) */ +    struct { +        uint32_t c0_cpuid; +        uint32_t c0_cachetype; +        uint32_t c1_sys; /* System control register.  */ +        uint32_t c2_base; /* MMU translation table base.  */ +        uint32_t c3_faultstatus; /* Fault status registers.  */ +        uint32_t c4_faultaddr; /* Fault address registers.  */ +        uint32_t c5_cacheop; /* Cache operation registers.  */ +        uint32_t c6_tlbop; /* TLB operation registers. 
*/ +    } cp0; + +    /* UniCore-F64 coprocessor state.  */ +    struct { +        float64 regs[16]; +        uint32_t xregs[32]; +        float_status fp_status; +    } ucf64; + +    CPU_COMMON + +    /* Internal CPU feature flags.  */ +    uint32_t features; + +} CPUUniCore32State; + +#define ASR_M                   (0x1f) +#define ASR_MODE_USER           (0x10) +#define ASR_MODE_INTR           (0x12) +#define ASR_MODE_PRIV           (0x13) +#define ASR_MODE_TRAP           (0x17) +#define ASR_MODE_EXTN           (0x1b) +#define ASR_MODE_SUSR           (0x1f) +#define ASR_I                   (1 << 7) +#define ASR_V                   (1 << 28) +#define ASR_C                   (1 << 29) +#define ASR_Z                   (1 << 30) +#define ASR_N                   (1 << 31) +#define ASR_NZCV                (ASR_N | ASR_Z | ASR_C | ASR_V) +#define ASR_RESERVED            (~(ASR_M | ASR_I | ASR_NZCV)) + +#define UC32_EXCP_PRIV          (1) +#define UC32_EXCP_ITRAP         (2) +#define UC32_EXCP_DTRAP         (3) +#define UC32_EXCP_INTR          (4) + +/* Return the current ASR value.  */ +target_ulong cpu_asr_read(CPUUniCore32State *env1); +/* Set the ASR.  Note that some bits of mask must be all-set or all-clear.  */ +void cpu_asr_write(CPUUniCore32State *env1, target_ulong val, target_ulong mask); + +/* UniCore-F64 system registers.  */ +#define UC32_UCF64_FPSCR                (31) +#define UCF64_FPSCR_MASK                (0x27ffffff) +#define UCF64_FPSCR_RND_MASK            (0x7) +#define UCF64_FPSCR_RND(r)              (((r) >>  0) & UCF64_FPSCR_RND_MASK) +#define UCF64_FPSCR_TRAPEN_MASK         (0x7f) +#define UCF64_FPSCR_TRAPEN(r)           (((r) >> 10) & UCF64_FPSCR_TRAPEN_MASK) +#define UCF64_FPSCR_FLAG_MASK           (0x3ff) +#define UCF64_FPSCR_FLAG(r)             (((r) >> 17) & UCF64_FPSCR_FLAG_MASK) +#define UCF64_FPSCR_FLAG_ZERO           (1 << 17) +#define UCF64_FPSCR_FLAG_INFINITY       (1 << 18) +#define UCF64_FPSCR_FLAG_INVALID        (1 << 19) +#define UCF64_FPSCR_FLAG_UNDERFLOW      (1 << 20) +#define UCF64_FPSCR_FLAG_OVERFLOW       (1 << 21) +#define UCF64_FPSCR_FLAG_INEXACT        (1 << 22) +#define UCF64_FPSCR_FLAG_HUGEINT        (1 << 23) +#define UCF64_FPSCR_FLAG_DENORMAL       (1 << 24) +#define UCF64_FPSCR_FLAG_UNIMP          (1 << 25) +#define UCF64_FPSCR_FLAG_DIVZERO        (1 << 26) + +#define UC32_HWCAP_CMOV                 4 /* 1 << 2 */ +#define UC32_HWCAP_UCF64                8 /* 1 << 3 */ + +#define cpu_exec                        uc32_cpu_exec +#define cpu_signal_handler              uc32_cpu_signal_handler + +int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc); + +/* MMU modes definitions */ +#define MMU_MODE0_SUFFIX _kernel +#define MMU_MODE1_SUFFIX _user +#define MMU_USER_IDX 1 +static inline int cpu_mmu_index(CPUUniCore32State *env) +{ +    return (env->uncached_asr & ASR_M) == ASR_MODE_USER ? 
1 : 0; +} + +#include "exec/cpu-all.h" +#include "cpu-qom.h" +#include "exec/exec-all.h" + +int uc32_cpu_exec(CPUState *s); + +UniCore32CPU *uc32_cpu_init(const char *cpu_model); + +#define cpu_init(cpu_model) CPU(uc32_cpu_init(cpu_model)) + +static inline void cpu_get_tb_cpu_state(CPUUniCore32State *env, target_ulong *pc, +                                        target_ulong *cs_base, int *flags) +{ +    *pc = env->regs[31]; +    *cs_base = 0; +    *flags = 0; +    if ((env->uncached_asr & ASR_M) != ASR_MODE_USER) { +        *flags |= (1 << 6); +    } +} + +int uc32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, +                              int mmu_idx); +void uc32_translate_init(void); +void switch_mode(CPUUniCore32State *, int); + +#endif /* QEMU_UNICORE32_CPU_H */ diff --git a/target-unicore32/helper.c b/target-unicore32/helper.c new file mode 100644 index 00000000..ae63277c --- /dev/null +++ b/target-unicore32/helper.c @@ -0,0 +1,261 @@ +/* + * Copyright (C) 2010-2012 Guan Xuetao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Contributions from 2012-04-01 on are considered under GPL version 2, + * or (at your option) any later version. + */ + +#include "cpu.h" +#include "exec/gdbstub.h" +#include "exec/helper-proto.h" +#include "qemu/host-utils.h" +#ifndef CONFIG_USER_ONLY +#include "ui/console.h" +#endif + +#undef DEBUG_UC32 + +#ifdef DEBUG_UC32 +#define DPRINTF(fmt, ...) printf("%s: " fmt , __func__, ## __VA_ARGS__) +#else +#define DPRINTF(fmt, ...) do {} while (0) +#endif + +UniCore32CPU *uc32_cpu_init(const char *cpu_model) +{ +    return UNICORE32_CPU(cpu_generic_init(TYPE_UNICORE32_CPU, cpu_model)); +} + +uint32_t HELPER(clo)(uint32_t x) +{ +    return clo32(x); +} + +uint32_t HELPER(clz)(uint32_t x) +{ +    return clz32(x); +} + +#ifndef CONFIG_USER_ONLY +void helper_cp0_set(CPUUniCore32State *env, uint32_t val, uint32_t creg, +        uint32_t cop) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); + +    /* +     * movc pp.nn, rn, #imm9 +     *      rn: UCOP_REG_D +     *      nn: UCOP_REG_N +     *          1: sys control reg. +     *          2: page table base reg. +     *          3: data fault status reg. +     *          4: insn fault status reg. +     *          5: cache op. reg. +     *          6: tlb op. reg. 
+     *      imm9: split UCOP_IMM10 with bit5 is 0 +     */ +    switch (creg) { +    case 1: +        if (cop != 0) { +            goto unrecognized; +        } +        env->cp0.c1_sys = val; +        break; +    case 2: +        if (cop != 0) { +            goto unrecognized; +        } +        env->cp0.c2_base = val; +        break; +    case 3: +        if (cop != 0) { +            goto unrecognized; +        } +        env->cp0.c3_faultstatus = val; +        break; +    case 4: +        if (cop != 0) { +            goto unrecognized; +        } +        env->cp0.c4_faultaddr = val; +        break; +    case 5: +        switch (cop) { +        case 28: +            DPRINTF("Invalidate Entire I&D cache\n"); +            return; +        case 20: +            DPRINTF("Invalidate Entire Icache\n"); +            return; +        case 12: +            DPRINTF("Invalidate Entire Dcache\n"); +            return; +        case 10: +            DPRINTF("Clean Entire Dcache\n"); +            return; +        case 14: +            DPRINTF("Flush Entire Dcache\n"); +            return; +        case 13: +            DPRINTF("Invalidate Dcache line\n"); +            return; +        case 11: +            DPRINTF("Clean Dcache line\n"); +            return; +        case 15: +            DPRINTF("Flush Dcache line\n"); +            return; +        } +        break; +    case 6: +        if ((cop <= 6) && (cop >= 2)) { +            /* invalid all tlb */ +            tlb_flush(CPU(cpu), 1); +            return; +        } +        break; +    default: +        goto unrecognized; +    } +    return; +unrecognized: +    DPRINTF("Wrong register (%d) or wrong operation (%d) in cp0_set!\n", +            creg, cop); +} + +uint32_t helper_cp0_get(CPUUniCore32State *env, uint32_t creg, uint32_t cop) +{ +    /* +     * movc rd, pp.nn, #imm9 +     *      rd: UCOP_REG_D +     *      nn: UCOP_REG_N +     *          0: cpuid and cachetype +     *          1: sys control reg. +     *          2: page table base reg. +     *          3: data fault status reg. +     *          4: insn fault status reg. +     *      imm9: split UCOP_IMM10 with bit5 is 0 +     */ +    switch (creg) { +    case 0: +        switch (cop) { +        case 0: +            return env->cp0.c0_cpuid; +        case 1: +            return env->cp0.c0_cachetype; +        } +        break; +    case 1: +        if (cop == 0) { +            return env->cp0.c1_sys; +        } +        break; +    case 2: +        if (cop == 0) { +            return env->cp0.c2_base; +        } +        break; +    case 3: +        if (cop == 0) { +            return env->cp0.c3_faultstatus; +        } +        break; +    case 4: +        if (cop == 0) { +            return env->cp0.c4_faultaddr; +        } +        break; +    } +    DPRINTF("Wrong register (%d) or wrong operation (%d) in cp0_set!\n", +            creg, cop); +    return 0; +} + +#ifdef CONFIG_CURSES +/* + * FIXME: + *     1. curses windows will be blank when switching back + *     2. 
backspace is not handled yet + */ +static void putc_on_screen(unsigned char ch) +{ +    static WINDOW *localwin; +    static int init; + +    if (!init) { +        /* Assume 80 * 30 screen to minimize the implementation */ +        localwin = newwin(30, 80, 0, 0); +        scrollok(localwin, TRUE); +        init = TRUE; +    } + +    if (isprint(ch)) { +        wprintw(localwin, "%c", ch); +    } else { +        switch (ch) { +        case '\n': +            wprintw(localwin, "%c", ch); +            break; +        case '\r': +            /* If '\r' is put before '\n', the curses window will destroy the +             * last print line. And meanwhile, '\n' implifies '\r' inside. */ +            break; +        default: /* Not handled, so just print it hex code */ +            wprintw(localwin, "-- 0x%x --", ch); +        } +    } + +    wrefresh(localwin); +} +#else +#define putc_on_screen(c)               do { } while (0) +#endif + +void helper_cp1_putc(target_ulong x) +{ +    putc_on_screen((unsigned char)x);   /* Output to screen */ +    DPRINTF("%c", x);                   /* Output to stdout */ +} +#endif + +#ifdef CONFIG_USER_ONLY +void switch_mode(CPUUniCore32State *env, int mode) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); + +    if (mode != ASR_MODE_USER) { +        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); +    } +} + +void uc32_cpu_do_interrupt(CPUState *cs) +{ +    cpu_abort(cs, "NO interrupt in user mode\n"); +} + +int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, +                              int access_type, int mmu_idx) +{ +    cpu_abort(cs, "NO mmu fault in user mode\n"); +    return 1; +} +#endif + +bool uc32_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ +    if (interrupt_request & CPU_INTERRUPT_HARD) { +        UniCore32CPU *cpu = UNICORE32_CPU(cs); +        CPUUniCore32State *env = &cpu->env; + +        if (!(env->uncached_asr & ASR_I)) { +            cs->exception_index = UC32_EXCP_INTR; +            uc32_cpu_do_interrupt(cs); +            return true; +        } +    } +    return false; +} diff --git a/target-unicore32/helper.h b/target-unicore32/helper.h new file mode 100644 index 00000000..94181374 --- /dev/null +++ b/target-unicore32/helper.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2010-2012 Guan Xuetao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation, or (at your option) any + * later version. See the COPYING file in the top-level directory. 
+ */ + +#ifndef CONFIG_USER_ONLY +DEF_HELPER_4(cp0_set, void, env, i32, i32, i32) +DEF_HELPER_3(cp0_get, i32, env, i32, i32) +DEF_HELPER_1(cp1_putc, void, i32) +#endif + +DEF_HELPER_1(clz, i32, i32) +DEF_HELPER_1(clo, i32, i32) + +DEF_HELPER_2(exception, void, env, i32) + +DEF_HELPER_3(asr_write, void, env, i32, i32) +DEF_HELPER_1(asr_read, i32, env) + +DEF_HELPER_2(get_user_reg, i32, env, i32) +DEF_HELPER_3(set_user_reg, void, env, i32, i32) + +DEF_HELPER_3(add_cc, i32, env, i32, i32) +DEF_HELPER_3(adc_cc, i32, env, i32, i32) +DEF_HELPER_3(sub_cc, i32, env, i32, i32) +DEF_HELPER_3(sbc_cc, i32, env, i32, i32) + +DEF_HELPER_2(shl, i32, i32, i32) +DEF_HELPER_2(shr, i32, i32, i32) +DEF_HELPER_2(sar, i32, i32, i32) +DEF_HELPER_3(shl_cc, i32, env, i32, i32) +DEF_HELPER_3(shr_cc, i32, env, i32, i32) +DEF_HELPER_3(sar_cc, i32, env, i32, i32) +DEF_HELPER_3(ror_cc, i32, env, i32, i32) + +DEF_HELPER_1(ucf64_get_fpscr, i32, env) +DEF_HELPER_2(ucf64_set_fpscr, void, env, i32) + +DEF_HELPER_3(ucf64_adds, f32, f32, f32, env) +DEF_HELPER_3(ucf64_addd, f64, f64, f64, env) +DEF_HELPER_3(ucf64_subs, f32, f32, f32, env) +DEF_HELPER_3(ucf64_subd, f64, f64, f64, env) +DEF_HELPER_3(ucf64_muls, f32, f32, f32, env) +DEF_HELPER_3(ucf64_muld, f64, f64, f64, env) +DEF_HELPER_3(ucf64_divs, f32, f32, f32, env) +DEF_HELPER_3(ucf64_divd, f64, f64, f64, env) +DEF_HELPER_1(ucf64_negs, f32, f32) +DEF_HELPER_1(ucf64_negd, f64, f64) +DEF_HELPER_1(ucf64_abss, f32, f32) +DEF_HELPER_1(ucf64_absd, f64, f64) +DEF_HELPER_4(ucf64_cmps, void, f32, f32, i32, env) +DEF_HELPER_4(ucf64_cmpd, void, f64, f64, i32, env) + +DEF_HELPER_2(ucf64_sf2df, f64, f32, env) +DEF_HELPER_2(ucf64_df2sf, f32, f64, env) + +DEF_HELPER_2(ucf64_si2sf, f32, f32, env) +DEF_HELPER_2(ucf64_si2df, f64, f32, env) + +DEF_HELPER_2(ucf64_sf2si, f32, f32, env) +DEF_HELPER_2(ucf64_df2si, f32, f64, env) diff --git a/target-unicore32/op_helper.c b/target-unicore32/op_helper.c new file mode 100644 index 00000000..0266dbdf --- /dev/null +++ b/target-unicore32/op_helper.c @@ -0,0 +1,259 @@ +/* + *  UniCore32 helper routines + * + * Copyright (C) 2010-2012 Guan Xuetao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation, or (at your option) any + * later version. See the COPYING file in the top-level directory. 
+ */ +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" + +#define SIGNBIT (uint32_t)0x80000000 +#define SIGNBIT64 ((uint64_t)1 << 63) + +void HELPER(exception)(CPUUniCore32State *env, uint32_t excp) +{ +    CPUState *cs = CPU(uc32_env_get_cpu(env)); + +    cs->exception_index = excp; +    cpu_loop_exit(cs); +} + +static target_ulong asr_read(CPUUniCore32State *env) +{ +    int ZF; +    ZF = (env->ZF == 0); +    return env->uncached_asr | (env->NF & 0x80000000) | (ZF << 30) | +        (env->CF << 29) | ((env->VF & 0x80000000) >> 3); +} + +target_ulong cpu_asr_read(CPUUniCore32State *env) +{ +    return asr_read(env); +} + +target_ulong HELPER(asr_read)(CPUUniCore32State *env) +{ +    return asr_read(env); +} + +static void asr_write(CPUUniCore32State *env, target_ulong val, +                      target_ulong mask) +{ +    if (mask & ASR_NZCV) { +        env->ZF = (~val) & ASR_Z; +        env->NF = val; +        env->CF = (val >> 29) & 1; +        env->VF = (val << 3) & 0x80000000; +    } + +    if ((env->uncached_asr ^ val) & mask & ASR_M) { +        switch_mode(env, val & ASR_M); +    } +    mask &= ~ASR_NZCV; +    env->uncached_asr = (env->uncached_asr & ~mask) | (val & mask); +} + +void cpu_asr_write(CPUUniCore32State *env, target_ulong val, target_ulong mask) +{ +    asr_write(env, val, mask); +} + +void HELPER(asr_write)(CPUUniCore32State *env, target_ulong val, +                       target_ulong mask) +{ +    asr_write(env, val, mask); +} + +/* Access to user mode registers from privileged modes.  */ +uint32_t HELPER(get_user_reg)(CPUUniCore32State *env, uint32_t regno) +{ +    uint32_t val; + +    if (regno == 29) { +        val = env->banked_r29[0]; +    } else if (regno == 30) { +        val = env->banked_r30[0]; +    } else { +        val = env->regs[regno]; +    } +    return val; +} + +void HELPER(set_user_reg)(CPUUniCore32State *env, uint32_t regno, uint32_t val) +{ +    if (regno == 29) { +        env->banked_r29[0] = val; +    } else if (regno == 30) { +        env->banked_r30[0] = val; +    } else { +        env->regs[regno] = val; +    } +} + +/* ??? Flag setting arithmetic is awkward because we need to do comparisons. +   The only way to do that in TCG is a conditional branch, which clobbers +   all our temporaries.  For now implement these as helper functions.  
*/ + +uint32_t HELPER(add_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b) +{ +    uint32_t result; +    result = a + b; +    env->NF = env->ZF = result; +    env->CF = result < a; +    env->VF = (a ^ b ^ -1) & (a ^ result); +    return result; +} + +uint32_t HELPER(adc_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b) +{ +    uint32_t result; +    if (!env->CF) { +        result = a + b; +        env->CF = result < a; +    } else { +        result = a + b + 1; +        env->CF = result <= a; +    } +    env->VF = (a ^ b ^ -1) & (a ^ result); +    env->NF = env->ZF = result; +    return result; +} + +uint32_t HELPER(sub_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b) +{ +    uint32_t result; +    result = a - b; +    env->NF = env->ZF = result; +    env->CF = a >= b; +    env->VF = (a ^ b) & (a ^ result); +    return result; +} + +uint32_t HELPER(sbc_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b) +{ +    uint32_t result; +    if (!env->CF) { +        result = a - b - 1; +        env->CF = a > b; +    } else { +        result = a - b; +        env->CF = a >= b; +    } +    env->VF = (a ^ b) & (a ^ result); +    env->NF = env->ZF = result; +    return result; +} + +/* Similarly for variable shift instructions.  */ + +uint32_t HELPER(shl)(uint32_t x, uint32_t i) +{ +    int shift = i & 0xff; +    if (shift >= 32) { +        return 0; +    } +    return x << shift; +} + +uint32_t HELPER(shr)(uint32_t x, uint32_t i) +{ +    int shift = i & 0xff; +    if (shift >= 32) { +        return 0; +    } +    return (uint32_t)x >> shift; +} + +uint32_t HELPER(sar)(uint32_t x, uint32_t i) +{ +    int shift = i & 0xff; +    if (shift >= 32) { +        shift = 31; +    } +    return (int32_t)x >> shift; +} + +uint32_t HELPER(shl_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i) +{ +    int shift = i & 0xff; +    if (shift >= 32) { +        if (shift == 32) { +            env->CF = x & 1; +        } else { +            env->CF = 0; +        } +        return 0; +    } else if (shift != 0) { +        env->CF = (x >> (32 - shift)) & 1; +        return x << shift; +    } +    return x; +} + +uint32_t HELPER(shr_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i) +{ +    int shift = i & 0xff; +    if (shift >= 32) { +        if (shift == 32) { +            env->CF = (x >> 31) & 1; +        } else { +            env->CF = 0; +        } +        return 0; +    } else if (shift != 0) { +        env->CF = (x >> (shift - 1)) & 1; +        return x >> shift; +    } +    return x; +} + +uint32_t HELPER(sar_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i) +{ +    int shift = i & 0xff; +    if (shift >= 32) { +        env->CF = (x >> 31) & 1; +        return (int32_t)x >> 31; +    } else if (shift != 0) { +        env->CF = (x >> (shift - 1)) & 1; +        return (int32_t)x >> shift; +    } +    return x; +} + +uint32_t HELPER(ror_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i) +{ +    int shift1, shift; +    shift1 = i & 0xff; +    shift = shift1 & 0x1f; +    if (shift == 0) { +        if (shift1 != 0) { +            env->CF = (x >> 31) & 1; +        } +        return x; +    } else { +        env->CF = (x >> (shift - 1)) & 1; +        return ((uint32_t)x >> shift) | (x << (32 - shift)); +    } +} + +#ifndef CONFIG_USER_ONLY +void tlb_fill(CPUState *cs, target_ulong addr, int is_write, +              int mmu_idx, uintptr_t retaddr) +{ +    int ret; + +    ret = uc32_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); +    if (unlikely(ret)) { +        if (retaddr) { +            /* now we have a 
real cpu fault */ +            cpu_restore_state(cs, retaddr); +        } +        cpu_loop_exit(cs); +    } +} +#endif diff --git a/target-unicore32/softmmu.c b/target-unicore32/softmmu.c new file mode 100644 index 00000000..9a3786dd --- /dev/null +++ b/target-unicore32/softmmu.c @@ -0,0 +1,276 @@ +/* + * Softmmu related functions + * + * Copyright (C) 2010-2012 Guan Xuetao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation, or any later version. + * See the COPYING file in the top-level directory. + */ +#ifdef CONFIG_USER_ONLY +#error This file only exist under softmmu circumstance +#endif + +#include <cpu.h> + +#undef DEBUG_UC32 + +#ifdef DEBUG_UC32 +#define DPRINTF(fmt, ...) printf("%s: " fmt , __func__, ## __VA_ARGS__) +#else +#define DPRINTF(fmt, ...) do {} while (0) +#endif + +#define SUPERPAGE_SIZE             (1 << 22) +#define UC32_PAGETABLE_READ        (1 << 8) +#define UC32_PAGETABLE_WRITE       (1 << 7) +#define UC32_PAGETABLE_EXEC        (1 << 6) +#define UC32_PAGETABLE_EXIST       (1 << 2) +#define PAGETABLE_TYPE(x)          ((x) & 3) + + +/* Map CPU modes onto saved register banks.  */ +static inline int bank_number(CPUUniCore32State *env, int mode) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); + +    switch (mode) { +    case ASR_MODE_USER: +    case ASR_MODE_SUSR: +        return 0; +    case ASR_MODE_PRIV: +        return 1; +    case ASR_MODE_TRAP: +        return 2; +    case ASR_MODE_EXTN: +        return 3; +    case ASR_MODE_INTR: +        return 4; +    } +    cpu_abort(CPU(cpu), "Bad mode %x\n", mode); +    return -1; +} + +void switch_mode(CPUUniCore32State *env, int mode) +{ +    int old_mode; +    int i; + +    old_mode = env->uncached_asr & ASR_M; +    if (mode == old_mode) { +        return; +    } + +    i = bank_number(env, old_mode); +    env->banked_r29[i] = env->regs[29]; +    env->banked_r30[i] = env->regs[30]; +    env->banked_bsr[i] = env->bsr; + +    i = bank_number(env, mode); +    env->regs[29] = env->banked_r29[i]; +    env->regs[30] = env->banked_r30[i]; +    env->bsr = env->banked_bsr[i]; +} + +/* Handle a CPU exception.  */ +void uc32_cpu_do_interrupt(CPUState *cs) +{ +    UniCore32CPU *cpu = UNICORE32_CPU(cs); +    CPUUniCore32State *env = &cpu->env; +    uint32_t addr; +    int new_mode; + +    switch (cs->exception_index) { +    case UC32_EXCP_PRIV: +        new_mode = ASR_MODE_PRIV; +        addr = 0x08; +        break; +    case UC32_EXCP_ITRAP: +        DPRINTF("itrap happened at %x\n", env->regs[31]); +        new_mode = ASR_MODE_TRAP; +        addr = 0x0c; +        break; +    case UC32_EXCP_DTRAP: +        DPRINTF("dtrap happened at %x\n", env->regs[31]); +        new_mode = ASR_MODE_TRAP; +        addr = 0x10; +        break; +    case UC32_EXCP_INTR: +        new_mode = ASR_MODE_INTR; +        addr = 0x18; +        break; +    default: +        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); +        return; +    } +    /* High vectors.  */ +    if (env->cp0.c1_sys & (1 << 13)) { +        addr += 0xffff0000; +    } + +    switch_mode(env, new_mode); +    env->bsr = cpu_asr_read(env); +    env->uncached_asr = (env->uncached_asr & ~ASR_M) | new_mode; +    env->uncached_asr |= ASR_I; +    /* The PC already points to the proper instruction.  
*/ +    env->regs[30] = env->regs[31]; +    env->regs[31] = addr; +    cs->interrupt_request |= CPU_INTERRUPT_EXITTB; +} + +static int get_phys_addr_ucv2(CPUUniCore32State *env, uint32_t address, +        int access_type, int is_user, uint32_t *phys_ptr, int *prot, +        target_ulong *page_size) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    CPUState *cs = CPU(cpu); +    int code; +    uint32_t table; +    uint32_t desc; +    uint32_t phys_addr; + +    /* Pagetable walk.  */ +    /* Lookup l1 descriptor.  */ +    table = env->cp0.c2_base & 0xfffff000; +    table |= (address >> 20) & 0xffc; +    desc = ldl_phys(cs->as, table); +    code = 0; +    switch (PAGETABLE_TYPE(desc)) { +    case 3: +        /* Superpage  */ +        if (!(desc & UC32_PAGETABLE_EXIST)) { +            code = 0x0b; /* superpage miss */ +            goto do_fault; +        } +        phys_addr = (desc & 0xffc00000) | (address & 0x003fffff); +        *page_size = SUPERPAGE_SIZE; +        break; +    case 0: +        /* Lookup l2 entry.  */ +        if (is_user) { +            DPRINTF("PGD address %x, desc %x\n", table, desc); +        } +        if (!(desc & UC32_PAGETABLE_EXIST)) { +            code = 0x05; /* second pagetable miss */ +            goto do_fault; +        } +        table = (desc & 0xfffff000) | ((address >> 10) & 0xffc); +        desc = ldl_phys(cs->as, table); +        /* 4k page.  */ +        if (is_user) { +            DPRINTF("PTE address %x, desc %x\n", table, desc); +        } +        if (!(desc & UC32_PAGETABLE_EXIST)) { +            code = 0x08; /* page miss */ +            goto do_fault; +        } +        switch (PAGETABLE_TYPE(desc)) { +        case 0: +            phys_addr = (desc & 0xfffff000) | (address & 0xfff); +            *page_size = TARGET_PAGE_SIZE; +            break; +        default: +            cpu_abort(CPU(cpu), "wrong page type!"); +        } +        break; +    default: +        cpu_abort(CPU(cpu), "wrong page type!"); +    } + +    *phys_ptr = phys_addr; +    *prot = 0; +    /* Check access permissions.  */ +    if (desc & UC32_PAGETABLE_READ) { +        *prot |= PAGE_READ; +    } else { +        if (is_user && (access_type == 0)) { +            code = 0x11; /* access unreadable area */ +            goto do_fault; +        } +    } + +    if (desc & UC32_PAGETABLE_WRITE) { +        *prot |= PAGE_WRITE; +    } else { +        if (is_user && (access_type == 1)) { +            code = 0x12; /* access unwritable area */ +            goto do_fault; +        } +    } + +    if (desc & UC32_PAGETABLE_EXEC) { +        *prot |= PAGE_EXEC; +    } else { +        if (is_user && (access_type == 2)) { +            code = 0x13; /* access unexecutable area */ +            goto do_fault; +        } +    } + +do_fault: +    return code; +} + +int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, +                              int access_type, int mmu_idx) +{ +    UniCore32CPU *cpu = UNICORE32_CPU(cs); +    CPUUniCore32State *env = &cpu->env; +    uint32_t phys_addr; +    target_ulong page_size; +    int prot; +    int ret, is_user; + +    ret = 1; +    is_user = mmu_idx == MMU_USER_IDX; + +    if ((env->cp0.c1_sys & 1) == 0) { +        /* MMU disabled.  
*/ +        phys_addr = address; +        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; +        page_size = TARGET_PAGE_SIZE; +        ret = 0; +    } else { +        if ((address & (1 << 31)) || (is_user)) { +            ret = get_phys_addr_ucv2(env, address, access_type, is_user, +                                    &phys_addr, &prot, &page_size); +            if (is_user) { +                DPRINTF("user space access: ret %x, address %" VADDR_PRIx ", " +                        "access_type %x, phys_addr %x, prot %x\n", +                        ret, address, access_type, phys_addr, prot); +            } +        } else { +            /*IO memory */ +            phys_addr = address | (1 << 31); +            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; +            page_size = TARGET_PAGE_SIZE; +            ret = 0; +        } +    } + +    if (ret == 0) { +        /* Map a single page.  */ +        phys_addr &= TARGET_PAGE_MASK; +        address &= TARGET_PAGE_MASK; +        tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size); +        return 0; +    } + +    env->cp0.c3_faultstatus = ret; +    env->cp0.c4_faultaddr = address; +    if (access_type == 2) { +        cs->exception_index = UC32_EXCP_ITRAP; +    } else { +        cs->exception_index = UC32_EXCP_DTRAP; +    } +    return ret; +} + +hwaddr uc32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ +    UniCore32CPU *cpu = UNICORE32_CPU(cs); + +    cpu_abort(CPU(cpu), "%s not supported yet\n", __func__); +    return addr; +} diff --git a/target-unicore32/translate.c b/target-unicore32/translate.c new file mode 100644 index 00000000..2fc78e6f --- /dev/null +++ b/target-unicore32/translate.c @@ -0,0 +1,2139 @@ +/* + *  UniCore32 translation + * + * Copyright (C) 2010-2012 Guan Xuetao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation, or (at your option) any + * later version. See the COPYING file in the top-level directory. + */ +#include <stdarg.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <inttypes.h> + +#include "cpu.h" +#include "disas/disas.h" +#include "tcg-op.h" +#include "qemu/log.h" +#include "exec/cpu_ldst.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "trace-tcg.h" + + +/* internal defines */ +typedef struct DisasContext { +    target_ulong pc; +    int is_jmp; +    /* Nonzero if this instruction has been conditionally skipped.  */ +    int condjmp; +    /* The label that will be jumped to when the instruction is skipped.  */ +    TCGLabel *condlabel; +    struct TranslationBlock *tb; +    int singlestep_enabled; +#ifndef CONFIG_USER_ONLY +    int user; +#endif +} DisasContext; + +#ifndef CONFIG_USER_ONLY +#define IS_USER(s)      (s->user) +#else +#define IS_USER(s)      1 +#endif + +/* These instructions trap after executing, so defer them until after the +   conditional executions state has been updated.  */ +#define DISAS_SYSCALL 5 + +static TCGv_ptr cpu_env; +static TCGv_i32 cpu_R[32]; + +/* FIXME:  These should be removed.  
*/ +static TCGv cpu_F0s, cpu_F1s; +static TCGv_i64 cpu_F0d, cpu_F1d; + +#include "exec/gen-icount.h" + +static const char *regnames[] = { +      "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07", +      "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15", +      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", +      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" }; + +/* initialize TCG globals.  */ +void uc32_translate_init(void) +{ +    int i; + +    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); + +    for (i = 0; i < 32; i++) { +        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0, +                                offsetof(CPUUniCore32State, regs[i]), regnames[i]); +    } +} + +static int num_temps; + +/* Allocate a temporary variable.  */ +static TCGv_i32 new_tmp(void) +{ +    num_temps++; +    return tcg_temp_new_i32(); +} + +/* Release a temporary variable.  */ +static void dead_tmp(TCGv tmp) +{ +    tcg_temp_free(tmp); +    num_temps--; +} + +static inline TCGv load_cpu_offset(int offset) +{ +    TCGv tmp = new_tmp(); +    tcg_gen_ld_i32(tmp, cpu_env, offset); +    return tmp; +} + +#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name)) + +static inline void store_cpu_offset(TCGv var, int offset) +{ +    tcg_gen_st_i32(var, cpu_env, offset); +    dead_tmp(var); +} + +#define store_cpu_field(var, name) \ +    store_cpu_offset(var, offsetof(CPUUniCore32State, name)) + +/* Set a variable to the value of a CPU register.  */ +static void load_reg_var(DisasContext *s, TCGv var, int reg) +{ +    if (reg == 31) { +        uint32_t addr; +        /* normaly, since we updated PC */ +        addr = (long)s->pc; +        tcg_gen_movi_i32(var, addr); +    } else { +        tcg_gen_mov_i32(var, cpu_R[reg]); +    } +} + +/* Create a new temporary and set it to the value of a CPU register.  */ +static inline TCGv load_reg(DisasContext *s, int reg) +{ +    TCGv tmp = new_tmp(); +    load_reg_var(s, tmp, reg); +    return tmp; +} + +/* Set a CPU register.  The source must be a temporary and will be +   marked as dead.  */ +static void store_reg(DisasContext *s, int reg, TCGv var) +{ +    if (reg == 31) { +        tcg_gen_andi_i32(var, var, ~3); +        s->is_jmp = DISAS_JUMP; +    } +    tcg_gen_mov_i32(cpu_R[reg], var); +    dead_tmp(var); +} + +/* Value extensions.  
*/ +#define gen_uxtb(var)           tcg_gen_ext8u_i32(var, var) +#define gen_uxth(var)           tcg_gen_ext16u_i32(var, var) +#define gen_sxtb(var)           tcg_gen_ext8s_i32(var, var) +#define gen_sxth(var)           tcg_gen_ext16s_i32(var, var) + +#define UCOP_REG_M              (((insn) >>  0) & 0x1f) +#define UCOP_REG_N              (((insn) >> 19) & 0x1f) +#define UCOP_REG_D              (((insn) >> 14) & 0x1f) +#define UCOP_REG_S              (((insn) >>  9) & 0x1f) +#define UCOP_REG_LO             (((insn) >> 14) & 0x1f) +#define UCOP_REG_HI             (((insn) >>  9) & 0x1f) +#define UCOP_SH_OP              (((insn) >>  6) & 0x03) +#define UCOP_SH_IM              (((insn) >>  9) & 0x1f) +#define UCOP_OPCODES            (((insn) >> 25) & 0x0f) +#define UCOP_IMM_9              (((insn) >>  0) & 0x1ff) +#define UCOP_IMM10              (((insn) >>  0) & 0x3ff) +#define UCOP_IMM14              (((insn) >>  0) & 0x3fff) +#define UCOP_COND               (((insn) >> 25) & 0x0f) +#define UCOP_CMOV_COND          (((insn) >> 19) & 0x0f) +#define UCOP_CPNUM              (((insn) >> 10) & 0x0f) +#define UCOP_UCF64_FMT          (((insn) >> 24) & 0x03) +#define UCOP_UCF64_FUNC         (((insn) >>  6) & 0x0f) +#define UCOP_UCF64_COND         (((insn) >>  6) & 0x0f) + +#define UCOP_SET(i)             ((insn) & (1 << (i))) +#define UCOP_SET_P              UCOP_SET(28) +#define UCOP_SET_U              UCOP_SET(27) +#define UCOP_SET_B              UCOP_SET(26) +#define UCOP_SET_W              UCOP_SET(25) +#define UCOP_SET_L              UCOP_SET(24) +#define UCOP_SET_S              UCOP_SET(24) + +#define ILLEGAL         cpu_abort(CPU(cpu),                             \ +                        "Illegal UniCore32 instruction %x at line %d!", \ +                        insn, __LINE__) + +#ifndef CONFIG_USER_ONLY +static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s, +        uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    TCGv tmp, tmp2, tmp3; +    if ((insn & 0xfe000000) == 0xe0000000) { +        tmp2 = new_tmp(); +        tmp3 = new_tmp(); +        tcg_gen_movi_i32(tmp2, UCOP_REG_N); +        tcg_gen_movi_i32(tmp3, UCOP_IMM10); +        if (UCOP_SET_L) { +            tmp = new_tmp(); +            gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3); +            store_reg(s, UCOP_REG_D, tmp); +        } else { +            tmp = load_reg(s, UCOP_REG_D); +            gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3); +            dead_tmp(tmp); +        } +        dead_tmp(tmp2); +        dead_tmp(tmp3); +        return; +    } +    ILLEGAL; +} + +static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s, +        uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    TCGv tmp; + +    if ((insn & 0xff003fff) == 0xe1000400) { +        /* +         * movc rd, pp.nn, #imm9 +         *      rd: UCOP_REG_D +         *      nn: UCOP_REG_N (must be 0) +         *      imm9: 0 +         */ +        if (UCOP_REG_N == 0) { +            tmp = new_tmp(); +            tcg_gen_movi_i32(tmp, 0); +            store_reg(s, UCOP_REG_D, tmp); +            return; +        } else { +            ILLEGAL; +        } +    } +    if ((insn & 0xff003fff) == 0xe0000401) { +        /* +         * movc pp.nn, rn, #imm9 +         *      rn: UCOP_REG_D +         *      nn: UCOP_REG_N (must be 1) +         *      imm9: 1 +         */ +        if (UCOP_REG_N == 1) { +            tmp = load_reg(s, UCOP_REG_D); +            gen_helper_cp1_putc(tmp); +            dead_tmp(tmp); +     
       return; +        } else { +            ILLEGAL; +        } +    } +    ILLEGAL; +} +#endif + +static inline void gen_set_asr(TCGv var, uint32_t mask) +{ +    TCGv tmp_mask = tcg_const_i32(mask); +    gen_helper_asr_write(cpu_env, var, tmp_mask); +    tcg_temp_free_i32(tmp_mask); +} +/* Set NZCV flags from the high 4 bits of var.  */ +#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV) + +static void gen_exception(int excp) +{ +    TCGv tmp = new_tmp(); +    tcg_gen_movi_i32(tmp, excp); +    gen_helper_exception(cpu_env, tmp); +    dead_tmp(tmp); +} + +#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF)) + +/* Set CF to the top bit of var.  */ +static void gen_set_CF_bit31(TCGv var) +{ +    TCGv tmp = new_tmp(); +    tcg_gen_shri_i32(tmp, var, 31); +    gen_set_CF(tmp); +    dead_tmp(tmp); +} + +/* Set N and Z flags from var.  */ +static inline void gen_logic_CC(TCGv var) +{ +    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF)); +    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF)); +} + +/* dest = T0 + T1 + CF. */ +static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1) +{ +    TCGv tmp; +    tcg_gen_add_i32(dest, t0, t1); +    tmp = load_cpu_field(CF); +    tcg_gen_add_i32(dest, dest, tmp); +    dead_tmp(tmp); +} + +/* dest = T0 - T1 + CF - 1.  */ +static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1) +{ +    TCGv tmp; +    tcg_gen_sub_i32(dest, t0, t1); +    tmp = load_cpu_field(CF); +    tcg_gen_add_i32(dest, dest, tmp); +    tcg_gen_subi_i32(dest, dest, 1); +    dead_tmp(tmp); +} + +static void shifter_out_im(TCGv var, int shift) +{ +    TCGv tmp = new_tmp(); +    if (shift == 0) { +        tcg_gen_andi_i32(tmp, var, 1); +    } else { +        tcg_gen_shri_i32(tmp, var, shift); +        if (shift != 31) { +            tcg_gen_andi_i32(tmp, tmp, 1); +        } +    } +    gen_set_CF(tmp); +    dead_tmp(tmp); +} + +/* Shift by immediate.  Includes special handling for shift == 0.  
*/ +static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift, +        int flags) +{ +    switch (shiftop) { +    case 0: /* LSL */ +        if (shift != 0) { +            if (flags) { +                shifter_out_im(var, 32 - shift); +            } +            tcg_gen_shli_i32(var, var, shift); +        } +        break; +    case 1: /* LSR */ +        if (shift == 0) { +            if (flags) { +                tcg_gen_shri_i32(var, var, 31); +                gen_set_CF(var); +            } +            tcg_gen_movi_i32(var, 0); +        } else { +            if (flags) { +                shifter_out_im(var, shift - 1); +            } +            tcg_gen_shri_i32(var, var, shift); +        } +        break; +    case 2: /* ASR */ +        if (shift == 0) { +            shift = 32; +        } +        if (flags) { +            shifter_out_im(var, shift - 1); +        } +        if (shift == 32) { +            shift = 31; +        } +        tcg_gen_sari_i32(var, var, shift); +        break; +    case 3: /* ROR/RRX */ +        if (shift != 0) { +            if (flags) { +                shifter_out_im(var, shift - 1); +            } +            tcg_gen_rotri_i32(var, var, shift); break; +        } else { +            TCGv tmp = load_cpu_field(CF); +            if (flags) { +                shifter_out_im(var, 0); +            } +            tcg_gen_shri_i32(var, var, 1); +            tcg_gen_shli_i32(tmp, tmp, 31); +            tcg_gen_or_i32(var, var, tmp); +            dead_tmp(tmp); +        } +    } +}; + +static inline void gen_uc32_shift_reg(TCGv var, int shiftop, +                                     TCGv shift, int flags) +{ +    if (flags) { +        switch (shiftop) { +        case 0: +            gen_helper_shl_cc(var, cpu_env, var, shift); +            break; +        case 1: +            gen_helper_shr_cc(var, cpu_env, var, shift); +            break; +        case 2: +            gen_helper_sar_cc(var, cpu_env, var, shift); +            break; +        case 3: +            gen_helper_ror_cc(var, cpu_env, var, shift); +            break; +        } +    } else { +        switch (shiftop) { +        case 0: +            gen_helper_shl(var, var, shift); +            break; +        case 1: +            gen_helper_shr(var, var, shift); +            break; +        case 2: +            gen_helper_sar(var, var, shift); +            break; +        case 3: +            tcg_gen_andi_i32(shift, shift, 0x1f); +            tcg_gen_rotr_i32(var, var, shift); +            break; +        } +    } +    dead_tmp(shift); +} + +static void gen_test_cc(int cc, TCGLabel *label) +{ +    TCGv tmp; +    TCGv tmp2; +    TCGLabel *inv; + +    switch (cc) { +    case 0: /* eq: Z */ +        tmp = load_cpu_field(ZF); +        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); +        break; +    case 1: /* ne: !Z */ +        tmp = load_cpu_field(ZF); +        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); +        break; +    case 2: /* cs: C */ +        tmp = load_cpu_field(CF); +        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); +        break; +    case 3: /* cc: !C */ +        tmp = load_cpu_field(CF); +        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); +        break; +    case 4: /* mi: N */ +        tmp = load_cpu_field(NF); +        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); +        break; +    case 5: /* pl: !N */ +        tmp = load_cpu_field(NF); +        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); +        break; +    case 6: /* vs: V */ +        
tmp = load_cpu_field(VF); +        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); +        break; +    case 7: /* vc: !V */ +        tmp = load_cpu_field(VF); +        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); +        break; +    case 8: /* hi: C && !Z */ +        inv = gen_new_label(); +        tmp = load_cpu_field(CF); +        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); +        dead_tmp(tmp); +        tmp = load_cpu_field(ZF); +        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); +        gen_set_label(inv); +        break; +    case 9: /* ls: !C || Z */ +        tmp = load_cpu_field(CF); +        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); +        dead_tmp(tmp); +        tmp = load_cpu_field(ZF); +        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); +        break; +    case 10: /* ge: N == V -> N ^ V == 0 */ +        tmp = load_cpu_field(VF); +        tmp2 = load_cpu_field(NF); +        tcg_gen_xor_i32(tmp, tmp, tmp2); +        dead_tmp(tmp2); +        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); +        break; +    case 11: /* lt: N != V -> N ^ V != 0 */ +        tmp = load_cpu_field(VF); +        tmp2 = load_cpu_field(NF); +        tcg_gen_xor_i32(tmp, tmp, tmp2); +        dead_tmp(tmp2); +        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); +        break; +    case 12: /* gt: !Z && N == V */ +        inv = gen_new_label(); +        tmp = load_cpu_field(ZF); +        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); +        dead_tmp(tmp); +        tmp = load_cpu_field(VF); +        tmp2 = load_cpu_field(NF); +        tcg_gen_xor_i32(tmp, tmp, tmp2); +        dead_tmp(tmp2); +        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); +        gen_set_label(inv); +        break; +    case 13: /* le: Z || N != V */ +        tmp = load_cpu_field(ZF); +        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); +        dead_tmp(tmp); +        tmp = load_cpu_field(VF); +        tmp2 = load_cpu_field(NF); +        tcg_gen_xor_i32(tmp, tmp, tmp2); +        dead_tmp(tmp2); +        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); +        break; +    default: +        fprintf(stderr, "Bad condition code 0x%x\n", cc); +        abort(); +    } +    dead_tmp(tmp); +} + +static const uint8_t table_logic_cc[16] = { +    1, /* and */    1, /* xor */    0, /* sub */    0, /* rsb */ +    0, /* add */    0, /* adc */    0, /* sbc */    0, /* rsc */ +    1, /* andl */   1, /* xorl */   0, /* cmp */    0, /* cmn */ +    1, /* orr */    1, /* mov */    1, /* bic */    1, /* mvn */ +}; + +/* Set PC state from an immediate address.  */ +static inline void gen_bx_im(DisasContext *s, uint32_t addr) +{ +    s->is_jmp = DISAS_UPDATE; +    tcg_gen_movi_i32(cpu_R[31], addr & ~3); +} + +/* Set PC state from var.  var is marked as dead.  
*/ +static inline void gen_bx(DisasContext *s, TCGv var) +{ +    s->is_jmp = DISAS_UPDATE; +    tcg_gen_andi_i32(cpu_R[31], var, ~3); +    dead_tmp(var); +} + +static inline void store_reg_bx(DisasContext *s, int reg, TCGv var) +{ +    store_reg(s, reg, var); +} + +static inline TCGv gen_ld8s(TCGv addr, int index) +{ +    TCGv tmp = new_tmp(); +    tcg_gen_qemu_ld8s(tmp, addr, index); +    return tmp; +} + +static inline TCGv gen_ld8u(TCGv addr, int index) +{ +    TCGv tmp = new_tmp(); +    tcg_gen_qemu_ld8u(tmp, addr, index); +    return tmp; +} + +static inline TCGv gen_ld16s(TCGv addr, int index) +{ +    TCGv tmp = new_tmp(); +    tcg_gen_qemu_ld16s(tmp, addr, index); +    return tmp; +} + +static inline TCGv gen_ld16u(TCGv addr, int index) +{ +    TCGv tmp = new_tmp(); +    tcg_gen_qemu_ld16u(tmp, addr, index); +    return tmp; +} + +static inline TCGv gen_ld32(TCGv addr, int index) +{ +    TCGv tmp = new_tmp(); +    tcg_gen_qemu_ld32u(tmp, addr, index); +    return tmp; +} + +static inline void gen_st8(TCGv val, TCGv addr, int index) +{ +    tcg_gen_qemu_st8(val, addr, index); +    dead_tmp(val); +} + +static inline void gen_st16(TCGv val, TCGv addr, int index) +{ +    tcg_gen_qemu_st16(val, addr, index); +    dead_tmp(val); +} + +static inline void gen_st32(TCGv val, TCGv addr, int index) +{ +    tcg_gen_qemu_st32(val, addr, index); +    dead_tmp(val); +} + +static inline void gen_set_pc_im(uint32_t val) +{ +    tcg_gen_movi_i32(cpu_R[31], val); +} + +/* Force a TB lookup after an instruction that changes the CPU state.  */ +static inline void gen_lookup_tb(DisasContext *s) +{ +    tcg_gen_movi_i32(cpu_R[31], s->pc & ~1); +    s->is_jmp = DISAS_UPDATE; +} + +static inline void gen_add_data_offset(DisasContext *s, unsigned int insn, +        TCGv var) +{ +    int val; +    TCGv offset; + +    if (UCOP_SET(29)) { +        /* immediate */ +        val = UCOP_IMM14; +        if (!UCOP_SET_U) { +            val = -val; +        } +        if (val != 0) { +            tcg_gen_addi_i32(var, var, val); +        } +    } else { +        /* shift/register */ +        offset = load_reg(s, UCOP_REG_M); +        gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0); +        if (!UCOP_SET_U) { +            tcg_gen_sub_i32(var, var, offset); +        } else { +            tcg_gen_add_i32(var, var, offset); +        } +        dead_tmp(offset); +    } +} + +static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn, +        TCGv var) +{ +    int val; +    TCGv offset; + +    if (UCOP_SET(26)) { +        /* immediate */ +        val = (insn & 0x1f) | ((insn >> 4) & 0x3e0); +        if (!UCOP_SET_U) { +            val = -val; +        } +        if (val != 0) { +            tcg_gen_addi_i32(var, var, val); +        } +    } else { +        /* register */ +        offset = load_reg(s, UCOP_REG_M); +        if (!UCOP_SET_U) { +            tcg_gen_sub_i32(var, var, offset); +        } else { +            tcg_gen_add_i32(var, var, offset); +        } +        dead_tmp(offset); +    } +} + +static inline long ucf64_reg_offset(int reg) +{ +    if (reg & 1) { +        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1]) +          + offsetof(CPU_DoubleU, l.upper); +    } else { +        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1]) +          + offsetof(CPU_DoubleU, l.lower); +    } +} + +#define ucf64_gen_ld32(reg)      load_cpu_offset(ucf64_reg_offset(reg)) +#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg)) + +/* UniCore-F64 single load/store 
I_offset */ +static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    int offset; +    TCGv tmp; +    TCGv addr; + +    addr = load_reg(s, UCOP_REG_N); +    if (!UCOP_SET_P && !UCOP_SET_W) { +        ILLEGAL; +    } + +    if (UCOP_SET_P) { +        offset = UCOP_IMM10 << 2; +        if (!UCOP_SET_U) { +            offset = -offset; +        } +        if (offset != 0) { +            tcg_gen_addi_i32(addr, addr, offset); +        } +    } + +    if (UCOP_SET_L) { /* load */ +        tmp = gen_ld32(addr, IS_USER(s)); +        ucf64_gen_st32(tmp, UCOP_REG_D); +    } else { /* store */ +        tmp = ucf64_gen_ld32(UCOP_REG_D); +        gen_st32(tmp, addr, IS_USER(s)); +    } + +    if (!UCOP_SET_P) { +        offset = UCOP_IMM10 << 2; +        if (!UCOP_SET_U) { +            offset = -offset; +        } +        if (offset != 0) { +            tcg_gen_addi_i32(addr, addr, offset); +        } +    } +    if (UCOP_SET_W) { +        store_reg(s, UCOP_REG_N, addr); +    } else { +        dead_tmp(addr); +    } +} + +/* UniCore-F64 load/store multiple words */ +static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    unsigned int i; +    int j, n, freg; +    TCGv tmp; +    TCGv addr; + +    if (UCOP_REG_D != 0) { +        ILLEGAL; +    } +    if (UCOP_REG_N == 31) { +        ILLEGAL; +    } +    if ((insn << 24) == 0) { +        ILLEGAL; +    } + +    addr = load_reg(s, UCOP_REG_N); + +    n = 0; +    for (i = 0; i < 8; i++) { +        if (UCOP_SET(i)) { +            n++; +        } +    } + +    if (UCOP_SET_U) { +        if (UCOP_SET_P) { /* pre increment */ +            tcg_gen_addi_i32(addr, addr, 4); +        } /* unnecessary to do anything when post increment */ +    } else { +        if (UCOP_SET_P) { /* pre decrement */ +            tcg_gen_addi_i32(addr, addr, -(n * 4)); +        } else { /* post decrement */ +            if (n != 1) { +                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); +            } +        } +    } + +    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */ + +    for (i = 0, j = 0; i < 8; i++, freg++) { +        if (!UCOP_SET(i)) { +            continue; +        } + +        if (UCOP_SET_L) { /* load */ +            tmp = gen_ld32(addr, IS_USER(s)); +            ucf64_gen_st32(tmp, freg); +        } else { /* store */ +            tmp = ucf64_gen_ld32(freg); +            gen_st32(tmp, addr, IS_USER(s)); +        } + +        j++; +        /* unnecessary to add after the last transfer */ +        if (j != n) { +            tcg_gen_addi_i32(addr, addr, 4); +        } +    } + +    if (UCOP_SET_W) { /* write back */ +        if (UCOP_SET_U) { +            if (!UCOP_SET_P) { /* post increment */ +                tcg_gen_addi_i32(addr, addr, 4); +            } /* unnecessary to do anything when pre increment */ +        } else { +            if (UCOP_SET_P) { +                /* pre decrement */ +                if (n != 1) { +                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); +                } +            } else { +                /* post decrement */ +                tcg_gen_addi_i32(addr, addr, -(n * 4)); +            } +        } +        store_reg(s, UCOP_REG_N, addr); +    } else { +        dead_tmp(addr); +    } +} + +/* UniCore-F64 mrc/mcr */ +static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = 
uc32_env_get_cpu(env); +    TCGv tmp; + +    if ((insn & 0xfe0003ff) == 0xe2000000) { +        /* control register */ +        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) { +            ILLEGAL; +        } +        if (UCOP_SET(24)) { +            /* CFF */ +            tmp = new_tmp(); +            gen_helper_ucf64_get_fpscr(tmp, cpu_env); +            store_reg(s, UCOP_REG_D, tmp); +        } else { +            /* CTF */ +            tmp = load_reg(s, UCOP_REG_D); +            gen_helper_ucf64_set_fpscr(cpu_env, tmp); +            dead_tmp(tmp); +            gen_lookup_tb(s); +        } +        return; +    } +    if ((insn & 0xfe0003ff) == 0xe0000000) { +        /* general register */ +        if (UCOP_REG_D == 31) { +            ILLEGAL; +        } +        if (UCOP_SET(24)) { /* MFF */ +            tmp = ucf64_gen_ld32(UCOP_REG_N); +            store_reg(s, UCOP_REG_D, tmp); +        } else { /* MTF */ +            tmp = load_reg(s, UCOP_REG_D); +            ucf64_gen_st32(tmp, UCOP_REG_N); +        } +        return; +    } +    if ((insn & 0xfb000000) == 0xe9000000) { +        /* MFFC */ +        if (UCOP_REG_D != 31) { +            ILLEGAL; +        } +        if (UCOP_UCF64_COND & 0x8) { +            ILLEGAL; +        } + +        tmp = new_tmp(); +        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND); +        if (UCOP_SET(26)) { +            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N)); +            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); +            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env); +        } else { +            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N)); +            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); +            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env); +        } +        dead_tmp(tmp); +        return; +    } +    ILLEGAL; +} + +/* UniCore-F64 convert instructions */ +static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); + +    if (UCOP_UCF64_FMT == 3) { +        ILLEGAL; +    } +    if (UCOP_REG_N != 0) { +        ILLEGAL; +    } +    switch (UCOP_UCF64_FUNC) { +    case 0: /* cvt.s */ +        switch (UCOP_UCF64_FMT) { +        case 1 /* d */: +            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); +            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env); +            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); +            break; +        case 2 /* w */: +            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); +            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env); +            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); +            break; +        default /* s */: +            ILLEGAL; +            break; +        } +        break; +    case 1: /* cvt.d */ +        switch (UCOP_UCF64_FMT) { +        case 0 /* s */: +            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); +            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env); +            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D)); +            break; +        case 2 /* w */: +            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); +            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env); +            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D)); +            break; +        default /* d */: +            ILLEGAL; +        
    break; +        } +        break; +    case 4: /* cvt.w */ +        switch (UCOP_UCF64_FMT) { +        case 0 /* s */: +            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); +            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env); +            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); +            break; +        case 1 /* d */: +            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); +            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env); +            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D)); +            break; +    default /* w */: +            ILLEGAL; +            break; +        } +        break; +    default: +        ILLEGAL; +    } +} + +/* UniCore-F64 compare instructions */ +static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); + +    if (UCOP_SET(25)) { +        ILLEGAL; +    } +    if (UCOP_REG_D != 0) { +        ILLEGAL; +    } + +    ILLEGAL; /* TODO */ +    if (UCOP_SET(24)) { +        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N)); +        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M)); +        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */ +    } else { +        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N)); +        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M)); +        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */ +    } +} + +#define gen_helper_ucf64_movs(x, y)      do { } while (0) +#define gen_helper_ucf64_movd(x, y)      do { } while (0) + +#define UCF64_OP1(name)    do {                           \ +        if (UCOP_REG_N != 0) {                            \ +            ILLEGAL;                                      \ +        }                                                 \ +        switch (UCOP_UCF64_FMT) {                         \ +        case 0 /* s */:                                   \ +            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \ +                           ucf64_reg_offset(UCOP_REG_M)); \ +            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \ +            tcg_gen_st_i32(cpu_F0s, cpu_env,              \ +                           ucf64_reg_offset(UCOP_REG_D)); \ +            break;                                        \ +        case 1 /* d */:                                   \ +            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \ +                           ucf64_reg_offset(UCOP_REG_M)); \ +            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \ +            tcg_gen_st_i64(cpu_F0d, cpu_env,              \ +                           ucf64_reg_offset(UCOP_REG_D)); \ +            break;                                        \ +        case 2 /* w */:                                   \ +            ILLEGAL;                                      \ +            break;                                        \ +        }                                                 \ +    } while (0) + +#define UCF64_OP2(name)    do {                           \ +        switch (UCOP_UCF64_FMT) {                         \ +        case 0 /* s */:                                   \ +            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \ +                           ucf64_reg_offset(UCOP_REG_N)); \ +            tcg_gen_ld_i32(cpu_F1s, cpu_env,              \ +                           ucf64_reg_offset(UCOP_REG_M)); \ +            
gen_helper_ucf64_##name##s(cpu_F0s,           \ +                           cpu_F0s, cpu_F1s, cpu_env);    \ +            tcg_gen_st_i32(cpu_F0s, cpu_env,              \ +                           ucf64_reg_offset(UCOP_REG_D)); \ +            break;                                        \ +        case 1 /* d */:                                   \ +            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \ +                           ucf64_reg_offset(UCOP_REG_N)); \ +            tcg_gen_ld_i64(cpu_F1d, cpu_env,              \ +                           ucf64_reg_offset(UCOP_REG_M)); \ +            gen_helper_ucf64_##name##d(cpu_F0d,           \ +                           cpu_F0d, cpu_F1d, cpu_env);    \ +            tcg_gen_st_i64(cpu_F0d, cpu_env,              \ +                           ucf64_reg_offset(UCOP_REG_D)); \ +            break;                                        \ +        case 2 /* w */:                                   \ +            ILLEGAL;                                      \ +            break;                                        \ +        }                                                 \ +    } while (0) + +/* UniCore-F64 data processing */ +static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); + +    if (UCOP_UCF64_FMT == 3) { +        ILLEGAL; +    } +    switch (UCOP_UCF64_FUNC) { +    case 0: /* add */ +        UCF64_OP2(add); +        break; +    case 1: /* sub */ +        UCF64_OP2(sub); +        break; +    case 2: /* mul */ +        UCF64_OP2(mul); +        break; +    case 4: /* div */ +        UCF64_OP2(div); +        break; +    case 5: /* abs */ +        UCF64_OP1(abs); +        break; +    case 6: /* mov */ +        UCF64_OP1(mov); +        break; +    case 7: /* neg */ +        UCF64_OP1(neg); +        break; +    default: +        ILLEGAL; +    } +} + +/* Disassemble an F64 instruction */ +static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); + +    if (!UCOP_SET(29)) { +        if (UCOP_SET(26)) { +            do_ucf64_ldst_m(env, s, insn); +        } else { +            do_ucf64_ldst_i(env, s, insn); +        } +    } else { +        if (UCOP_SET(5)) { +            switch ((insn >> 26) & 0x3) { +            case 0: +                do_ucf64_datap(env, s, insn); +                break; +            case 1: +                ILLEGAL; +                break; +            case 2: +                do_ucf64_fcvt(env, s, insn); +                break; +            case 3: +                do_ucf64_fcmp(env, s, insn); +                break; +            } +        } else { +            do_ucf64_trans(env, s, insn); +        } +    } +} + +static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest) +{ +    TranslationBlock *tb; + +    tb = s->tb; +    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { +        tcg_gen_goto_tb(n); +        gen_set_pc_im(dest); +        tcg_gen_exit_tb((uintptr_t)tb + n); +    } else { +        gen_set_pc_im(dest); +        tcg_gen_exit_tb(0); +    } +} + +static inline void gen_jmp(DisasContext *s, uint32_t dest) +{ +    if (unlikely(s->singlestep_enabled)) { +        /* An indirect jump so that we still trigger the debug exception.  */ +        gen_bx_im(s, dest); +    } else { +        gen_goto_tb(s, 0, dest); +        s->is_jmp = DISAS_TB_JUMP; +    } +} + +/* Returns nonzero if access to the PSR is not permitted. 
Marks t0 as dead. */ +static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0) +{ +    TCGv tmp; +    if (bsr) { +        /* ??? This is also undefined in system mode.  */ +        if (IS_USER(s)) { +            return 1; +        } + +        tmp = load_cpu_field(bsr); +        tcg_gen_andi_i32(tmp, tmp, ~mask); +        tcg_gen_andi_i32(t0, t0, mask); +        tcg_gen_or_i32(tmp, tmp, t0); +        store_cpu_field(tmp, bsr); +    } else { +        gen_set_asr(t0, mask); +    } +    dead_tmp(t0); +    gen_lookup_tb(s); +    return 0; +} + +/* Generate an old-style exception return. Marks pc as dead. */ +static void gen_exception_return(DisasContext *s, TCGv pc) +{ +    TCGv tmp; +    store_reg(s, 31, pc); +    tmp = load_cpu_field(bsr); +    gen_set_asr(tmp, 0xffffffff); +    dead_tmp(tmp); +    s->is_jmp = DISAS_UPDATE; +} + +static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s, +        uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); + +    switch (UCOP_CPNUM) { +#ifndef CONFIG_USER_ONLY +    case 0: +        disas_cp0_insn(env, s, insn); +        break; +    case 1: +        disas_ocd_insn(env, s, insn); +        break; +#endif +    case 2: +        disas_ucf64_insn(env, s, insn); +        break; +    default: +        /* Unknown coprocessor. */ +        cpu_abort(CPU(cpu), "Unknown coprocessor!"); +    } +} + +/* data processing instructions */ +static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    TCGv tmp; +    TCGv tmp2; +    int logic_cc; + +    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) { +        if (UCOP_SET(23)) { /* CMOV instructions */ +            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) { +                ILLEGAL; +            } +            /* if not always execute, we generate a conditional jump to +               next instruction */ +            s->condlabel = gen_new_label(); +            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel); +            s->condjmp = 1; +        } +    } + +    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24); + +    if (UCOP_SET(29)) { +        unsigned int val; +        /* immediate operand */ +        val = UCOP_IMM_9; +        if (UCOP_SH_IM) { +            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM)); +        } +        tmp2 = new_tmp(); +        tcg_gen_movi_i32(tmp2, val); +        if (logic_cc && UCOP_SH_IM) { +            gen_set_CF_bit31(tmp2); +        } +   } else { +        /* register */ +        tmp2 = load_reg(s, UCOP_REG_M); +        if (UCOP_SET(5)) { +            tmp = load_reg(s, UCOP_REG_S); +            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc); +        } else { +            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc); +        } +    } + +    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) { +        tmp = load_reg(s, UCOP_REG_N); +    } else { +        TCGV_UNUSED(tmp); +    } + +    switch (UCOP_OPCODES) { +    case 0x00: +        tcg_gen_and_i32(tmp, tmp, tmp2); +        if (logic_cc) { +            gen_logic_CC(tmp); +        } +        store_reg_bx(s, UCOP_REG_D, tmp); +        break; +    case 0x01: +        tcg_gen_xor_i32(tmp, tmp, tmp2); +        if (logic_cc) { +            gen_logic_CC(tmp); +        } +        store_reg_bx(s, UCOP_REG_D, tmp); +        break; +    case 0x02: +        if (UCOP_SET_S && UCOP_REG_D == 31) { +            /* SUBS r31, ... is used for exception return.  
*/ +            if (IS_USER(s)) { +                ILLEGAL; +            } +            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); +            gen_exception_return(s, tmp); +        } else { +            if (UCOP_SET_S) { +                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); +            } else { +                tcg_gen_sub_i32(tmp, tmp, tmp2); +            } +            store_reg_bx(s, UCOP_REG_D, tmp); +        } +        break; +    case 0x03: +        if (UCOP_SET_S) { +            gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp); +        } else { +            tcg_gen_sub_i32(tmp, tmp2, tmp); +        } +        store_reg_bx(s, UCOP_REG_D, tmp); +        break; +    case 0x04: +        if (UCOP_SET_S) { +            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2); +        } else { +            tcg_gen_add_i32(tmp, tmp, tmp2); +        } +        store_reg_bx(s, UCOP_REG_D, tmp); +        break; +    case 0x05: +        if (UCOP_SET_S) { +            gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2); +        } else { +            gen_add_carry(tmp, tmp, tmp2); +        } +        store_reg_bx(s, UCOP_REG_D, tmp); +        break; +    case 0x06: +        if (UCOP_SET_S) { +            gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2); +        } else { +            gen_sub_carry(tmp, tmp, tmp2); +        } +        store_reg_bx(s, UCOP_REG_D, tmp); +        break; +    case 0x07: +        if (UCOP_SET_S) { +            gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp); +        } else { +            gen_sub_carry(tmp, tmp2, tmp); +        } +        store_reg_bx(s, UCOP_REG_D, tmp); +        break; +    case 0x08: +        if (UCOP_SET_S) { +            tcg_gen_and_i32(tmp, tmp, tmp2); +            gen_logic_CC(tmp); +        } +        dead_tmp(tmp); +        break; +    case 0x09: +        if (UCOP_SET_S) { +            tcg_gen_xor_i32(tmp, tmp, tmp2); +            gen_logic_CC(tmp); +        } +        dead_tmp(tmp); +        break; +    case 0x0a: +        if (UCOP_SET_S) { +            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2); +        } +        dead_tmp(tmp); +        break; +    case 0x0b: +        if (UCOP_SET_S) { +            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2); +        } +        dead_tmp(tmp); +        break; +    case 0x0c: +        tcg_gen_or_i32(tmp, tmp, tmp2); +        if (logic_cc) { +            gen_logic_CC(tmp); +        } +        store_reg_bx(s, UCOP_REG_D, tmp); +        break; +    case 0x0d: +        if (logic_cc && UCOP_REG_D == 31) { +            /* MOVS r31, ... is used for exception return.  
*/ +            if (IS_USER(s)) { +                ILLEGAL; +            } +            gen_exception_return(s, tmp2); +        } else { +            if (logic_cc) { +                gen_logic_CC(tmp2); +            } +            store_reg_bx(s, UCOP_REG_D, tmp2); +        } +        break; +    case 0x0e: +        tcg_gen_andc_i32(tmp, tmp, tmp2); +        if (logic_cc) { +            gen_logic_CC(tmp); +        } +        store_reg_bx(s, UCOP_REG_D, tmp); +        break; +    default: +    case 0x0f: +        tcg_gen_not_i32(tmp2, tmp2); +        if (logic_cc) { +            gen_logic_CC(tmp2); +        } +        store_reg_bx(s, UCOP_REG_D, tmp2); +        break; +    } +    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) { +        dead_tmp(tmp2); +    } +} + +/* multiply */ +static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    TCGv tmp, tmp2, tmp3, tmp4; + +    if (UCOP_SET(27)) { +        /* 64 bit mul */ +        tmp = load_reg(s, UCOP_REG_M); +        tmp2 = load_reg(s, UCOP_REG_N); +        if (UCOP_SET(26)) { +            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2); +        } else { +            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2); +        } +        if (UCOP_SET(25)) { /* mult accumulate */ +            tmp3 = load_reg(s, UCOP_REG_LO); +            tmp4 = load_reg(s, UCOP_REG_HI); +            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4); +            dead_tmp(tmp3); +            dead_tmp(tmp4); +        } +        store_reg(s, UCOP_REG_LO, tmp); +        store_reg(s, UCOP_REG_HI, tmp2); +    } else { +        /* 32 bit mul */ +        tmp = load_reg(s, UCOP_REG_M); +        tmp2 = load_reg(s, UCOP_REG_N); +        tcg_gen_mul_i32(tmp, tmp, tmp2); +        dead_tmp(tmp2); +        if (UCOP_SET(25)) { +            /* Add */ +            tmp2 = load_reg(s, UCOP_REG_S); +            tcg_gen_add_i32(tmp, tmp, tmp2); +            dead_tmp(tmp2); +        } +        if (UCOP_SET_S) { +            gen_logic_CC(tmp); +        } +        store_reg(s, UCOP_REG_D, tmp); +    } +} + +/* miscellaneous instructions */ +static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    unsigned int val; +    TCGv tmp; + +    if ((insn & 0xffffffe0) == 0x10ffc120) { +        /* Trivial implementation equivalent to bx.  
*/ +        tmp = load_reg(s, UCOP_REG_M); +        gen_bx(s, tmp); +        return; +    } + +    if ((insn & 0xfbffc000) == 0x30ffc000) { +        /* PSR = immediate */ +        val = UCOP_IMM_9; +        if (UCOP_SH_IM) { +            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM)); +        } +        tmp = new_tmp(); +        tcg_gen_movi_i32(tmp, val); +        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) { +            ILLEGAL; +        } +        return; +    } + +    if ((insn & 0xfbffffe0) == 0x12ffc020) { +        /* PSR.flag = reg */ +        tmp = load_reg(s, UCOP_REG_M); +        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) { +            ILLEGAL; +        } +        return; +    } + +    if ((insn & 0xfbffffe0) == 0x10ffc020) { +        /* PSR = reg */ +        tmp = load_reg(s, UCOP_REG_M); +        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) { +            ILLEGAL; +        } +        return; +    } + +    if ((insn & 0xfbf83fff) == 0x10f80000) { +        /* reg = PSR */ +        if (UCOP_SET_B) { +            if (IS_USER(s)) { +                ILLEGAL; +            } +            tmp = load_cpu_field(bsr); +        } else { +            tmp = new_tmp(); +            gen_helper_asr_read(tmp, cpu_env); +        } +        store_reg(s, UCOP_REG_D, tmp); +        return; +    } + +    if ((insn & 0xfbf83fe0) == 0x12f80120) { +        /* clz */ +        tmp = load_reg(s, UCOP_REG_M); +        if (UCOP_SET(26)) { +            gen_helper_clo(tmp, tmp); +        } else { +            gen_helper_clz(tmp, tmp); +        } +        store_reg(s, UCOP_REG_D, tmp); +        return; +    } + +    /* otherwise */ +    ILLEGAL; +} + +/* load/store I_offset and R_offset */ +static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    unsigned int mmu_idx; +    TCGv tmp; +    TCGv tmp2; + +    tmp2 = load_reg(s, UCOP_REG_N); +    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W)); + +    /* immediate */ +    if (UCOP_SET_P) { +        gen_add_data_offset(s, insn, tmp2); +    } + +    if (UCOP_SET_L) { +        /* load */ +        if (UCOP_SET_B) { +            tmp = gen_ld8u(tmp2, mmu_idx); +        } else { +            tmp = gen_ld32(tmp2, mmu_idx); +        } +    } else { +        /* store */ +        tmp = load_reg(s, UCOP_REG_D); +        if (UCOP_SET_B) { +            gen_st8(tmp, tmp2, mmu_idx); +        } else { +            gen_st32(tmp, tmp2, mmu_idx); +        } +    } +    if (!UCOP_SET_P) { +        gen_add_data_offset(s, insn, tmp2); +        store_reg(s, UCOP_REG_N, tmp2); +    } else if (UCOP_SET_W) { +        store_reg(s, UCOP_REG_N, tmp2); +    } else { +        dead_tmp(tmp2); +    } +    if (UCOP_SET_L) { +        /* Complete the load.  */ +        if (UCOP_REG_D == 31) { +            gen_bx(s, tmp); +        } else { +            store_reg(s, UCOP_REG_D, tmp); +        } +    } +} + +/* SWP instruction */ +static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    TCGv addr; +    TCGv tmp; +    TCGv tmp2; + +    if ((insn & 0xff003fe0) != 0x40000120) { +        ILLEGAL; +    } + +    /* ??? This is not really atomic.  However we know +       we never have multiple CPUs running in parallel, +       so it is good enough.  
*/ +    addr = load_reg(s, UCOP_REG_N); +    tmp = load_reg(s, UCOP_REG_M); +    if (UCOP_SET_B) { +        tmp2 = gen_ld8u(addr, IS_USER(s)); +        gen_st8(tmp, addr, IS_USER(s)); +    } else { +        tmp2 = gen_ld32(addr, IS_USER(s)); +        gen_st32(tmp, addr, IS_USER(s)); +    } +    dead_tmp(addr); +    store_reg(s, UCOP_REG_D, tmp2); +} + +/* load/store hw/sb */ +static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    TCGv addr; +    TCGv tmp; + +    if (UCOP_SH_OP == 0) { +        do_swap(env, s, insn); +        return; +    } + +    addr = load_reg(s, UCOP_REG_N); +    if (UCOP_SET_P) { +        gen_add_datah_offset(s, insn, addr); +    } + +    if (UCOP_SET_L) { /* load */ +        switch (UCOP_SH_OP) { +        case 1: +            tmp = gen_ld16u(addr, IS_USER(s)); +            break; +        case 2: +            tmp = gen_ld8s(addr, IS_USER(s)); +            break; +        default: /* see do_swap */ +        case 3: +            tmp = gen_ld16s(addr, IS_USER(s)); +            break; +        } +    } else { /* store */ +        if (UCOP_SH_OP != 1) { +            ILLEGAL; +        } +        tmp = load_reg(s, UCOP_REG_D); +        gen_st16(tmp, addr, IS_USER(s)); +    } +    /* Perform base writeback before the loaded value to +       ensure correct behavior with overlapping index registers. */ +    if (!UCOP_SET_P) { +        gen_add_datah_offset(s, insn, addr); +        store_reg(s, UCOP_REG_N, addr); +    } else if (UCOP_SET_W) { +        store_reg(s, UCOP_REG_N, addr); +    } else { +        dead_tmp(addr); +    } +    if (UCOP_SET_L) { +        /* Complete the load.  */ +        store_reg(s, UCOP_REG_D, tmp); +    } +} + +/* load/store multiple words */ +static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    unsigned int val, i, mmu_idx; +    int j, n, reg, user, loaded_base; +    TCGv tmp; +    TCGv tmp2; +    TCGv addr; +    TCGv loaded_var; + +    if (UCOP_SET(7)) { +        ILLEGAL; +    } +    /* XXX: store correct base if write back */ +    user = 0; +    if (UCOP_SET_B) { /* S bit in instruction table */ +        if (IS_USER(s)) { +            ILLEGAL; /* only usable in supervisor mode */ +        } +        if (UCOP_SET(18) == 0) { /* pc reg */ +            user = 1; +        } +    } + +    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W)); +    addr = load_reg(s, UCOP_REG_N); + +    /* compute total size */ +    loaded_base = 0; +    TCGV_UNUSED(loaded_var); +    n = 0; +    for (i = 0; i < 6; i++) { +        if (UCOP_SET(i)) { +            n++; +        } +    } +    for (i = 9; i < 19; i++) { +        if (UCOP_SET(i)) { +            n++; +        } +    } +    /* XXX: test invalid n == 0 case ? */ +    if (UCOP_SET_U) { +        if (UCOP_SET_P) { +            /* pre increment */ +            tcg_gen_addi_i32(addr, addr, 4); +        } else { +            /* post increment */ +        } +    } else { +        if (UCOP_SET_P) { +            /* pre decrement */ +            tcg_gen_addi_i32(addr, addr, -(n * 4)); +        } else { +            /* post decrement */ +            if (n != 1) { +                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); +            } +        } +    } + +    j = 0; +    reg = UCOP_SET(6) ? 
16 : 0; +    for (i = 0; i < 19; i++, reg++) { +        if (i == 6) { +            i = i + 3; +        } +        if (UCOP_SET(i)) { +            if (UCOP_SET_L) { /* load */ +                tmp = gen_ld32(addr, mmu_idx); +                if (reg == 31) { +                    gen_bx(s, tmp); +                } else if (user) { +                    tmp2 = tcg_const_i32(reg); +                    gen_helper_set_user_reg(cpu_env, tmp2, tmp); +                    tcg_temp_free_i32(tmp2); +                    dead_tmp(tmp); +                } else if (reg == UCOP_REG_N) { +                    loaded_var = tmp; +                    loaded_base = 1; +                } else { +                    store_reg(s, reg, tmp); +                } +            } else { /* store */ +                if (reg == 31) { +                    /* special case: r31 = PC + 4 */ +                    val = (long)s->pc; +                    tmp = new_tmp(); +                    tcg_gen_movi_i32(tmp, val); +                } else if (user) { +                    tmp = new_tmp(); +                    tmp2 = tcg_const_i32(reg); +                    gen_helper_get_user_reg(tmp, cpu_env, tmp2); +                    tcg_temp_free_i32(tmp2); +                } else { +                    tmp = load_reg(s, reg); +                } +                gen_st32(tmp, addr, mmu_idx); +            } +            j++; +            /* no need to add after the last transfer */ +            if (j != n) { +                tcg_gen_addi_i32(addr, addr, 4); +            } +        } +    } +    if (UCOP_SET_W) { /* write back */ +        if (UCOP_SET_U) { +            if (UCOP_SET_P) { +                /* pre increment */ +            } else { +                /* post increment */ +                tcg_gen_addi_i32(addr, addr, 4); +            } +        } else { +            if (UCOP_SET_P) { +                /* pre decrement */ +                if (n != 1) { +                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); +                } +            } else { +                /* post decrement */ +                tcg_gen_addi_i32(addr, addr, -(n * 4)); +            } +        } +        store_reg(s, UCOP_REG_N, addr); +    } else { +        dead_tmp(addr); +    } +    if (loaded_base) { +        store_reg(s, UCOP_REG_N, loaded_var); +    } +    if (UCOP_SET_B && !user) { +        /* Restore ASR from BSR.  
*/ +        tmp = load_cpu_field(bsr); +        gen_set_asr(tmp, 0xffffffff); +        dead_tmp(tmp); +        s->is_jmp = DISAS_UPDATE; +    } +} + +/* branch (and link) */ +static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    unsigned int val; +    int32_t offset; +    TCGv tmp; + +    if (UCOP_COND == 0xf) { +        ILLEGAL; +    } + +    if (UCOP_COND != 0xe) { +        /* if not always execute, we generate a conditional jump to +           next instruction */ +        s->condlabel = gen_new_label(); +        gen_test_cc(UCOP_COND ^ 1, s->condlabel); +        s->condjmp = 1; +    } + +    val = (int32_t)s->pc; +    if (UCOP_SET_L) { +        tmp = new_tmp(); +        tcg_gen_movi_i32(tmp, val); +        store_reg(s, 30, tmp); +    } +    offset = (((int32_t)insn << 8) >> 8); +    val += (offset << 2); /* unicore is pc+4 */ +    gen_jmp(s, val); +} + +static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s) +{ +    UniCore32CPU *cpu = uc32_env_get_cpu(env); +    unsigned int insn; + +    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { +        tcg_gen_debug_insn_start(s->pc); +    } + +    insn = cpu_ldl_code(env, s->pc); +    s->pc += 4; + +    /* UniCore instructions class: +     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx +     * AAA  : see switch case +     * BBBB : opcodes or cond or PUBW +     * C    : S OR L +     * D    : 8 +     * E    : 5 +     */ +    switch (insn >> 29) { +    case 0x0: +        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) { +            do_mult(env, s, insn); +            break; +        } + +        if (UCOP_SET(8)) { +            do_misc(env, s, insn); +            break; +        } +    case 0x1: +        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) { +            do_misc(env, s, insn); +            break; +        } +        do_datap(env, s, insn); +        break; + +    case 0x2: +        if (UCOP_SET(8) && UCOP_SET(5)) { +            do_ldst_hwsb(env, s, insn); +            break; +        } +        if (UCOP_SET(8) || UCOP_SET(5)) { +            ILLEGAL; +        } +    case 0x3: +        do_ldst_ir(env, s, insn); +        break; + +    case 0x4: +        if (UCOP_SET(8)) { +            ILLEGAL; /* extended instructions */ +        } +        do_ldst_m(env, s, insn); +        break; +    case 0x5: +        do_branch(env, s, insn); +        break; +    case 0x6: +        /* Coprocessor.  */ +        disas_coproc_insn(env, s, insn); +        break; +    case 0x7: +        if (!UCOP_SET(28)) { +            disas_coproc_insn(env, s, insn); +            break; +        } +        if ((insn & 0xff000000) == 0xff000000) { /* syscall */ +            gen_set_pc_im(s->pc); +            s->is_jmp = DISAS_SYSCALL; +            break; +        } +        ILLEGAL; +    } +} + +/* generate intermediate code in gen_opc_buf and gen_opparam_buf for +   basic block 'tb'. If search_pc is TRUE, also generate PC +   information for each intermediate instruction. 
*/ +static inline void gen_intermediate_code_internal(UniCore32CPU *cpu, +        TranslationBlock *tb, bool search_pc) +{ +    CPUState *cs = CPU(cpu); +    CPUUniCore32State *env = &cpu->env; +    DisasContext dc1, *dc = &dc1; +    CPUBreakpoint *bp; +    int j, lj; +    target_ulong pc_start; +    uint32_t next_page_start; +    int num_insns; +    int max_insns; + +    /* generate intermediate code */ +    num_temps = 0; + +    pc_start = tb->pc; + +    dc->tb = tb; + +    dc->is_jmp = DISAS_NEXT; +    dc->pc = pc_start; +    dc->singlestep_enabled = cs->singlestep_enabled; +    dc->condjmp = 0; +    cpu_F0s = tcg_temp_new_i32(); +    cpu_F1s = tcg_temp_new_i32(); +    cpu_F0d = tcg_temp_new_i64(); +    cpu_F1d = tcg_temp_new_i64(); +    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; +    lj = -1; +    num_insns = 0; +    max_insns = tb->cflags & CF_COUNT_MASK; +    if (max_insns == 0) { +        max_insns = CF_COUNT_MASK; +    } + +#ifndef CONFIG_USER_ONLY +    if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) { +        dc->user = 1; +    } else { +        dc->user = 0; +    } +#endif + +    gen_tb_start(tb); +    do { +        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { +            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { +                if (bp->pc == dc->pc) { +                    gen_set_pc_im(dc->pc); +                    gen_exception(EXCP_DEBUG); +                    dc->is_jmp = DISAS_JUMP; +                    /* Advance PC so that clearing the breakpoint will +                       invalidate this TB.  */ +                    dc->pc += 2; /* FIXME */ +                    goto done_generating; +                } +            } +        } +        if (search_pc) { +            j = tcg_op_buf_count(); +            if (lj < j) { +                lj++; +                while (lj < j) { +                    tcg_ctx.gen_opc_instr_start[lj++] = 0; +                } +            } +            tcg_ctx.gen_opc_pc[lj] = dc->pc; +            tcg_ctx.gen_opc_instr_start[lj] = 1; +            tcg_ctx.gen_opc_icount[lj] = num_insns; +        } + +        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) { +            gen_io_start(); +        } + +        disas_uc32_insn(env, dc); + +        if (num_temps) { +            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc); +            num_temps = 0; +        } + +        if (dc->condjmp && !dc->is_jmp) { +            gen_set_label(dc->condlabel); +            dc->condjmp = 0; +        } +        /* Translation stops when a conditional branch is encountered. +         * Otherwise the subsequent code could get translated several times. +         * Also stop translation when a page boundary is reached.  This +         * ensures prefetch aborts occur at the right place.  */ +        num_insns++; +    } while (!dc->is_jmp && !tcg_op_buf_full() && +             !cs->singlestep_enabled && +             !singlestep && +             dc->pc < next_page_start && +             num_insns < max_insns); + +    if (tb->cflags & CF_LAST_IO) { +        if (dc->condjmp) { +            /* FIXME:  This can theoretically happen with self-modifying +               code.  */ +            cpu_abort(cs, "IO on conditional branch instruction"); +        } +        gen_io_end(); +    } + +    /* At this stage dc->condjmp will only be set when the skipped +       instruction was a conditional branch or trap, and the PC has +       already been written.  
*/
+    if (unlikely(cs->singlestep_enabled)) {
+        /* Make sure the pc is updated, and raise a debug exception.  */
+        if (dc->condjmp) {
+            if (dc->is_jmp == DISAS_SYSCALL) {
+                gen_exception(UC32_EXCP_PRIV);
+            } else {
+                gen_exception(EXCP_DEBUG);
+            }
+            gen_set_label(dc->condlabel);
+        }
+        if (dc->condjmp || !dc->is_jmp) {
+            gen_set_pc_im(dc->pc);
+            dc->condjmp = 0;
+        }
+        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
+            gen_exception(UC32_EXCP_PRIV);
+        } else {
+            gen_exception(EXCP_DEBUG);
+        }
+    } else {
+        /* While branches must always occur at the end of an IT block,
+           there are a few other things that can cause us to terminate
+           the TB in the middle of an IT block:
+            - Exception generating instructions (bkpt, swi, undefined).
+            - Page boundaries.
+            - Hardware watchpoints.
+           Hardware breakpoints have already been handled and skip this code.
+         */
+        switch (dc->is_jmp) {
+        case DISAS_NEXT:
+            gen_goto_tb(dc, 1, dc->pc);
+            break;
+        default:
+        case DISAS_JUMP:
+        case DISAS_UPDATE:
+            /* indicate that the hash table must be used to find the next TB */
+            tcg_gen_exit_tb(0);
+            break;
+        case DISAS_TB_JUMP:
+            /* nothing more to generate */
+            break;
+        case DISAS_SYSCALL:
+            gen_exception(UC32_EXCP_PRIV);
+            break;
+        }
+        if (dc->condjmp) {
+            gen_set_label(dc->condlabel);
+            gen_goto_tb(dc, 1, dc->pc);
+            dc->condjmp = 0;
+        }
+    }
+
+done_generating:
+    gen_tb_end(tb, num_insns);
+
+#ifdef DEBUG_DISAS
+    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+        qemu_log("----------------\n");
+        qemu_log("IN: %s\n", lookup_symbol(pc_start));
+        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
+        qemu_log("\n");
+    }
+#endif
+    if (search_pc) {
+        j = tcg_op_buf_count();
+        lj++;
+        while (lj <= j) {
+            tcg_ctx.gen_opc_instr_start[lj++] = 0;
+        }
+    } else {
+        tb->size = dc->pc - pc_start;
+        tb->icount = num_insns;
+    }
+}
+
+void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
+{
+    gen_intermediate_code_internal(uc32_env_get_cpu(env), tb, false);
+}
+
+void gen_intermediate_code_pc(CPUUniCore32State *env, TranslationBlock *tb)
+{
+    gen_intermediate_code_internal(uc32_env_get_cpu(env), tb, true);
+}
+
+static const char *cpu_mode_names[16] = {
+    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
+    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
+};
+
+#undef UCF64_DUMP_STATE
+#ifdef UCF64_DUMP_STATE
+static void cpu_dump_state_ucf64(CPUUniCore32State *env, FILE *f,
+        fprintf_function cpu_fprintf, int flags)
+{
+    int i;
+    union {
+        uint32_t i;
+        float s;
+    } s0, s1;
+    CPU_DoubleU d;
+    /* ??? This assumes float64 and double have the same layout.
+       Oh well, it's only debug dumps.  */
+    union {
+        float64 f64;
+        double d;
+    } d0;
+
+    for (i = 0; i < 16; i++) {
+        d.d = env->ucf64.regs[i];
+        s0.i = d.l.lower;
+        s1.i = d.l.upper;
+        d0.f64 = d.d;
+        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
+                    i * 2, (int)s0.i, s0.s,
+                    i * 2 + 1, (int)s1.i, s1.s);
+        cpu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
+                    i, (uint64_t)d0.f64, d0.d);
+    }
+    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
+}
+#else
+#define cpu_dump_state_ucf64(env, file, pr, flags)      do { } while (0)
+#endif
+
+void uc32_cpu_dump_state(CPUState *cs, FILE *f,
+                         fprintf_function cpu_fprintf, int flags)
+{
+    UniCore32CPU *cpu = UNICORE32_CPU(cs);
+    CPUUniCore32State *env = &cpu->env;
+    int i;
+    uint32_t psr;
+
+    for (i = 0; i < 32; i++) {
+        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
+        if ((i % 4) == 3) {
+            cpu_fprintf(f, "\n");
+        } else {
+            cpu_fprintf(f, " ");
+        }
+    }
+    psr = cpu_asr_read(env);
+    cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
+                psr,
+                psr & (1 << 31) ? 'N' : '-',
+                psr & (1 << 30) ? 'Z' : '-',
+                psr & (1 << 29) ? 'C' : '-',
+                psr & (1 << 28) ? 'V' : '-',
+                cpu_mode_names[psr & 0xf]);
+
+    cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
+}
+
+void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb, int pc_pos)
+{
+    env->regs[31] = tcg_ctx.gen_opc_pc[pc_pos];
+}
diff --git a/target-unicore32/ucf64_helper.c b/target-unicore32/ucf64_helper.c
new file mode 100644
index 00000000..5af008fc
--- /dev/null
+++ b/target-unicore32/ucf64_helper.c
@@ -0,0 +1,324 @@
+/*
+ * UniCore-F64 simulation helpers for QEMU.
+ *
+ * Copyright (C) 2010-2012 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or any later version.
+ * See the COPYING file in the top-level directory.
+ */
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+/*
+ * The convention used for UniCore-F64 instructions:
+ *  Single precision routines have an "s" suffix.
+ *  Double precision routines have a "d" suffix.
+ */
+
+/* Convert host exception flags to f64 form.  */
+static inline int ucf64_exceptbits_from_host(int host_bits)
+{
+    int target_bits = 0;
+
+    if (host_bits & float_flag_invalid) {
+        target_bits |= UCF64_FPSCR_FLAG_INVALID;
+    }
+    if (host_bits & float_flag_divbyzero) {
+        target_bits |= UCF64_FPSCR_FLAG_DIVZERO;
+    }
+    if (host_bits & float_flag_overflow) {
+        target_bits |= UCF64_FPSCR_FLAG_OVERFLOW;
+    }
+    if (host_bits & float_flag_underflow) {
+        target_bits |= UCF64_FPSCR_FLAG_UNDERFLOW;
+    }
+    if (host_bits & float_flag_inexact) {
+        target_bits |= UCF64_FPSCR_FLAG_INEXACT;
+    }
+    return target_bits;
+}
+
+uint32_t HELPER(ucf64_get_fpscr)(CPUUniCore32State *env)
+{
+    int i;
+    uint32_t fpscr;
+
+    fpscr = (env->ucf64.xregs[UC32_UCF64_FPSCR] & UCF64_FPSCR_MASK);
+    i = get_float_exception_flags(&env->ucf64.fp_status);
+    fpscr |= ucf64_exceptbits_from_host(i);
+    return fpscr;
+}
+
+/* Convert ucf64 exception flags to target form.  */
+static inline int ucf64_exceptbits_to_host(int target_bits)
+{
+    int host_bits = 0;
+
+    if (target_bits & UCF64_FPSCR_FLAG_INVALID) {
+        host_bits |= float_flag_invalid;
+    }
+    if (target_bits & UCF64_FPSCR_FLAG_DIVZERO) {
+        host_bits |= float_flag_divbyzero;
+    }
+    if (target_bits & UCF64_FPSCR_FLAG_OVERFLOW) {
+        host_bits |= float_flag_overflow;
+    }
+    if (target_bits & UCF64_FPSCR_FLAG_UNDERFLOW) {
+        host_bits |= float_flag_underflow;
+    }
+    if (target_bits & UCF64_FPSCR_FLAG_INEXACT) {
+        host_bits |= float_flag_inexact;
+    }
+    return host_bits;
+}
+
+void HELPER(ucf64_set_fpscr)(CPUUniCore32State *env, uint32_t val)
+{
+    UniCore32CPU *cpu = uc32_env_get_cpu(env);
+    int i;
+    uint32_t changed;
+
+    changed = env->ucf64.xregs[UC32_UCF64_FPSCR];
+    env->ucf64.xregs[UC32_UCF64_FPSCR] = (val & UCF64_FPSCR_MASK);
+
+    changed ^= val;
+    if (changed & (UCF64_FPSCR_RND_MASK)) {
+        i = UCF64_FPSCR_RND(val);
+        switch (i) {
+        case 0:
+            i = float_round_nearest_even;
+            break;
+        case 1:
+            i = float_round_to_zero;
+            break;
+        case 2:
+            i = float_round_up;
+            break;
+        case 3:
+            i = float_round_down;
+            break;
+        default: /* 100 and 101 not implemented */
+            cpu_abort(CPU(cpu), "Unsupported UniCore-F64 round mode");
+        }
+        set_float_rounding_mode(i, &env->ucf64.fp_status);
+    }
+
+    i = ucf64_exceptbits_to_host(UCF64_FPSCR_TRAPEN(val));
+    set_float_exception_flags(i, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_adds)(float32 a, float32 b, CPUUniCore32State *env)
+{
+    return float32_add(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_addd)(float64 a, float64 b, CPUUniCore32State *env)
+{
+    return float64_add(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_subs)(float32 a, float32 b, CPUUniCore32State *env)
+{
+    return float32_sub(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_subd)(float64 a, float64 b, CPUUniCore32State *env)
+{
+    return float64_sub(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_muls)(float32 a, float32 b, CPUUniCore32State *env)
+{
+    return float32_mul(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_muld)(float64 a, float64 b, CPUUniCore32State *env)
+{
+    return float64_mul(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_divs)(float32 a, float32 b, CPUUniCore32State *env)
+{
+    return float32_div(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_divd)(float64 a, float64 b, CPUUniCore32State *env)
+{
+    return float64_div(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_negs)(float32 a)
+{
+    return float32_chs(a);
+}
+
+float64 HELPER(ucf64_negd)(float64 a)
+{
+    return float64_chs(a);
+}
+
+float32 HELPER(ucf64_abss)(float32 a)
+{
+    return float32_abs(a);
+}
+
+float64 HELPER(ucf64_absd)(float64 a)
+{
+    return float64_abs(a);
+}
+
+void HELPER(ucf64_cmps)(float32 a, float32 b, uint32_t c,
+        CPUUniCore32State *env)
+{
+    int flag;
+    flag = float32_compare_quiet(a, b, &env->ucf64.fp_status);
+    env->CF = 0;
+    switch (c & 0x7) {
+    case 0: /* F */
+        break;
+    case 1: /* UN */
+        if (flag == 2) {
+            env->CF = 1;
+        }
+        break;
+    case 2: /* EQ */
+        if (flag == 0) {
+            env->CF = 1;
+        }
+        break;
+    case 3: /* UEQ */
+        if ((flag == 0) || (flag == 2)) {
+            env->CF = 1;
+        }
+        break;
+    case 4: /* OLT */
+        if (flag == -1) {
+            env->CF = 1;
+        }
+        break;
+    case 5: /* ULT */
+        if ((flag == -1) || (flag == 2)) {
+            env->CF = 1;
+        }
+        break;
+    case 6: /* OLE */
+        if ((flag == -1) || (flag == 0)) {
+            env->CF = 1;
+        }
+        break;
+    case 7: /* ULE */
+        if (flag != 1) {
+            env->CF = 1;
+        }
+        break;
+    }
+    env->ucf64.xregs[UC32_UCF64_FPSCR] = (env->CF << 29)
+                    | (env->ucf64.xregs[UC32_UCF64_FPSCR] & 0x0fffffff);
+}
+
+void HELPER(ucf64_cmpd)(float64 a, float64 b, uint32_t c,
+        CPUUniCore32State *env)
+{
+    int flag;
+    flag = float64_compare_quiet(a, b, &env->ucf64.fp_status);
+    env->CF = 0;
+    switch (c & 0x7) {
+    case 0: /* F */
+        break;
+    case 1: /* UN */
+        if (flag == 2) {
+            env->CF = 1;
+        }
+        break;
+    case 2: /* EQ */
+        if (flag == 0) {
+            env->CF = 1;
+        }
+        break;
+    case 3: /* UEQ */
+        if ((flag == 0) || (flag == 2)) {
+            env->CF = 1;
+        }
+        break;
+    case 4: /* OLT */
+        if (flag == -1) {
+            env->CF = 1;
+        }
+        break;
+    case 5: /* ULT */
+        if ((flag == -1) || (flag == 2)) {
+            env->CF = 1;
+        }
+        break;
+    case 6: /* OLE */
+        if ((flag == -1) || (flag == 0)) {
+            env->CF = 1;
+        }
+        break;
+    case 7: /* ULE */
+        if (flag != 1) {
+            env->CF = 1;
+        }
+        break;
+    }
+    env->ucf64.xregs[UC32_UCF64_FPSCR] = (env->CF << 29)
+                    | (env->ucf64.xregs[UC32_UCF64_FPSCR] & 0x0fffffff);
+}
+
+/* Helper routines to perform bitwise copies between float and int.  */
+static inline float32 ucf64_itos(uint32_t i)
+{
+    union {
+        uint32_t i;
+        float32 s;
+    } v;
+
+    v.i = i;
+    return v.s;
+}
+
+static inline uint32_t ucf64_stoi(float32 s)
+{
+    union {
+        uint32_t i;
+        float32 s;
+    } v;
+
+    v.s = s;
+    return v.i;
+}
+
+/* Integer to float conversion.  */
+float32 HELPER(ucf64_si2sf)(float32 x, CPUUniCore32State *env)
+{
+    return int32_to_float32(ucf64_stoi(x), &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_si2df)(float32 x, CPUUniCore32State *env)
+{
+    return int32_to_float64(ucf64_stoi(x), &env->ucf64.fp_status);
+}
+
+/* Float to integer conversion.  */
+float32 HELPER(ucf64_sf2si)(float32 x, CPUUniCore32State *env)
+{
+    return ucf64_itos(float32_to_int32(x, &env->ucf64.fp_status));
+}
+
+float32 HELPER(ucf64_df2si)(float64 x, CPUUniCore32State *env)
+{
+    return ucf64_itos(float64_to_int32(x, &env->ucf64.fp_status));
+}
+
+/* floating point conversion */
+float64 HELPER(ucf64_sf2df)(float32 x, CPUUniCore32State *env)
+{
+    return float32_to_float64(x, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_df2sf)(float64 x, CPUUniCore32State *env)
+{
+    return float64_to_float32(x, &env->ucf64.fp_status);
+}
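
Note on the compare helpers above: HELPER(ucf64_cmps)/HELPER(ucf64_cmpd) reduce the softfloat comparison result (-1 less, 0 equal, 1 greater, 2 unordered) to a single CF bit selected by the low three bits of the condition field. The standalone C sketch below is an editorial illustration only, not part of the patch; the function name ucf64_cc_to_cf and the test harness are assumptions introduced here. It reproduces the same predicate-to-CF mapping with plain integers so the eight cases can be checked in isolation against the switch statements in the helpers.

/* Standalone sketch of the UCF64 compare-predicate mapping (illustrative,
 * not QEMU code).  "flag" follows the softfloat convention: -1 less,
 * 0 equal, 1 greater, 2 unordered.  Returns the value the helpers would
 * write into env->CF for the given 3-bit condition.
 */
#include <stdio.h>

static int ucf64_cc_to_cf(int flag, unsigned int cond)
{
    switch (cond & 0x7) {
    case 0: /* F: never true */
        return 0;
    case 1: /* UN: unordered */
        return flag == 2;
    case 2: /* EQ */
        return flag == 0;
    case 3: /* UEQ: equal or unordered */
        return flag == 0 || flag == 2;
    case 4: /* OLT: ordered less-than */
        return flag == -1;
    case 5: /* ULT: less-than or unordered */
        return flag == -1 || flag == 2;
    case 6: /* OLE: ordered less-or-equal */
        return flag == -1 || flag == 0;
    case 7: /* ULE: everything except ordered greater-than */
        return flag != 1;
    }
    return 0;
}

int main(void)
{
    static const char *cc[8] = {
        "F", "UN", "EQ", "UEQ", "OLT", "ULT", "OLE", "ULE"
    };
    static const int flags[4] = { -1, 0, 1, 2 };
    int c, f;

    /* Print the full predicate table: one row per condition code. */
    for (c = 0; c < 8; c++) {
        printf("%-3s:", cc[c]);
        for (f = 0; f < 4; f++) {
            printf("  flag=%2d -> CF=%d", flags[f], ucf64_cc_to_cf(flags[f], c));
        }
        printf("\n");
    }
    return 0;
}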
