Diffstat (limited to 'include/exec')
-rw-r--r--  include/exec/address-spaces.h                  41
-rw-r--r--  include/exec/cpu-all.h                        334
-rw-r--r--  include/exec/cpu-common.h                     138
-rw-r--r--  include/exec/cpu-defs.h                       154
-rw-r--r--  include/exec/cpu_ldst.h                       460
-rw-r--r--  include/exec/cpu_ldst_template.h              141
-rw-r--r--  include/exec/cpu_ldst_useronly_template.h      81
-rw-r--r--  include/exec/cputlb.h                          47
-rw-r--r--  include/exec/exec-all.h                       367
-rw-r--r--  include/exec/gdbstub.h                         98
-rw-r--r--  include/exec/gen-icount.h                      79
-rw-r--r--  include/exec/helper-gen.h                      72
-rw-r--r--  include/exec/helper-head.h                    134
-rw-r--r--  include/exec/helper-proto.h                    40
-rw-r--r--  include/exec/helper-tcg.h                      49
-rw-r--r--  include/exec/hwaddr.h                          20
-rw-r--r--  include/exec/ioport.h                          80
-rw-r--r--  include/exec/memattrs.h                        49
-rw-r--r--  include/exec/memory-internal.h                 35
-rw-r--r--  include/exec/memory.h                        1340
-rw-r--r--  include/exec/poison.h                          62
-rw-r--r--  include/exec/ram_addr.h                       253
-rw-r--r--  include/exec/semihost.h                        62
-rw-r--r--  include/exec/softmmu-semi.h                    80
-rw-r--r--  include/exec/spinlock.h                        49
-rw-r--r--  include/exec/tb-hash.h                         51
-rw-r--r--  include/exec/user/abitypes.h                   66
-rw-r--r--  include/exec/user/thunk.h                     191
28 files changed, 4573 insertions, 0 deletions
diff --git a/include/exec/address-spaces.h b/include/exec/address-spaces.h
new file mode 100644
index 00000000..3d12cdde
--- /dev/null
+++ b/include/exec/address-spaces.h
@@ -0,0 +1,41 @@
+/*
+ * Internal memory management interfaces
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef EXEC_ADDRESS_SPACES_H
+#define EXEC_ADDRESS_SPACES_H
+
+/*
+ * Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless
+ * you're one of them.
+ */
+
+#include "exec/memory.h"
+
+#ifndef CONFIG_USER_ONLY
+
+/* Get the root memory region. This interface should only be used temporarily
+ * until a proper bus interface is available.
+ */
+MemoryRegion *get_system_memory(void);
+
+/* Get the root I/O port region. This interface should only be used
+ * temporarily until a proper bus interface is available.
+ */
+MemoryRegion *get_system_io(void);
+
+extern AddressSpace address_space_memory;
+extern AddressSpace address_space_io;
+
+#endif
+
+#endif
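As a usage sketch (assuming the memory API from exec/memory.h; the board name, base address and size below are hypothetical), board-init code attaches a RAM region to the root region returned by get_system_memory():

    #include "exec/address-spaces.h"
    #include "qapi/error.h"

    /* Hypothetical board: names and values are illustrative only. */
    #define MY_RAM_BASE 0x40000000
    #define MY_RAM_SIZE (64 * 1024 * 1024)

    static void my_board_init(void)
    {
        MemoryRegion *sysmem = get_system_memory();
        MemoryRegion *ram = g_new(MemoryRegion, 1);

        /* Allocate guest RAM and map it into the system address space. */
        memory_region_init_ram(ram, NULL, "my-board.ram", MY_RAM_SIZE,
                               &error_abort);
        memory_region_add_subregion(sysmem, MY_RAM_BASE, ram);
    }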
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
new file mode 100644
index 00000000..ea6a9a66
--- /dev/null
+++ b/include/exec/cpu-all.h
@@ -0,0 +1,334 @@
+/*
+ * defines common to all virtual CPUs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_ALL_H
+#define CPU_ALL_H
+
+#include "qemu-common.h"
+#include "exec/cpu-common.h"
+#include "exec/memory.h"
+#include "qemu/thread.h"
+#include "qom/cpu.h"
+#include "qemu/rcu.h"
+
+#define EXCP_INTERRUPT 0x10000 /* async interruption */
+#define EXCP_HLT 0x10001 /* hlt instruction reached */
+#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
+#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
+#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
+
+/* some important defines:
+ *
+ * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
+ * memory accesses.
+ *
+ * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
+ * otherwise little endian.
+ *
+ * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
+ *
+ * TARGET_WORDS_BIGENDIAN : same for target cpu
+ */
+
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#define BSWAP_NEEDED
+#endif
+
+#ifdef BSWAP_NEEDED
+
+static inline uint16_t tswap16(uint16_t s)
+{
+ return bswap16(s);
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+ return bswap32(s);
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+ return bswap64(s);
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+ *s = bswap16(*s);
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+ *s = bswap32(*s);
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+ *s = bswap64(*s);
+}
+
+#else
+
+static inline uint16_t tswap16(uint16_t s)
+{
+ return s;
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+ return s;
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+ return s;
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+}
+
+#endif
+
+#if TARGET_LONG_SIZE == 4
+#define tswapl(s) tswap32(s)
+#define tswapls(s) tswap32s((uint32_t *)(s))
+#define bswaptls(s) bswap32s(s)
+#else
+#define tswapl(s) tswap64(s)
+#define tswapls(s) tswap64s((uint64_t *)(s))
+#define bswaptls(s) bswap64s(s)
+#endif
+
+/* Target-endianness CPU memory access functions. These fit into the
+ * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
+ */
+#if defined(TARGET_WORDS_BIGENDIAN)
+#define lduw_p(p) lduw_be_p(p)
+#define ldsw_p(p) ldsw_be_p(p)
+#define ldl_p(p) ldl_be_p(p)
+#define ldq_p(p) ldq_be_p(p)
+#define ldfl_p(p) ldfl_be_p(p)
+#define ldfq_p(p) ldfq_be_p(p)
+#define stw_p(p, v) stw_be_p(p, v)
+#define stl_p(p, v) stl_be_p(p, v)
+#define stq_p(p, v) stq_be_p(p, v)
+#define stfl_p(p, v) stfl_be_p(p, v)
+#define stfq_p(p, v) stfq_be_p(p, v)
+#else
+#define lduw_p(p) lduw_le_p(p)
+#define ldsw_p(p) ldsw_le_p(p)
+#define ldl_p(p) ldl_le_p(p)
+#define ldq_p(p) ldq_le_p(p)
+#define ldfl_p(p) ldfl_le_p(p)
+#define ldfq_p(p) ldfq_le_p(p)
+#define stw_p(p, v) stw_le_p(p, v)
+#define stl_p(p, v) stl_le_p(p, v)
+#define stq_p(p, v) stq_le_p(p, v)
+#define stfl_p(p, v) stfl_le_p(p, v)
+#define stfq_p(p, v) stfq_le_p(p, v)
+#endif
+
+/* MMU memory access macros */
+
+#if defined(CONFIG_USER_ONLY)
+#include <assert.h>
+#include "exec/user/abitypes.h"
+
+/* On some host systems the guest address space is reserved on the host.
+ * This allows the guest address space to be offset to a convenient location.
+ */
+#if defined(CONFIG_USE_GUEST_BASE)
+extern unsigned long guest_base;
+extern int have_guest_base;
+extern unsigned long reserved_va;
+#define GUEST_BASE guest_base
+#define RESERVED_VA reserved_va
+#else
+#define GUEST_BASE 0ul
+#define RESERVED_VA 0ul
+#endif
+
+#define GUEST_ADDR_MAX (RESERVED_VA ? RESERVED_VA : \
+ (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
+#endif
+
+/* page related stuff */
+
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
+#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
+
+/* ??? These should be the larger of uintptr_t and target_ulong. */
+extern uintptr_t qemu_real_host_page_size;
+extern uintptr_t qemu_real_host_page_mask;
+extern uintptr_t qemu_host_page_size;
+extern uintptr_t qemu_host_page_mask;
+
+#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
+#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
+ qemu_real_host_page_mask)
+
+/* same as PROT_xxx */
+#define PAGE_READ 0x0001
+#define PAGE_WRITE 0x0002
+#define PAGE_EXEC 0x0004
+#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
+#define PAGE_VALID 0x0008
+/* original state of the write flag (used when tracking self-modifying
+   code) */
+#define PAGE_WRITE_ORG 0x0010
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
+/* FIXME: Code that sets/uses this is broken and needs to go away. */
+#define PAGE_RESERVED 0x0020
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+void page_dump(FILE *f);
+
+typedef int (*walk_memory_regions_fn)(void *, target_ulong,
+ target_ulong, unsigned long);
+int walk_memory_regions(void *, walk_memory_regions_fn);
+
+int page_get_flags(target_ulong address);
+void page_set_flags(target_ulong start, target_ulong end, int flags);
+int page_check_range(target_ulong start, target_ulong len, int flags);
+#endif
+
+CPUArchState *cpu_copy(CPUArchState *env);
+
+/* Flags for use in ENV->INTERRUPT_PENDING.
+
+ The numbers assigned here are non-sequential in order to preserve
+ binary compatibility with the vmstate dump. Bit 0 (0x0001) was
+ previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
+ the vmstate dump. */
+
+/* External hardware interrupt pending. This is typically used for
+ interrupts from devices. */
+#define CPU_INTERRUPT_HARD 0x0002
+
+/* Exit the current TB. This is typically used when some system-level device
+   makes some change to the memory mapping, e.g. an A20 line change. */
+#define CPU_INTERRUPT_EXITTB 0x0004
+
+/* Halt the CPU. */
+#define CPU_INTERRUPT_HALT 0x0020
+
+/* Debug event pending. */
+#define CPU_INTERRUPT_DEBUG 0x0080
+
+/* Reset signal. */
+#define CPU_INTERRUPT_RESET 0x0400
+
+/* Several target-specific external hardware interrupts. Each target/cpu.h
+ should define proper names based on these defines. */
+#define CPU_INTERRUPT_TGT_EXT_0 0x0008
+#define CPU_INTERRUPT_TGT_EXT_1 0x0010
+#define CPU_INTERRUPT_TGT_EXT_2 0x0040
+#define CPU_INTERRUPT_TGT_EXT_3 0x0200
+#define CPU_INTERRUPT_TGT_EXT_4 0x1000
+
+/* Several target-specific internal interrupts. These differ from the
+ preceding target-specific interrupts in that they are intended to
+ originate from within the cpu itself, typically in response to some
+ instruction being executed. These, therefore, are not masked while
+ single-stepping within the debugger. */
+#define CPU_INTERRUPT_TGT_INT_0 0x0100
+#define CPU_INTERRUPT_TGT_INT_1 0x0800
+#define CPU_INTERRUPT_TGT_INT_2 0x2000
+
+/* First unused bit: 0x4000. */
+
+/* The set of all bits that should be masked when single-stepping. */
+#define CPU_INTERRUPT_SSTEP_MASK \
+ (CPU_INTERRUPT_HARD \
+ | CPU_INTERRUPT_TGT_EXT_0 \
+ | CPU_INTERRUPT_TGT_EXT_1 \
+ | CPU_INTERRUPT_TGT_EXT_2 \
+ | CPU_INTERRUPT_TGT_EXT_3 \
+ | CPU_INTERRUPT_TGT_EXT_4)
+
+#if !defined(CONFIG_USER_ONLY)
+
+/* memory API */
+
+typedef struct RAMBlock RAMBlock;
+
+struct RAMBlock {
+ struct rcu_head rcu;
+ struct MemoryRegion *mr;
+ uint8_t *host;
+ ram_addr_t offset;
+ ram_addr_t used_length;
+ ram_addr_t max_length;
+ void (*resized)(const char*, uint64_t length, void *host);
+ uint32_t flags;
+ /* Protected by iothread lock. */
+ char idstr[256];
+ /* RCU-enabled, writes protected by the ramlist lock */
+ QLIST_ENTRY(RAMBlock) next;
+ int fd;
+};
+
+static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
+{
+ assert(offset < block->used_length);
+ assert(block->host);
+ return (char *)block->host + offset;
+}
+
+typedef struct RAMList {
+ QemuMutex mutex;
+ /* Protected by the iothread lock. */
+ unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
+ RAMBlock *mru_block;
+ /* RCU-enabled, writes protected by the ramlist lock. */
+ QLIST_HEAD(, RAMBlock) blocks;
+ uint32_t version;
+} RAMList;
+extern RAMList ram_list;
+
+/* Flags stored in the low bits of the TLB virtual address. These are
+ defined so that fast path ram access is all zeros. */
+/* Zero if TLB entry is valid. */
+#define TLB_INVALID_MASK (1 << 3)
+/* Set if TLB entry references a clean RAM page. The iotlb entry will
+ contain the page physical address. */
+#define TLB_NOTDIRTY (1 << 4)
+/* Set if TLB entry is an IO callback. */
+#define TLB_MMIO (1 << 5)
+
+void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
+void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
+ram_addr_t last_ram_offset(void);
+void qemu_mutex_lock_ramlist(void);
+void qemu_mutex_unlock_ramlist(void);
+#endif /* !CONFIG_USER_ONLY */
+
+int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
+ uint8_t *buf, int len, int is_write);
+
+#endif /* CPU_ALL_H */
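A small usage sketch of the target-endian accessors and page macros above (the buffer and addresses are arbitrary; assumes target code where this header is in scope):

    uint8_t buf[8];
    target_ulong addr = 0x1234;    /* arbitrary example address */

    /* stl_p/ldl_p resolve to the _le or _be variants according to
     * TARGET_WORDS_BIGENDIAN, so buf holds guest byte order. */
    stl_p(buf, 0x12345678);
    uint32_t v = ldl_p(buf);       /* v == 0x12345678 */

    /* Round up to the next guest page boundary
     * (0x2000 for a 4K TARGET_PAGE_SIZE). */
    target_ulong aligned = TARGET_PAGE_ALIGN(addr);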
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
new file mode 100644
index 00000000..9fb1d541
--- /dev/null
+++ b/include/exec/cpu-common.h
@@ -0,0 +1,138 @@
+#ifndef CPU_COMMON_H
+#define CPU_COMMON_H 1
+
+/* CPU interfaces that are target independent. */
+
+#ifndef CONFIG_USER_ONLY
+#include "exec/hwaddr.h"
+#endif
+
+#ifndef NEED_CPU_H
+#include "exec/poison.h"
+#endif
+
+#include "qemu/bswap.h"
+#include "qemu/queue.h"
+#include "qemu/fprintf-fn.h"
+#include "qemu/typedefs.h"
+
+/**
+ * CPUListState:
+ * @cpu_fprintf: Print function.
+ * @file: File to print to using @cpu_fprintf.
+ *
+ * State commonly used for iterating over CPU models.
+ */
+typedef struct CPUListState {
+ fprintf_function cpu_fprintf;
+ FILE *file;
+} CPUListState;
+
+typedef enum MMUAccessType {
+ MMU_DATA_LOAD = 0,
+ MMU_DATA_STORE = 1,
+ MMU_INST_FETCH = 2
+} MMUAccessType;
+
+#if !defined(CONFIG_USER_ONLY)
+
+enum device_endian {
+ DEVICE_NATIVE_ENDIAN,
+ DEVICE_BIG_ENDIAN,
+ DEVICE_LITTLE_ENDIAN,
+};
+
+/* address in the RAM (different from a physical address) */
+#if defined(CONFIG_XEN_BACKEND)
+typedef uint64_t ram_addr_t;
+# define RAM_ADDR_MAX UINT64_MAX
+# define RAM_ADDR_FMT "%" PRIx64
+#else
+typedef uintptr_t ram_addr_t;
+# define RAM_ADDR_MAX UINTPTR_MAX
+# define RAM_ADDR_FMT "%" PRIxPTR
+#endif
+
+extern ram_addr_t ram_size;
+ram_addr_t get_current_ram_size(void);
+
+/* memory API */
+
+typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
+typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
+
+void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
+/* This should not be used by devices. */
+MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
+void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev);
+void qemu_ram_unset_idstr(ram_addr_t addr);
+
+void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
+ int len, int is_write);
+static inline void cpu_physical_memory_read(hwaddr addr,
+ void *buf, int len)
+{
+ cpu_physical_memory_rw(addr, buf, len, 0);
+}
+static inline void cpu_physical_memory_write(hwaddr addr,
+ const void *buf, int len)
+{
+ cpu_physical_memory_rw(addr, (void *)buf, len, 1);
+}
+void *cpu_physical_memory_map(hwaddr addr,
+ hwaddr *plen,
+ int is_write);
+void cpu_physical_memory_unmap(void *buffer, hwaddr len,
+ int is_write, hwaddr access_len);
+void cpu_register_map_client(QEMUBH *bh);
+void cpu_unregister_map_client(QEMUBH *bh);
+
+bool cpu_physical_memory_is_io(hwaddr phys_addr);
+
+/* Coalesced MMIO regions are areas where write operations can be reordered.
+ * This usually implies that write operations are side-effect free. This allows
+ * batching which can make a major impact on performance when using
+ * virtualization.
+ */
+void qemu_flush_coalesced_mmio_buffer(void);
+
+uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
+uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
+uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
+uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
+uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
+uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
+uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
+void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
+void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
+
+#ifdef NEED_CPU_H
+uint32_t lduw_phys(AddressSpace *as, hwaddr addr);
+uint32_t ldl_phys(AddressSpace *as, hwaddr addr);
+uint64_t ldq_phys(AddressSpace *as, hwaddr addr);
+void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val);
+void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val);
+#endif
+
+void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
+ const uint8_t *buf, int len);
+void cpu_flush_icache_range(hwaddr start, int len);
+
+extern struct MemoryRegion io_mem_rom;
+extern struct MemoryRegion io_mem_notdirty;
+
+typedef int (RAMBlockIterFunc)(const char *block_name, void *host_addr,
+ ram_addr_t offset, ram_addr_t length, void *opaque);
+
+int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
+
+#endif
+
+#endif /* CPU_COMMON_H */
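A usage sketch of the physical-memory helpers declared above (the guest address 0x1000 is arbitrary; assumes system-mode code where address_space_memory from exec/address-spaces.h is available):

    /* Raw byte-level access; the caller handles endianness, e.g. with
     * tswap32() from cpu-all.h. Address and value are arbitrary. */
    uint32_t val = 0xdeadbeef;
    cpu_physical_memory_write(0x1000, &val, sizeof(val));
    cpu_physical_memory_read(0x1000, &val, sizeof(val));

    /* The typed helpers go through an AddressSpace and handle
     * endianness explicitly. */
    stl_le_phys(&address_space_memory, 0x1000, 0xdeadbeef);
    uint32_t v = ldl_le_phys(&address_space_memory, 0x1000);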
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
new file mode 100644
index 00000000..98b9cff3
--- /dev/null
+++ b/include/exec/cpu-defs.h
@@ -0,0 +1,154 @@
+/*
+ * common defines for all CPUs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_DEFS_H
+#define CPU_DEFS_H
+
+#ifndef NEED_CPU_H
+#error cpu.h included from common code
+#endif
+
+#include "config.h"
+#include <inttypes.h>
+#include "qemu/osdep.h"
+#include "qemu/queue.h"
+#include "tcg-target.h"
+#ifndef CONFIG_USER_ONLY
+#include "exec/hwaddr.h"
+#endif
+#include "exec/memattrs.h"
+
+#ifndef TARGET_LONG_BITS
+#error TARGET_LONG_BITS must be defined before including this header
+#endif
+
+#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+/* target_ulong is the type of a virtual address */
+#if TARGET_LONG_SIZE == 4
+typedef int32_t target_long;
+typedef uint32_t target_ulong;
+#define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
+#elif TARGET_LONG_SIZE == 8
+typedef int64_t target_long;
+typedef uint64_t target_ulong;
+#define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
+#else
+#error TARGET_LONG_SIZE undefined
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+/* use a fully associative victim tlb of 8 entries */
+#define CPU_VTLB_SIZE 8
+
+#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
+#define CPU_TLB_ENTRY_BITS 4
+#else
+#define CPU_TLB_ENTRY_BITS 5
+#endif
+
+/* TCG_TARGET_TLB_DISPLACEMENT_BITS is used in CPU_TLB_BITS to ensure that
+ * the TLB is not unnecessarily small, but still small enough for the
+ * TLB lookup instruction sequence used by the TCG target.
+ *
+ * TCG will have to generate an operand as large as the distance between
+ * env and the tlb_table[NB_MMU_MODES - 1][0].addend. For simplicity,
+ * the TCG targets just round everything up to the next power of two, and
+ * count bits. This works because: 1) the size of each TLB is a largish
+ * power of two, 2) and because the limit of the displacement is really close
+ * to a power of two, 3) the offset of tlb_table[0][0] inside env is smaller
+ * than the size of a TLB.
+ *
+ * For example, the maximum displacement is 0xFFF0 on PPC and MIPS, but TCG
+ * just says "the displacement is 16 bits". TCG_TARGET_TLB_DISPLACEMENT_BITS
+ * then ensures that tlb_table is at least 0x8000 bytes large ("not unnecessarily
+ * small": 2^15). The operand then will come up smaller than 0xFFF0 without
+ * any particular care, because the TLB for a single MMU mode is larger than
+ * 0x10000-0xFFF0=16 bytes. In the end, the maximum value of the operand
+ * could be something like 0xC000 (the offset of the last TLB table) plus
+ * 0x18 (the offset of the addend field in each TLB entry) plus the offset
+ * of tlb_table inside env (which is non-trivial but not huge).
+ */
+#define CPU_TLB_BITS \
+ MIN(8, \
+ TCG_TARGET_TLB_DISPLACEMENT_BITS - CPU_TLB_ENTRY_BITS - \
+ (NB_MMU_MODES <= 1 ? 0 : \
+ NB_MMU_MODES <= 2 ? 1 : \
+ NB_MMU_MODES <= 4 ? 2 : \
+ NB_MMU_MODES <= 8 ? 3 : 4))
+
+#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
+
+typedef struct CPUTLBEntry {
+ /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
+ bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
+ go directly to ram.
+ bit 3 : indicates that the entry is invalid
+ bit 2..0 : zero
+ */
+ target_ulong addr_read;
+ target_ulong addr_write;
+ target_ulong addr_code;
+ /* Addend to virtual address to get host address. IO accesses
+ use the corresponding iotlb value. */
+ uintptr_t addend;
+ /* padding to get a power of two size */
+ uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
+ (sizeof(target_ulong) * 3 +
+ ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
+ sizeof(uintptr_t))];
+} CPUTLBEntry;
+
+QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
+
+/* The IOTLB is not accessed directly inline by generated TCG code,
+ * so the CPUIOTLBEntry layout is not as critical as that of the
+ * CPUTLBEntry. (This is also why we don't want to combine the two
+ * structs into one.)
+ */
+typedef struct CPUIOTLBEntry {
+ hwaddr addr;
+ MemTxAttrs attrs;
+} CPUIOTLBEntry;
+
+#define CPU_COMMON_TLB \
+ /* The meaning of the MMU modes is defined in the target code. */ \
+ CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \
+ CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \
+ target_ulong tlb_flush_addr; \
+ target_ulong tlb_flush_mask; \
+ target_ulong vtlb_index; \
+
+#else
+
+#define CPU_COMMON_TLB
+
+#endif
+
+
+#define CPU_COMMON \
+ /* soft mmu support */ \
+ CPU_COMMON_TLB \
+
+#endif
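To make the CPU_TLB_BITS formula concrete, a worked example under assumed values: for a target with NB_MMU_MODES = 3 on a 64-bit host (so CPU_TLB_ENTRY_BITS = 5) and a TCG backend with TCG_TARGET_TLB_DISPLACEMENT_BITS = 16, the macro yields MIN(8, 16 - 5 - 2) = MIN(8, 9) = 8, hence CPU_TLB_SIZE = 256. Each MMU mode then gets 256 entries of 32 bytes, an 8 KiB slice of tlb_table, and all three slices together stay well within reach of the backend's 16-bit displacement.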
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
new file mode 100644
index 00000000..1239c60f
--- /dev/null
+++ b/include/exec/cpu_ldst.h
@@ -0,0 +1,460 @@
+/*
+ * Software MMU support
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+/*
+ * Generate inline load/store functions for all MMU modes (typically
+ * at least _user and _kernel) as well as _data versions, for all data
+ * sizes.
+ *
+ * Used by target op helpers.
+ *
+ * The syntax for the accessors is:
+ *
+ * load: cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
+ *
+ * store: cpu_st{sign}{size}_{mmusuffix}(env, ptr, val)
+ *
+ * sign is:
+ * (empty): for 32 and 64 bit sizes
+ * u : unsigned
+ * s : signed
+ *
+ * size is:
+ * b: 8 bits
+ * w: 16 bits
+ * l: 32 bits
+ * q: 64 bits
+ *
+ * mmusuffix is one of the generic suffixes "data" or "code", or
+ * (for softmmu configs) a target-specific MMU mode suffix as defined
+ * in target cpu.h.
+ */
+#ifndef CPU_LDST_H
+#define CPU_LDST_H
+
+#if defined(CONFIG_USER_ONLY)
+/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
+#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
+
+#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
+#define h2g_valid(x) 1
+#else
+#define h2g_valid(x) ({ \
+ unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+ (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
+ (!RESERVED_VA || (__guest < RESERVED_VA)); \
+})
+#endif
+
+#define h2g_nocheck(x) ({ \
+ unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
+ (abi_ulong)__ret; \
+})
+
+#define h2g(x) ({ \
+ /* Check if given address fits target address space */ \
+ assert(h2g_valid(x)); \
+ h2g_nocheck(x); \
+})
+
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+
+/* In user-only mode we provide only the _code and _data accessors. */
+
+#define MEMSUFFIX _data
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_useronly_template.h"
+#undef MEMSUFFIX
+
+#define MEMSUFFIX _code
+#define CODE_ACCESS
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_useronly_template.h"
+#undef MEMSUFFIX
+#undef CODE_ACCESS
+
+#else
+
+/* The memory helpers for tcg-generated code need tcg_target_long etc. */
+#include "tcg.h"
+
+uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+
+void helper_stb_mmu(CPUArchState *env, target_ulong addr,
+ uint8_t val, int mmu_idx);
+void helper_stw_mmu(CPUArchState *env, target_ulong addr,
+ uint16_t val, int mmu_idx);
+void helper_stl_mmu(CPUArchState *env, target_ulong addr,
+ uint32_t val, int mmu_idx);
+void helper_stq_mmu(CPUArchState *env, target_ulong addr,
+ uint64_t val, int mmu_idx);
+
+uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+
+#ifdef MMU_MODE0_SUFFIX
+#define CPU_MMU_INDEX 0
+#define MEMSUFFIX MMU_MODE0_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif
+
+#if (NB_MMU_MODES >= 2) && defined(MMU_MODE1_SUFFIX)
+#define CPU_MMU_INDEX 1
+#define MEMSUFFIX MMU_MODE1_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif
+
+#if (NB_MMU_MODES >= 3) && defined(MMU_MODE2_SUFFIX)
+
+#define CPU_MMU_INDEX 2
+#define MEMSUFFIX MMU_MODE2_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 3) */
+
+#if (NB_MMU_MODES >= 4) && defined(MMU_MODE3_SUFFIX)
+
+#define CPU_MMU_INDEX 3
+#define MEMSUFFIX MMU_MODE3_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 4) */
+
+#if (NB_MMU_MODES >= 5) && defined(MMU_MODE4_SUFFIX)
+
+#define CPU_MMU_INDEX 4
+#define MEMSUFFIX MMU_MODE4_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 5) */
+
+#if (NB_MMU_MODES >= 6) && defined(MMU_MODE5_SUFFIX)
+
+#define CPU_MMU_INDEX 5
+#define MEMSUFFIX MMU_MODE5_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 6) */
+
+#if (NB_MMU_MODES >= 7) && defined(MMU_MODE6_SUFFIX)
+
+#define CPU_MMU_INDEX 6
+#define MEMSUFFIX MMU_MODE6_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 7) */
+
+#if (NB_MMU_MODES >= 8) && defined(MMU_MODE7_SUFFIX)
+
+#define CPU_MMU_INDEX 7
+#define MEMSUFFIX MMU_MODE7_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 8) */
+
+#if (NB_MMU_MODES >= 9) && defined(MMU_MODE8_SUFFIX)
+
+#define CPU_MMU_INDEX 8
+#define MEMSUFFIX MMU_MODE8_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 9) */
+
+#if (NB_MMU_MODES >= 10) && defined(MMU_MODE9_SUFFIX)
+
+#define CPU_MMU_INDEX 9
+#define MEMSUFFIX MMU_MODE9_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 10) */
+
+#if (NB_MMU_MODES >= 11) && defined(MMU_MODE10_SUFFIX)
+
+#define CPU_MMU_INDEX 10
+#define MEMSUFFIX MMU_MODE10_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 11) */
+
+#if (NB_MMU_MODES >= 12) && defined(MMU_MODE11_SUFFIX)
+
+#define CPU_MMU_INDEX 11
+#define MEMSUFFIX MMU_MODE11_SUFFIX
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 12) */
+
+#if (NB_MMU_MODES > 12)
+#error "NB_MMU_MODES > 12 is not supported for now"
+#endif /* (NB_MMU_MODES > 12) */
+
+/* These accesses are slower, so they must be as rare as possible */
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define MEMSUFFIX _data
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define MEMSUFFIX _code
+#define SOFTMMU_CODE_ACCESS
+
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#undef SOFTMMU_CODE_ACCESS
+
+#endif /* defined(CONFIG_USER_ONLY) */
+
+/**
+ * tlb_vaddr_to_host:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index to use for lookup
+ *
+ * Look up the specified guest virtual address in the TCG softmmu TLB.
+ * If the TLB contains a host virtual address suitable for direct RAM
+ * access, then return it. Otherwise (TLB miss, TLB entry is for an
+ * I/O access, etc) return NULL.
+ *
+ * This is the equivalent of the initial fast-path code used by
+ * TCG backends for guest load and store accesses.
+ */
+static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
+ int access_type, int mmu_idx)
+{
+#if defined(CONFIG_USER_ONLY)
+ return g2h(addr);
+#else
+ int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
+ target_ulong tlb_addr;
+ uintptr_t haddr;
+
+ switch (access_type) {
+ case 0:
+ tlb_addr = tlbentry->addr_read;
+ break;
+ case 1:
+ tlb_addr = tlbentry->addr_write;
+ break;
+ case 2:
+ tlb_addr = tlbentry->addr_code;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if ((addr & TARGET_PAGE_MASK)
+ != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ /* TLB entry is for a different page */
+ return NULL;
+ }
+
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ return NULL;
+ }
+
+ haddr = addr + env->tlb_table[mmu_idx][index].addend;
+ return (void *)haddr;
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+#endif /* CPU_LDST_H */
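As a usage sketch, a target op helper built on the generated accessors (helper_my_copy is a hypothetical name; the _data suffix selects the current MMU mode):

    /* Hypothetical helper: copy a 32-bit word between two guest
     * virtual addresses. */
    void helper_my_copy(CPUArchState *env, target_ulong dst, target_ulong src)
    {
        uint32_t v = cpu_ldl_data(env, src);  /* load via the softmmu TLB */
        cpu_stl_data(env, dst, v);            /* store, faulting if unmapped */
    }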
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
new file mode 100644
index 00000000..95ab7504
--- /dev/null
+++ b/include/exec/cpu_ldst_template.h
@@ -0,0 +1,141 @@
+/*
+ * Software MMU support
+ *
+ * Generate inline load/store functions for one MMU mode and data
+ * size.
+ *
+ * Generate a store function as well as signed and unsigned loads.
+ *
+ * Not used directly but included from cpu_ldst.h.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#if DATA_SIZE == 8
+#define SUFFIX q
+#define USUFFIX q
+#define DATA_TYPE uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX l
+#define USUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX w
+#define USUFFIX uw
+#define DATA_TYPE uint16_t
+#define DATA_STYPE int16_t
+#elif DATA_SIZE == 1
+#define SUFFIX b
+#define USUFFIX ub
+#define DATA_TYPE uint8_t
+#define DATA_STYPE int8_t
+#else
+#error unsupported data size
+#endif
+
+#if DATA_SIZE == 8
+#define RES_TYPE uint64_t
+#else
+#define RES_TYPE uint32_t
+#endif
+
+#ifdef SOFTMMU_CODE_ACCESS
+#define ADDR_READ addr_code
+#define MMUSUFFIX _cmmu
+#else
+#define ADDR_READ addr_read
+#define MMUSUFFIX _mmu
+#endif
+
+/* generic load/store macros */
+
+static inline RES_TYPE
+glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+ int page_index;
+ RES_TYPE res;
+ target_ulong addr;
+ int mmu_idx;
+
+ addr = ptr;
+ page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ mmu_idx = CPU_MMU_INDEX;
+ if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
+ (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+ res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
+ } else {
+ uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
+ }
+ return res;
+}
+
+#if DATA_SIZE <= 2
+static inline int
+glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+ int res, page_index;
+ target_ulong addr;
+ int mmu_idx;
+
+ addr = ptr;
+ page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ mmu_idx = CPU_MMU_INDEX;
+ if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
+ (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+ res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
+ MMUSUFFIX)(env, addr, mmu_idx);
+ } else {
+ uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr);
+ }
+ return res;
+}
+#endif
+
+#ifndef SOFTMMU_CODE_ACCESS
+
+/* generic store macro */
+
+static inline void
+glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
+ RES_TYPE v)
+{
+ int page_index;
+ target_ulong addr;
+ int mmu_idx;
+
+ addr = ptr;
+ page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ mmu_idx = CPU_MMU_INDEX;
+ if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
+ (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
+ } else {
+ uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v);
+ }
+}
+
+#endif /* !SOFTMMU_CODE_ACCESS */
+
+#undef RES_TYPE
+#undef DATA_TYPE
+#undef DATA_STYPE
+#undef SUFFIX
+#undef USUFFIX
+#undef DATA_SIZE
+#undef MMUSUFFIX
+#undef ADDR_READ
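For reference, with DATA_SIZE 4 and MEMSUFFIX _data the template above expands to approximately the following (a hand-expanded sketch, not generated output):

    /* Hand-expanded sketch of cpu_ldl_data from the template above. */
    static inline uint32_t cpu_ldl_data(CPUArchState *env, target_ulong ptr)
    {
        int page_index = (ptr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        int mmu_idx = cpu_mmu_index(env);

        if (unlikely(env->tlb_table[mmu_idx][page_index].addr_read !=
                     (ptr & (TARGET_PAGE_MASK | 3)))) {
            /* TLB miss or special page: take the slow-path helper. */
            return helper_ldl_mmu(env, ptr, mmu_idx);
        }
        /* TLB hit: direct host access through the entry's addend. */
        uintptr_t hostaddr = ptr + env->tlb_table[mmu_idx][page_index].addend;
        return ldl_p((uint8_t *)hostaddr);
    }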
diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
new file mode 100644
index 00000000..b3b865fa
--- /dev/null
+++ b/include/exec/cpu_ldst_useronly_template.h
@@ -0,0 +1,81 @@
+/*
+ * User-only accessor function support
+ *
+ * Generate inline load/store functions for one data size.
+ *
+ * Generate a store function as well as signed and unsigned loads.
+ *
+ * Not used directly but included from cpu_ldst.h.
+ *
+ * Copyright (c) 2015 Linaro Limited
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#if DATA_SIZE == 8
+#define SUFFIX q
+#define USUFFIX q
+#define DATA_TYPE uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX l
+#define USUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX w
+#define USUFFIX uw
+#define DATA_TYPE uint16_t
+#define DATA_STYPE int16_t
+#elif DATA_SIZE == 1
+#define SUFFIX b
+#define USUFFIX ub
+#define DATA_TYPE uint8_t
+#define DATA_STYPE int8_t
+#else
+#error unsupported data size
+#endif
+
+#if DATA_SIZE == 8
+#define RES_TYPE uint64_t
+#else
+#define RES_TYPE uint32_t
+#endif
+
+static inline RES_TYPE
+glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+ return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
+}
+
+#if DATA_SIZE <= 2
+static inline int
+glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+ return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
+}
+#endif
+
+#ifndef CODE_ACCESS
+static inline void
+glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
+ RES_TYPE v)
+{
+ glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
+}
+#endif
+
+#undef RES_TYPE
+#undef DATA_TYPE
+#undef DATA_STYPE
+#undef SUFFIX
+#undef USUFFIX
+#undef DATA_SIZE
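For comparison, the user-only expansion is just a direct host access through g2h(); with DATA_SIZE 1 and MEMSUFFIX _data the template yields roughly:

    /* Hand-expanded sketch: no TLB in user-only mode, just the guest
     * base offset applied by g2h(). */
    static inline uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
    {
        return ldub_p(g2h(ptr));
    }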
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
new file mode 100644
index 00000000..360815e1
--- /dev/null
+++ b/include/exec/cputlb.h
@@ -0,0 +1,47 @@
+/*
+ * Common CPU TLB handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPUTLB_H
+#define CPUTLB_H
+
+#if !defined(CONFIG_USER_ONLY)
+/* cputlb.c */
+void tlb_protect_code(ram_addr_t ram_addr);
+void tlb_unprotect_code(ram_addr_t ram_addr);
+void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
+ uintptr_t length);
+void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length);
+void tlb_set_dirty(CPUArchState *env, target_ulong vaddr);
+extern int tlb_flush_count;
+
+/* exec.c */
+void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
+
+MemoryRegionSection *
+address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr, hwaddr *xlat,
+ hwaddr *plen);
+hwaddr memory_region_section_get_iotlb(CPUState *cpu,
+ MemoryRegionSection *section,
+ target_ulong vaddr,
+ hwaddr paddr, hwaddr xlat,
+ int prot,
+ target_ulong *address);
+bool memory_region_is_unassigned(MemoryRegion *mr);
+
+#endif
+#endif
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
new file mode 100644
index 00000000..60f12bc6
--- /dev/null
+++ b/include/exec/exec-all.h
@@ -0,0 +1,367 @@
+/*
+ * internal execution defines for qemu
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _EXEC_ALL_H_
+#define _EXEC_ALL_H_
+
+#include "qemu-common.h"
+
+/* Allow viewing of translation results; the slowdown should be negligible, so we leave it enabled */
+#define DEBUG_DISAS
+
+/* Page tracking code uses ram addresses in system mode, and virtual
+ addresses in userspace mode. Define tb_page_addr_t to be an appropriate
+ type. */
+#if defined(CONFIG_USER_ONLY)
+typedef abi_ulong tb_page_addr_t;
+#else
+typedef ram_addr_t tb_page_addr_t;
+#endif
+
+/* is_jmp field values */
+#define DISAS_NEXT 0 /* next instruction can be analyzed */
+#define DISAS_JUMP 1 /* only pc was modified dynamically */
+#define DISAS_UPDATE 2 /* cpu state was modified dynamically */
+#define DISAS_TB_JUMP 3 /* only pc was modified statically */
+
+struct TranslationBlock;
+typedef struct TranslationBlock TranslationBlock;
+
+/* XXX: make safe guess about sizes */
+#define MAX_OP_PER_INSTR 266
+
+#if HOST_LONG_BITS == 32
+#define MAX_OPC_PARAM_PER_ARG 2
+#else
+#define MAX_OPC_PARAM_PER_ARG 1
+#endif
+#define MAX_OPC_PARAM_IARGS 5
+#define MAX_OPC_PARAM_OARGS 1
+#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
+
+/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
+ * and up to 4 + N parameters on 64-bit archs
+ * (N = number of input arguments + output arguments). */
+#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
+#define OPC_BUF_SIZE 640
+#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
+
+/* Maximum size a TCG op can expand to. This is complicated because a
+ single op may require several host instructions and register reloads.
+ For now take a wild guess at 192 bytes, which should allow at least
+ a couple of fixup instructions per argument. */
+#define TCG_MAX_OP_SIZE 192
+
+#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
+
+#include "qemu/log.h"
+
+void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
+void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
+void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
+ int pc_pos);
+
+void cpu_gen_init(void);
+int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
+ int *gen_code_size_ptr);
+bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
+void page_size_init(void);
+
+void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
+void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
+TranslationBlock *tb_gen_code(CPUState *cpu,
+ target_ulong pc, target_ulong cs_base, int flags,
+ int cflags);
+void cpu_exec_init(CPUState *cpu, Error **errp);
+void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
+
+#if !defined(CONFIG_USER_ONLY)
+bool qemu_in_vcpu_thread(void);
+void cpu_reload_memory_map(CPUState *cpu);
+void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
+/* cputlb.c */
+void tlb_flush_page(CPUState *cpu, target_ulong addr);
+void tlb_flush(CPUState *cpu, int flush_global);
+void tlb_set_page(CPUState *cpu, target_ulong vaddr,
+ hwaddr paddr, int prot,
+ int mmu_idx, target_ulong size);
+void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+ hwaddr paddr, MemTxAttrs attrs,
+ int prot, int mmu_idx, target_ulong size);
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
+void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
+ uintptr_t retaddr);
+#else
+static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
+{
+}
+
+static inline void tlb_flush(CPUState *cpu, int flush_global)
+{
+}
+#endif
+
+#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
+
+#define CODE_GEN_PHYS_HASH_BITS 15
+#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
+
+/* estimated block size for TB allocation */
+/* XXX: use a per-target average code fragment size and modulate it
+   according to the host CPU */
+#if defined(CONFIG_SOFTMMU)
+#define CODE_GEN_AVG_BLOCK_SIZE 128
+#else
+#define CODE_GEN_AVG_BLOCK_SIZE 64
+#endif
+
+#if defined(__arm__) || defined(_ARCH_PPC) \
+ || defined(__x86_64__) || defined(__i386__) \
+ || defined(__sparc__) || defined(__aarch64__) \
+ || defined(__s390x__) || defined(__mips__) \
+ || defined(CONFIG_TCG_INTERPRETER)
+#define USE_DIRECT_JUMP
+#endif
+
+struct TranslationBlock {
+ target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
+ target_ulong cs_base; /* CS base for this block */
+ uint64_t flags; /* flags defining in which context the code was generated */
+ uint16_t size; /* size of target code for this block (1 <=
+ size <= TARGET_PAGE_SIZE) */
+ uint16_t icount;
+ uint32_t cflags; /* compile flags */
+#define CF_COUNT_MASK 0x7fff
+#define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */
+#define CF_NOCACHE 0x10000 /* To be freed after execution */
+#define CF_USE_ICOUNT 0x20000
+
+ void *tc_ptr; /* pointer to the translated code */
+ /* next matching tb for physical address. */
+ struct TranslationBlock *phys_hash_next;
+ /* first and second physical page containing code. The lower bit
+ of the pointer tells the index in page_next[] */
+ struct TranslationBlock *page_next[2];
+ tb_page_addr_t page_addr[2];
+
+ /* the following data are used to directly call another TB from
+ the code of this one. */
+ uint16_t tb_next_offset[2]; /* offset of original jump target */
+#ifdef USE_DIRECT_JUMP
+ uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
+#else
+ uintptr_t tb_next[2]; /* address of jump generated code */
+#endif
+ /* list of TBs jumping to this one. This is a circular list using
+ the two least significant bits of the pointers to tell what is
+ the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
+ jmp_first */
+ struct TranslationBlock *jmp_next[2];
+ struct TranslationBlock *jmp_first;
+};
+
+#include "exec/spinlock.h"
+
+typedef struct TBContext TBContext;
+
+struct TBContext {
+
+ TranslationBlock *tbs;
+ TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
+ int nb_tbs;
+ /* any access to the tbs or the page table must use this lock */
+ spinlock_t tb_lock;
+
+ /* statistics */
+ int tb_flush_count;
+ int tb_phys_invalidate_count;
+
+ int tb_invalidated_flag;
+};
+
+void tb_free(TranslationBlock *tb);
+void tb_flush(CPUState *cpu);
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+
+#if defined(USE_DIRECT_JUMP)
+
+#if defined(CONFIG_TCG_INTERPRETER)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+ /* patch the branch destination */
+ *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+ /* no need to flush icache explicitly */
+}
+#elif defined(_ARCH_PPC)
+void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
+#define tb_set_jmp_target1 ppc_tb_set_jmp_target
+#elif defined(__i386__) || defined(__x86_64__)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+ /* patch the branch destination */
+ stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
+ /* no need to flush icache explicitly */
+}
+#elif defined(__s390x__)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+ /* patch the branch destination */
+ intptr_t disp = addr - (jmp_addr - 2);
+ stl_be_p((void*)jmp_addr, disp / 2);
+ /* no need to flush icache explicitly */
+}
+#elif defined(__aarch64__)
+void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
+#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
+#elif defined(__arm__)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+#if !QEMU_GNUC_PREREQ(4, 1)
+ register unsigned long _beg __asm ("a1");
+ register unsigned long _end __asm ("a2");
+ register unsigned long _flg __asm ("a3");
+#endif
+
+ /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
+ *(uint32_t *)jmp_addr =
+ (*(uint32_t *)jmp_addr & ~0xffffff)
+ | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
+
+#if QEMU_GNUC_PREREQ(4, 1)
+ __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
+#else
+ /* flush icache */
+ _beg = jmp_addr;
+ _end = jmp_addr + 4;
+ _flg = 0;
+ __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
+#endif
+}
+#elif defined(__sparc__) || defined(__mips__)
+void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
+#else
+#error tb_set_jmp_target1 is missing
+#endif
+
+static inline void tb_set_jmp_target(TranslationBlock *tb,
+ int n, uintptr_t addr)
+{
+ uint16_t offset = tb->tb_jmp_offset[n];
+ tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
+}
+
+#else
+
+/* set the jump target */
+static inline void tb_set_jmp_target(TranslationBlock *tb,
+ int n, uintptr_t addr)
+{
+ tb->tb_next[n] = addr;
+}
+
+#endif
+
+static inline void tb_add_jump(TranslationBlock *tb, int n,
+ TranslationBlock *tb_next)
+{
+ /* NOTE: this test is only needed for thread safety */
+ if (!tb->jmp_next[n]) {
+ /* patch the native jump address */
+ tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
+
+ /* add in TB jmp circular list */
+ tb->jmp_next[n] = tb_next->jmp_first;
+ tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
+ }
+}
+
+/* GETRA is the true target of the return instruction that we'll execute,
+ defined here for simplicity of defining the follow-up macros. */
+#if defined(CONFIG_TCG_INTERPRETER)
+extern uintptr_t tci_tb_ptr;
+# define GETRA() tci_tb_ptr
+#else
+# define GETRA() \
+ ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
+#endif
+
+/* The true return address will often point to a host insn that is part of
+ the next translated guest insn. Adjust the address backward to point to
+ the middle of the call insn. Subtracting one would do the job except for
+ several compressed mode architectures (arm, mips) which set the low bit
+ to indicate the compressed mode; subtracting two works around that. It
+ is also the case that there are no host ISAs that contain a call insn
+ smaller than 4 bytes, so we don't worry about special-casing this. */
+#define GETPC_ADJ 2
+
+#define GETPC() (GETRA() - GETPC_ADJ)
+
+#if !defined(CONFIG_USER_ONLY)
+
+void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));
+
+struct MemoryRegion *iotlb_to_region(CPUState *cpu,
+ hwaddr index);
+
+void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
+ uintptr_t retaddr);
+
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
+{
+ return addr;
+}
+#else
+/* cputlb.c */
+tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
+#endif
+
+/* vl.c */
+extern int singlestep;
+
+/* cpu-exec.c */
+extern volatile sig_atomic_t exit_request;
+
+/**
+ * cpu_can_do_io:
+ * @cpu: The CPU for which to check IO.
+ *
+ * Deterministic execution requires that IO only be performed on the last
+ * instruction of a TB so that interrupts take effect immediately.
+ *
+ * Returns: %true if memory-mapped IO is safe, %false otherwise.
+ */
+static inline bool cpu_can_do_io(CPUState *cpu)
+{
+ if (!use_icount) {
+ return true;
+ }
+ /* If not executing code then assume we are ok. */
+ if (cpu->current_tb == NULL) {
+ return true;
+ }
+ return cpu->can_do_io != 0;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
+#endif
+#endif
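A sketch of the usual GETPC() pattern in a target helper (helper_my_check and my_access_ok are hypothetical; cpu_restore_state() and cpu_loop_exit() are declared above, and ENV_GET_CPU comes from the target's cpu.h):

    /* Hypothetical helper showing the fault-unwind idiom. */
    void helper_my_check(CPUArchState *env, target_ulong addr)
    {
        /* Capture the host return address into the TB up front. */
        uintptr_t retaddr = GETPC();

        if (!my_access_ok(env, addr)) {   /* hypothetical target check */
            CPUState *cs = ENV_GET_CPU(env);

            /* Resynchronize guest state to the faulting insn, then
             * longjmp back to the main loop to raise the exception. */
            cpu_restore_state(cs, retaddr);
            cpu_loop_exit(cs);
        }
    }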
diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h
new file mode 100644
index 00000000..05f57c24
--- /dev/null
+++ b/include/exec/gdbstub.h
@@ -0,0 +1,98 @@
+#ifndef GDBSTUB_H
+#define GDBSTUB_H
+
+#define DEFAULT_GDBSTUB_PORT "1234"
+
+/* GDB breakpoint/watchpoint types */
+#define GDB_BREAKPOINT_SW 0
+#define GDB_BREAKPOINT_HW 1
+#define GDB_WATCHPOINT_WRITE 2
+#define GDB_WATCHPOINT_READ 3
+#define GDB_WATCHPOINT_ACCESS 4
+
+#ifdef NEED_CPU_H
+typedef void (*gdb_syscall_complete_cb)(CPUState *cpu,
+ target_ulong ret, target_ulong err);
+
+void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
+int use_gdb_syscalls(void);
+void gdb_set_stop_cpu(CPUState *cpu);
+void gdb_exit(CPUArchState *, int);
+#ifdef CONFIG_USER_ONLY
+int gdb_queuesig (void);
+int gdb_handlesig(CPUState *, int);
+void gdb_signalled(CPUArchState *, int);
+void gdbserver_fork(CPUState *);
+#endif
+/* Get or set a register. Returns the size of the register. */
+typedef int (*gdb_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
+void gdb_register_coprocessor(CPUState *cpu,
+ gdb_reg_cb get_reg, gdb_reg_cb set_reg,
+ int num_regs, const char *xml, int g_pos);
+
+static inline int cpu_index(CPUState *cpu)
+{
+#if defined(CONFIG_USER_ONLY)
+ return cpu->host_tid;
+#else
+ return cpu->cpu_index + 1;
+#endif
+}
+
+/* The GDB remote protocol transfers values in target byte order. This means
+ * we can use the raw memory access routines to access the value buffer.
+ * Conveniently, these also handle the case where the buffer is mis-aligned.
+ */
+
+static inline int gdb_get_reg8(uint8_t *mem_buf, uint8_t val)
+{
+ stb_p(mem_buf, val);
+ return 1;
+}
+
+static inline int gdb_get_reg16(uint8_t *mem_buf, uint16_t val)
+{
+ stw_p(mem_buf, val);
+ return 2;
+}
+
+static inline int gdb_get_reg32(uint8_t *mem_buf, uint32_t val)
+{
+ stl_p(mem_buf, val);
+ return 4;
+}
+
+static inline int gdb_get_reg64(uint8_t *mem_buf, uint64_t val)
+{
+ stq_p(mem_buf, val);
+ return 8;
+}
+
+#if TARGET_LONG_BITS == 64
+#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val)
+#define ldtul_p(addr) ldq_p(addr)
+#else
+#define gdb_get_regl(buf, val) gdb_get_reg32(buf, val)
+#define ldtul_p(addr) ldl_p(addr)
+#endif
+
+#endif
+
+#ifdef CONFIG_USER_ONLY
+int gdbserver_start(int);
+#else
+int gdbserver_start(const char *port);
+#endif
+
+/**
+ * gdb_has_xml:
+ * This is an ugly hack to cope with both new and old gdb.
+ * If gdb sends qXfer:features:read then assume we're talking to a newish
+ * gdb that understands target descriptions.
+ */
+extern bool gdb_has_xml;
+
+/* in gdbstub-xml.c, generated by scripts/feature_to_c.sh */
+extern const char *const xml_builtin[][2];
+
+#endif
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
new file mode 100644
index 00000000..05d89d35
--- /dev/null
+++ b/include/exec/gen-icount.h
@@ -0,0 +1,79 @@
+#ifndef GEN_ICOUNT_H
+#define GEN_ICOUNT_H 1
+
+#include "qemu/timer.h"
+
+/* Helpers for instruction counting code generation. */
+
+static TCGArg *icount_arg;
+static TCGLabel *icount_label;
+static TCGLabel *exitreq_label;
+
+static inline void gen_tb_start(TranslationBlock *tb)
+{
+ TCGv_i32 count, flag, imm;
+ int i;
+
+ exitreq_label = gen_new_label();
+ flag = tcg_temp_new_i32();
+ tcg_gen_ld_i32(flag, cpu_env,
+ offsetof(CPUState, tcg_exit_req) - ENV_OFFSET);
+ tcg_gen_brcondi_i32(TCG_COND_NE, flag, 0, exitreq_label);
+ tcg_temp_free_i32(flag);
+
+ if (!(tb->cflags & CF_USE_ICOUNT)) {
+ return;
+ }
+
+ icount_label = gen_new_label();
+ count = tcg_temp_local_new_i32();
+ tcg_gen_ld_i32(count, cpu_env,
+ -ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
+
+ imm = tcg_temp_new_i32();
+ tcg_gen_movi_i32(imm, 0xdeadbeef);
+
+ /* This is a horrid hack to allow fixing up the value later. */
+ i = tcg_ctx.gen_last_op_idx;
+ i = tcg_ctx.gen_op_buf[i].args;
+ icount_arg = &tcg_ctx.gen_opparam_buf[i + 1];
+
+ tcg_gen_sub_i32(count, count, imm);
+ tcg_temp_free_i32(imm);
+
+ tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
+ tcg_gen_st16_i32(count, cpu_env,
+ -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
+ tcg_temp_free_i32(count);
+}
+
+static void gen_tb_end(TranslationBlock *tb, int num_insns)
+{
+ gen_set_label(exitreq_label);
+ tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED);
+
+ if (tb->cflags & CF_USE_ICOUNT) {
+ *icount_arg = num_insns;
+ gen_set_label(icount_label);
+ tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED);
+ }
+
+ /* Terminate the linked list. */
+ tcg_ctx.gen_op_buf[tcg_ctx.gen_last_op_idx].next = -1;
+}
+
+static inline void gen_io_start(void)
+{
+ TCGv_i32 tmp = tcg_const_i32(1);
+ tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
+ tcg_temp_free_i32(tmp);
+}
+
+static inline void gen_io_end(void)
+{
+ TCGv_i32 tmp = tcg_const_i32(0);
+ tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
+ tcg_temp_free_i32(tmp);
+}
+
+#endif
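
A sketch of the intended use from a target translator: bracket the code generated for an instruction that may touch MMIO, so that can_do_io is set only for its duration (`tb` is the TranslationBlock being translated; gen_helper_io_insn is a hypothetical helper):

if (tb->cflags & CF_USE_ICOUNT) {
    gen_io_start();              /* can_do_io = 1 for this insn */
}
gen_helper_io_insn(cpu_env);     /* hypothetical helper performing the IO */
if (tb->cflags & CF_USE_ICOUNT) {
    gen_io_end();                /* back to can_do_io = 0 */
}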
diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h
new file mode 100644
index 00000000..0d0da3ae
--- /dev/null
+++ b/include/exec/helper-gen.h
@@ -0,0 +1,72 @@
+/* Helper file for declaring TCG helper functions.
+ This one expands generation functions for tcg opcodes. */
+
+#ifndef HELPER_GEN_H
+#define HELPER_GEN_H 1
+
+#include <exec/helper-head.h>
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
+{ \
+ tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 0, NULL); \
+}
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1)) \
+{ \
+ TCGArg args[1] = { dh_arg(t1, 1) }; \
+ tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 1, args); \
+}
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
+{ \
+ TCGArg args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \
+ tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 2, args); \
+}
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
+{ \
+ TCGArg args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \
+ tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 3, args); \
+}
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
+ dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
+{ \
+ TCGArg args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \
+ dh_arg(t3, 3), dh_arg(t4, 4) }; \
+ tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 4, args); \
+}
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
+{ \
+ TCGArg args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5) }; \
+ tcg_gen_callN(&tcg_ctx, HELPER(name), dh_retvar(ret), 5, args); \
+}
+
+#include "helper.h"
+#include "trace/generated-helpers.h"
+#include "trace/generated-helpers-wrappers.h"
+#include "tcg-runtime.h"
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef GEN_HELPER
+
+#endif /* HELPER_GEN_H */
diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h
new file mode 100644
index 00000000..b009ccb1
--- /dev/null
+++ b/include/exec/helper-head.h
@@ -0,0 +1,134 @@
+/* Helper file for declaring TCG helper functions.
+ Used by other helper files.
+
+ Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
+ functions. Names should be specified without the helper_ prefix, and
+ the return and argument types specified. 3 basic types are understood
+ (i32, i64 and ptr). Additional aliases are provided for convenience and
+ to match the types used by the C helper implementation.
+
+ The target helper.h should be included in all files that use/define
+ helper functions. This will ensure that function prototypes are
+ consistent. In addition it should be included an extra two times for
+ helper.c, defining:
+ GEN_HELPER 1 to produce op generation functions (gen_helper_*)
+ GEN_HELPER 2 to produce the runtime registration of helper functions.
+ */
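
For example, a hypothetical target declaration such as

    DEF_HELPER_2(mulu, i32, i32, i32)

expands via helper-proto.h to the prototype uint32_t helper_mulu(uint32_t, uint32_t); and via helper-gen.h to a generator gen_helper_mulu(TCGv_i32 retval, TCGv_i32 arg1, TCGv_i32 arg2) that the translator can call.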
+
+#ifndef DEF_HELPER_H
+#define DEF_HELPER_H 1
+
+#include "qemu/osdep.h"
+
+#define HELPER(name) glue(helper_, name)
+
+#define GET_TCGV_i32 GET_TCGV_I32
+#define GET_TCGV_i64 GET_TCGV_I64
+#define GET_TCGV_ptr GET_TCGV_PTR
+
+/* Some types that make sense in C, but not for TCG. */
+#define dh_alias_i32 i32
+#define dh_alias_s32 i32
+#define dh_alias_int i32
+#define dh_alias_i64 i64
+#define dh_alias_s64 i64
+#define dh_alias_f32 i32
+#define dh_alias_f64 i64
+#ifdef TARGET_LONG_BITS
+# if TARGET_LONG_BITS == 32
+# define dh_alias_tl i32
+# else
+# define dh_alias_tl i64
+# endif
+#endif
+#define dh_alias_ptr ptr
+#define dh_alias_void void
+#define dh_alias_noreturn noreturn
+#define dh_alias_env ptr
+#define dh_alias(t) glue(dh_alias_, t)
+
+#define dh_ctype_i32 uint32_t
+#define dh_ctype_s32 int32_t
+#define dh_ctype_int int
+#define dh_ctype_i64 uint64_t
+#define dh_ctype_s64 int64_t
+#define dh_ctype_f32 float32
+#define dh_ctype_f64 float64
+#define dh_ctype_tl target_ulong
+#define dh_ctype_ptr void *
+#define dh_ctype_void void
+#define dh_ctype_noreturn void QEMU_NORETURN
+#define dh_ctype_env CPUArchState *
+#define dh_ctype(t) dh_ctype_##t
+
+/* We can't use glue() here because it falls foul of C preprocessor
+ recursive expansion rules. */
+#define dh_retvar_decl0_void void
+#define dh_retvar_decl0_noreturn void
+#define dh_retvar_decl0_i32 TCGv_i32 retval
+#define dh_retvar_decl0_i64 TCGv_i64 retval
+#define dh_retvar_decl0_ptr TCGv_ptr retval
+#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))
+
+#define dh_retvar_decl_void
+#define dh_retvar_decl_noreturn
+#define dh_retvar_decl_i32 TCGv_i32 retval,
+#define dh_retvar_decl_i64 TCGv_i64 retval,
+#define dh_retvar_decl_ptr TCGv_ptr retval,
+#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
+
+#define dh_retvar_void TCG_CALL_DUMMY_ARG
+#define dh_retvar_noreturn TCG_CALL_DUMMY_ARG
+#define dh_retvar_i32 GET_TCGV_i32(retval)
+#define dh_retvar_i64 GET_TCGV_i64(retval)
+#define dh_retvar_ptr GET_TCGV_ptr(retval)
+#define dh_retvar(t) glue(dh_retvar_, dh_alias(t))
+
+#define dh_is_64bit_void 0
+#define dh_is_64bit_noreturn 0
+#define dh_is_64bit_i32 0
+#define dh_is_64bit_i64 1
+#define dh_is_64bit_ptr (sizeof(void *) == 8)
+#define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t))
+
+#define dh_is_signed_void 0
+#define dh_is_signed_noreturn 0
+#define dh_is_signed_i32 0
+#define dh_is_signed_s32 1
+#define dh_is_signed_i64 0
+#define dh_is_signed_s64 1
+#define dh_is_signed_f32 0
+#define dh_is_signed_f64 0
+#define dh_is_signed_tl 0
+#define dh_is_signed_int 1
+/* ??? This is highly specific to the host cpu. There are even special
+ extension instructions that may be required, e.g. ia64's addp4. But
+ for now we don't support any 64-bit targets with 32-bit pointers. */
+#define dh_is_signed_ptr 0
+#define dh_is_signed_env dh_is_signed_ptr
+#define dh_is_signed(t) dh_is_signed_##t
+
+#define dh_sizemask(t, n) \
+ ((dh_is_64bit(t) << (n*2)) | (dh_is_signed(t) << (n*2+1)))
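
Worked by hand from the tables above (hypothetical helper `foo`, 64-bit host):

/* DEF_HELPER_2(foo, s32, i64, ptr) gives:
 *   dh_sizemask(s32, 0) = (0 << 0) | (1 << 1) = 2   (32-bit, signed)
 *   dh_sizemask(i64, 1) = (1 << 2) | (0 << 3) = 4   (64-bit, unsigned)
 *   dh_sizemask(ptr, 2) = (1 << 4) | (0 << 5) = 16  (sizeof(void *) == 8)
 * so the combined .sizemask is 2 | 4 | 16 = 22.
 */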
+
+#define dh_arg(t, n) \
+ glue(GET_TCGV_, dh_alias(t))(glue(arg, n))
+
+#define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n)
+
+#define DEF_HELPER_0(name, ret) \
+ DEF_HELPER_FLAGS_0(name, 0, ret)
+#define DEF_HELPER_1(name, ret, t1) \
+ DEF_HELPER_FLAGS_1(name, 0, ret, t1)
+#define DEF_HELPER_2(name, ret, t1, t2) \
+ DEF_HELPER_FLAGS_2(name, 0, ret, t1, t2)
+#define DEF_HELPER_3(name, ret, t1, t2, t3) \
+ DEF_HELPER_FLAGS_3(name, 0, ret, t1, t2, t3)
+#define DEF_HELPER_4(name, ret, t1, t2, t3, t4) \
+ DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4)
+#define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \
+ DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)
+
+/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
+
+#endif /* DEF_HELPER_H */
diff --git a/include/exec/helper-proto.h b/include/exec/helper-proto.h
new file mode 100644
index 00000000..effdd438
--- /dev/null
+++ b/include/exec/helper-proto.h
@@ -0,0 +1,40 @@
+/* Helper file for declaring TCG helper functions.
+ This one expands prototypes for the helper functions. */
+
+#ifndef HELPER_PROTO_H
+#define HELPER_PROTO_H 1
+
+#include <exec/helper-head.h>
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+dh_ctype(ret) HELPER(name) (void);
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1));
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4));
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5));
+
+#include "helper.h"
+#include "trace/generated-helpers.h"
+#include "tcg-runtime.h"
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+
+#endif /* HELPER_PROTO_H */
diff --git a/include/exec/helper-tcg.h b/include/exec/helper-tcg.h
new file mode 100644
index 00000000..79fa3c8c
--- /dev/null
+++ b/include/exec/helper-tcg.h
@@ -0,0 +1,49 @@
+/* Helper file for declaring TCG helper functions.
+ This one defines data structures private to tcg.c. */
+
+#ifndef HELPER_TCG_H
+#define HELPER_TCG_H 1
+
+#include <exec/helper-head.h>
+
+#define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \
+ { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
+ .sizemask = dh_sizemask(ret, 0) },
+
+#define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \
+ { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
+ .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) },
+
+#define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \
+ { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
+ .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
+ | dh_sizemask(t2, 2) },
+
+#define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \
+ { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
+ .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
+ | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) },
+
+#define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \
+ { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
+ .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
+ | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) },
+
+#define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \
+ { .func = HELPER(NAME), .name = #NAME, .flags = FLAGS, \
+ .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
+ | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
+ | dh_sizemask(t5, 5) },
+
+#include "helper.h"
+#include "trace/generated-helpers.h"
+#include "tcg-runtime.h"
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+
+#endif /* HELPER_TCG_H */
diff --git a/include/exec/hwaddr.h b/include/exec/hwaddr.h
new file mode 100644
index 00000000..c9eb78fb
--- /dev/null
+++ b/include/exec/hwaddr.h
@@ -0,0 +1,20 @@
+/* Define hwaddr if it exists. */
+
+#ifndef HWADDR_H
+#define HWADDR_H
+
+#define HWADDR_BITS 64
+/* hwaddr is the type of a physical address (its size can
+ be different from 'target_ulong'). */
+
+typedef uint64_t hwaddr;
+#define HWADDR_MAX UINT64_MAX
+#define TARGET_FMT_plx "%016" PRIx64
+#define HWADDR_PRId PRId64
+#define HWADDR_PRIi PRIi64
+#define HWADDR_PRIo PRIo64
+#define HWADDR_PRIu PRIu64
+#define HWADDR_PRIx PRIx64
+#define HWADDR_PRIX PRIX64
+
+#endif
diff --git a/include/exec/ioport.h b/include/exec/ioport.h
new file mode 100644
index 00000000..3bd67226
--- /dev/null
+++ b/include/exec/ioport.h
@@ -0,0 +1,80 @@
+/*
+ * defines ioport related functions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**************************************************************************
+ * IO ports API
+ */
+
+#ifndef IOPORT_H
+#define IOPORT_H
+
+#include "qemu-common.h"
+#include "qom/object.h"
+#include "exec/memory.h"
+
+typedef uint32_t pio_addr_t;
+#define FMT_pioaddr PRIx32
+
+#define MAX_IOPORTS (64 * 1024)
+#define IOPORTS_MASK (MAX_IOPORTS - 1)
+
+typedef struct MemoryRegionPortio {
+ uint32_t offset;
+ uint32_t len;
+ unsigned size;
+ uint32_t (*read)(void *opaque, uint32_t address);
+ void (*write)(void *opaque, uint32_t address, uint32_t data);
+ uint32_t base; /* private field */
+} MemoryRegionPortio;
+
+#define PORTIO_END_OF_LIST() { }
+
+#ifndef CONFIG_USER_ONLY
+extern const MemoryRegionOps unassigned_io_ops;
+#endif
+
+void cpu_outb(pio_addr_t addr, uint8_t val);
+void cpu_outw(pio_addr_t addr, uint16_t val);
+void cpu_outl(pio_addr_t addr, uint32_t val);
+uint8_t cpu_inb(pio_addr_t addr);
+uint16_t cpu_inw(pio_addr_t addr);
+uint32_t cpu_inl(pio_addr_t addr);
+
+typedef struct PortioList {
+ const struct MemoryRegionPortio *ports;
+ Object *owner;
+ struct MemoryRegion *address_space;
+ unsigned nr;
+ struct MemoryRegion **regions;
+ void *opaque;
+ const char *name;
+ bool flush_coalesced_mmio;
+} PortioList;
+
+void portio_list_init(PortioList *piolist, Object *owner,
+ const struct MemoryRegionPortio *callbacks,
+ void *opaque, const char *name);
+void portio_list_set_flush_coalesced(PortioList *piolist);
+void portio_list_destroy(PortioList *piolist);
+void portio_list_add(PortioList *piolist,
+ struct MemoryRegion *address_space,
+ uint32_t addr);
+void portio_list_del(PortioList *piolist);
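
A sketch of typical use, registering a small legacy port range; my_ioport_read, my_ioport_write, the device state `s`, the ISA IO region and the base address are all hypothetical:

static const MemoryRegionPortio my_portio[] = {
    { 0x00, 4, 1, .read = my_ioport_read, .write = my_ioport_write },
    PORTIO_END_OF_LIST(),
};

/* ... in the device's init/realize code: */
portio_list_init(&s->piolist, OBJECT(s), my_portio, s, "my-device");
portio_list_add(&s->piolist, isa_io_space, 0x3f0);  /* ports 0x3f0..0x3f3 */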
+
+#endif /* IOPORT_H */
diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
new file mode 100644
index 00000000..f8537a8d
--- /dev/null
+++ b/include/exec/memattrs.h
@@ -0,0 +1,49 @@
+/*
+ * Memory transaction attributes
+ *
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Authors:
+ * Peter Maydell <peter.maydell@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMATTRS_H
+#define MEMATTRS_H
+
+/* Every memory transaction has associated with it a set of
+ * attributes. Some of these are generic (such as the ID of
+ * the bus master); some are specific to a particular kind of
+ * bus (such as the ARM Secure/NonSecure bit). We define them
+ * all as non-overlapping bitfields in a single struct to avoid
+ * confusion if different parts of QEMU used the same bit for
+ * different semantics.
+ */
+typedef struct MemTxAttrs {
+ /* Bus masters which don't specify any attributes will get this
+ * (via the MEMTXATTRS_UNSPECIFIED constant), so that we can
+ * distinguish "all attributes deliberately clear" from
+ * "didn't specify" if necessary.
+ */
+ unsigned int unspecified:1;
+ /* ARM/AMBA: TrustZone Secure access
+ * x86: System Management Mode access
+ */
+ unsigned int secure:1;
+ /* Memory access is usermode (unprivileged) */
+ unsigned int user:1;
+ /* Stream ID (for MSI for example) */
+ unsigned int stream_id:16;
+} MemTxAttrs;
+
+/* Bus masters which don't specify any attributes will get this,
+ * which has all attribute bits clear except the topmost one
+ * (so that we can distinguish "all attributes deliberately clear"
+ * from "didn't specify" if necessary).
+ */
+#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 })
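
For example, a bus master issuing a deliberately Secure access would build its attributes explicitly rather than take the default (a minimal sketch):

MemTxAttrs secure_attrs = { .secure = 1 };          /* deliberately set */
MemTxAttrs default_attrs = MEMTXATTRS_UNSPECIFIED;  /* "didn't specify" */

/* default_attrs.unspecified == 1 distinguishes the two cases */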
+
+#endif
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
new file mode 100644
index 00000000..fb467acd
--- /dev/null
+++ b/include/exec/memory-internal.h
@@ -0,0 +1,35 @@
+/*
+ * Declarations for obsolete exec.c functions
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c and memory.c ONLY. Do not include it.
+ * The functions declared here will be removed soon.
+ */
+
+#ifndef MEMORY_INTERNAL_H
+#define MEMORY_INTERNAL_H
+
+#ifndef CONFIG_USER_ONLY
+typedef struct AddressSpaceDispatch AddressSpaceDispatch;
+
+void address_space_init_dispatch(AddressSpace *as);
+void address_space_unregister(AddressSpace *as);
+void address_space_destroy_dispatch(AddressSpace *as);
+
+extern const MemoryRegionOps unassigned_mem_ops;
+
+bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
+ unsigned size, bool is_write);
+
+#endif
+#endif
diff --git a/include/exec/memory.h b/include/exec/memory.h
new file mode 100644
index 00000000..94d20eae
--- /dev/null
+++ b/include/exec/memory.h
@@ -0,0 +1,1340 @@
+/*
+ * Physical memory management API
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMORY_H
+#define MEMORY_H
+
+#ifndef CONFIG_USER_ONLY
+
+#define DIRTY_MEMORY_VGA 0
+#define DIRTY_MEMORY_CODE 1
+#define DIRTY_MEMORY_MIGRATION 2
+#define DIRTY_MEMORY_NUM 3 /* num of dirty bits */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "exec/cpu-common.h"
+#ifndef CONFIG_USER_ONLY
+#include "exec/hwaddr.h"
+#endif
+#include "exec/memattrs.h"
+#include "qemu/queue.h"
+#include "qemu/int128.h"
+#include "qemu/notify.h"
+#include "qapi/error.h"
+#include "qom/object.h"
+#include "qemu/rcu.h"
+
+#define MAX_PHYS_ADDR_SPACE_BITS 62
+#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
+
+#define TYPE_MEMORY_REGION "qemu:memory-region"
+#define MEMORY_REGION(obj) \
+ OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
+
+typedef struct MemoryRegionOps MemoryRegionOps;
+typedef struct MemoryRegionMmio MemoryRegionMmio;
+
+struct MemoryRegionMmio {
+ CPUReadMemoryFunc *read[3];
+ CPUWriteMemoryFunc *write[3];
+};
+
+typedef struct IOMMUTLBEntry IOMMUTLBEntry;
+
+/* See address_space_translate: bit 0 is read, bit 1 is write. */
+typedef enum {
+ IOMMU_NONE = 0,
+ IOMMU_RO = 1,
+ IOMMU_WO = 2,
+ IOMMU_RW = 3,
+} IOMMUAccessFlags;
+
+struct IOMMUTLBEntry {
+ AddressSpace *target_as;
+ hwaddr iova;
+ hwaddr translated_addr;
+ hwaddr addr_mask; /* 0xfff = 4k translation */
+ IOMMUAccessFlags perm;
+};
+
+/* New-style MMIO accessors can indicate that the transaction failed.
+ * A zero (MEMTX_OK) response means success; anything else is a failure
+ * of some kind. The memory subsystem will bitwise-OR together results
+ * if it is synthesizing an operation from multiple smaller accesses.
+ */
+#define MEMTX_OK 0
+#define MEMTX_ERROR (1U << 0) /* device returned an error */
+#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
+typedef uint32_t MemTxResult;
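
A sketch of the OR-combining convention, e.g. when an 8-byte access is synthesized from two 4-byte halves; `mr`, `addr` and `attrs` are assumed in scope, and memory_region_dispatch_read() is declared later in this header:

uint64_t lo, hi;
MemTxResult res = MEMTX_OK;

res |= memory_region_dispatch_read(mr, addr, &lo, 4, attrs);
res |= memory_region_dispatch_read(mr, addr + 4, &hi, 4, attrs);
if (res != MEMTX_OK) {
    /* at least one half failed: device and/or decode error bits set */
}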
+
+/*
+ * Memory region callbacks
+ */
+struct MemoryRegionOps {
+ /* Read from the memory region. @addr is relative to @mr; @size is
+ * in bytes. */
+ uint64_t (*read)(void *opaque,
+ hwaddr addr,
+ unsigned size);
+ /* Write to the memory region. @addr is relative to @mr; @size is
+ * in bytes. */
+ void (*write)(void *opaque,
+ hwaddr addr,
+ uint64_t data,
+ unsigned size);
+
+ MemTxResult (*read_with_attrs)(void *opaque,
+ hwaddr addr,
+ uint64_t *data,
+ unsigned size,
+ MemTxAttrs attrs);
+ MemTxResult (*write_with_attrs)(void *opaque,
+ hwaddr addr,
+ uint64_t data,
+ unsigned size,
+ MemTxAttrs attrs);
+
+ enum device_endian endianness;
+ /* Guest-visible constraints: */
+ struct {
+ /* If nonzero, specify bounds on access sizes beyond which a machine
+ * check is thrown.
+ */
+ unsigned min_access_size;
+ unsigned max_access_size;
+ /* If true, unaligned accesses are supported. Otherwise unaligned
+ * accesses throw machine checks.
+ */
+ bool unaligned;
+ /*
+ * If present, and returns #false, the transaction is not accepted
+ * by the device (and results in machine dependent behaviour such
+ * as a machine check exception).
+ */
+ bool (*accepts)(void *opaque, hwaddr addr,
+ unsigned size, bool is_write);
+ } valid;
+ /* Internal implementation constraints: */
+ struct {
+ /* If nonzero, specifies the minimum size implemented. Smaller sizes
+ * will be rounded upwards and a partial result will be returned.
+ */
+ unsigned min_access_size;
+ /* If nonzero, specifies the maximum size implemented. Larger sizes
+ * will be done as a series of accesses with smaller sizes.
+ */
+ unsigned max_access_size;
+ /* If true, unaligned accesses are supported. Otherwise all accesses
+ * are converted to (possibly multiple) naturally aligned accesses.
+ */
+ bool unaligned;
+ } impl;
+
+ /* If .read and .write are not present, old_mmio may be used for
+ * backwards compatibility with old mmio registration
+ */
+ const MemoryRegionMmio old_mmio;
+};
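
A sketch of a typical instance for a single 32-bit register; MyDevState and both callbacks are hypothetical:

static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
{
    MyDevState *s = opaque;
    return s->status;               /* only one register, at offset 0 */
}

static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
                        unsigned size)
{
    MyDevState *s = opaque;
    s->status = data;
}

static const MemoryRegionOps mydev_ops = {
    .read = mydev_read,
    .write = mydev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,       /* reject sub-word guest accesses */
        .max_access_size = 4,
    },
};

Such a structure is then passed to memory_region_init_io(), declared below.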
+
+typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;
+
+struct MemoryRegionIOMMUOps {
+ /* Return a TLB entry that contains a given address. */
+ IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
+};
+
+typedef struct CoalescedMemoryRange CoalescedMemoryRange;
+typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
+
+struct MemoryRegion {
+ Object parent_obj;
+ /* All fields are private - violators will be prosecuted */
+ const MemoryRegionOps *ops;
+ const MemoryRegionIOMMUOps *iommu_ops;
+ void *opaque;
+ MemoryRegion *container;
+ Int128 size;
+ hwaddr addr;
+ void (*destructor)(MemoryRegion *mr);
+ ram_addr_t ram_addr;
+ uint64_t align;
+ bool subpage;
+ bool terminates;
+ bool romd_mode;
+ bool ram;
+ bool skip_dump;
+ bool readonly; /* For RAM regions */
+ bool enabled;
+ bool rom_device;
+ bool warning_printed; /* For reservations */
+ bool flush_coalesced_mmio;
+ bool global_locking;
+ uint8_t vga_logging_count;
+ MemoryRegion *alias;
+ hwaddr alias_offset;
+ int32_t priority;
+ bool may_overlap;
+ QTAILQ_HEAD(subregions, MemoryRegion) subregions;
+ QTAILQ_ENTRY(MemoryRegion) subregions_link;
+ QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
+ const char *name;
+ uint8_t dirty_log_mask;
+ unsigned ioeventfd_nb;
+ MemoryRegionIoeventfd *ioeventfds;
+ NotifierList iommu_notify;
+};
+
+/**
+ * MemoryListener: callbacks structure for updates to the physical memory map
+ *
+ * Allows a component to adjust to changes in the guest-visible memory map.
+ * Use with memory_listener_register() and memory_listener_unregister().
+ */
+struct MemoryListener {
+ void (*begin)(MemoryListener *listener);
+ void (*commit)(MemoryListener *listener);
+ void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
+ int old, int new);
+ void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
+ int old, int new);
+ void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_global_start)(MemoryListener *listener);
+ void (*log_global_stop)(MemoryListener *listener);
+ void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+ void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+ void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+ void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+ /* Lower = earlier (during add), later (during del) */
+ unsigned priority;
+ AddressSpace *address_space_filter;
+ QTAILQ_ENTRY(MemoryListener) link;
+};
+
+/**
+ * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
+ */
+struct AddressSpace {
+ /* All fields are private. */
+ struct rcu_head rcu;
+ char *name;
+ MemoryRegion *root;
+
+ /* Accessed via RCU. */
+ struct FlatView *current_map;
+
+ int ioeventfd_nb;
+ struct MemoryRegionIoeventfd *ioeventfds;
+ struct AddressSpaceDispatch *dispatch;
+ struct AddressSpaceDispatch *next_dispatch;
+ MemoryListener dispatch_listener;
+
+ QTAILQ_ENTRY(AddressSpace) address_spaces_link;
+};
+
+/**
+ * MemoryRegionSection: describes a fragment of a #MemoryRegion
+ *
+ * @mr: the region, or %NULL if empty
+ * @address_space: the address space the region is mapped in
+ * @offset_within_region: the beginning of the section, relative to @mr's start
+ * @size: the size of the section; will not exceed @mr's boundaries
+ * @offset_within_address_space: the address of the first byte of the section
+ * relative to the region's address space
+ * @readonly: writes to this section are ignored
+ */
+struct MemoryRegionSection {
+ MemoryRegion *mr;
+ AddressSpace *address_space;
+ hwaddr offset_within_region;
+ Int128 size;
+ hwaddr offset_within_address_space;
+ bool readonly;
+};
+
+/**
+ * memory_region_init: Initialize a memory region
+ *
+ * The region typically acts as a container for other memory regions. Use
+ * memory_region_add_subregion() to add subregions.
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region; any subregions beyond this size will be clipped
+ */
+void memory_region_init(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_ref: Add 1 to a memory region's reference count
+ *
+ * Whenever memory regions are accessed outside the BQL, they need to be
+ * preserved against hot-unplug. MemoryRegions actually do not have their
+ * own reference count; they piggyback on a QOM object, their "owner".
+ * This function adds a reference to the owner.
+ *
+ * All MemoryRegions must have an owner if they can disappear, even if the
+ * device they belong to operates exclusively under the BQL. This is because
+ * the region could be returned at any time by memory_region_find, and this
+ * is usually under guest control.
+ *
+ * @mr: the #MemoryRegion
+ */
+void memory_region_ref(MemoryRegion *mr);
+
+/**
+ * memory_region_unref: Remove 1 from a memory region's reference count
+ *
+ * Whenever memory regions are accessed outside the BQL, they need to be
+ * preserved against hot-unplug. MemoryRegions actually do not have their
+ * own reference count; they piggyback on a QOM object, their "owner".
+ * This function removes a reference to the owner and possibly destroys it.
+ *
+ * @mr: the #MemoryRegion
+ */
+void memory_region_unref(MemoryRegion *mr);
+
+/**
+ * memory_region_init_io: Initialize an I/O memory region.
+ *
+ * Accesses into the region will cause the callbacks in @ops to be called.
+ * If @size is nonzero, subregions will be clipped to @size.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @ops: a structure containing read and write callbacks to be used when
+ * I/O is performed on the region.
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_io(MemoryRegion *mr,
+ struct Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_init_ram: Initialize RAM memory region. Accesses into the
+ * region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_init_ram(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+/**
+ * memory_region_init_resizeable_ram: Initialize memory region with resizeable
+ * RAM. Accesses into the region will
+ * modify memory directly. Only an initial
+ * portion of this RAM is actually used.
+ * The used size can change across reboots.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: used size of the region.
+ * @max_size: max size of the region.
+ * @resized: callback to notify owner about used size change.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_init_resizeable_ram(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ uint64_t max_size,
+ void (*resized)(const char*,
+ uint64_t length,
+ void *host),
+ Error **errp);
+#ifdef __linux__
+/**
+ * memory_region_init_ram_from_file: Initialize RAM memory region with a
+ * mmap-ed backend.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @share: %true if memory must be mmaped with the MAP_SHARED flag
+ * @path: the path in which to allocate the RAM.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_init_ram_from_file(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ bool share,
+ const char *path,
+ Error **errp);
+#endif
+
+/**
+ * memory_region_init_ram_ptr: Initialize RAM memory region from a
+ * user-provided pointer. Accesses into the
+ * region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @ptr: memory to be mapped; must contain at least @size bytes.
+ */
+void memory_region_init_ram_ptr(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ void *ptr);
+
+/**
+ * memory_region_init_alias: Initialize a memory region that aliases all or a
+ * part of another memory region.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @orig: the region to be referenced; @mr will be equivalent to
+ * @orig between @offset and @offset + @size - 1.
+ * @offset: start of the section in @orig to be referenced.
+ * @size: size of the region.
+ */
+void memory_region_init_alias(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ MemoryRegion *orig,
+ hwaddr offset,
+ uint64_t size);
+
+/**
+ * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
+ * handled via callbacks.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @ops: callbacks for write access handling.
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_init_rom_device(MemoryRegion *mr,
+ struct Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
+/**
+ * memory_region_init_reservation: Initialize a memory region that reserves
+ * I/O space.
+ *
+ * A reservation region primarily serves debugging purposes. It claims I/O
+ * space that is not supposed to be handled by QEMU itself. Any access via
+ * the memory API will cause an abort().
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_reservation(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_init_iommu: Initialize a memory region that translates
+ * addresses
+ *
+ * An IOMMU region translates addresses and forwards accesses to a target
+ * memory region.
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count
+ * @ops: a function that translates addresses into the @target region
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_iommu(MemoryRegion *mr,
+ struct Object *owner,
+ const MemoryRegionIOMMUOps *ops,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_owner: get a memory region's owner.
+ *
+ * @mr: the memory region being queried.
+ */
+struct Object *memory_region_owner(MemoryRegion *mr);
+
+/**
+ * memory_region_size: get a memory region's size.
+ *
+ * @mr: the memory region being queried.
+ */
+uint64_t memory_region_size(MemoryRegion *mr);
+
+/**
+ * memory_region_is_ram: check whether a memory region is random access
+ *
+ * Returns %true if a memory region is random access.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_ram(MemoryRegion *mr);
+
+/**
+ * memory_region_is_skip_dump: check whether a memory region should not be
+ * dumped
+ *
+ * Returns %true if a memory region should not be dumped (e.g. VFIO BAR MMAP).
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_skip_dump(MemoryRegion *mr);
+
+/**
+ * memory_region_set_skip_dump: Set the skip_dump flag so that dumps will
+ * ignore this memory region.
+ *
+ * @mr: the memory region being updated
+ */
+void memory_region_set_skip_dump(MemoryRegion *mr);
+
+/**
+ * memory_region_is_romd: check whether a memory region is in ROMD mode
+ *
+ * Returns %true if a memory region is a ROM device and currently set to allow
+ * direct reads.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_romd(MemoryRegion *mr)
+{
+ return mr->rom_device && mr->romd_mode;
+}
+
+/**
+ * memory_region_is_iommu: check whether a memory region is an iommu
+ *
+ * Returns %true if a memory region is an iommu.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_iommu(MemoryRegion *mr);
+
+/**
+ * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
+ *
+ * @mr: the memory region that was changed
+ * @entry: the new entry in the IOMMU translation table. The entry
+ * replaces all old entries for the same virtual I/O address range.
+ * Deleted entries have .@perm == 0.
+ */
+void memory_region_notify_iommu(MemoryRegion *mr,
+ IOMMUTLBEntry entry);
+
+/**
+ * memory_region_register_iommu_notifier: register a notifier for changes to
+ * IOMMU translation entries.
+ *
+ * @mr: the memory region to observe
+ * @n: the notifier to be added; the notifier receives a pointer to an
+ * #IOMMUTLBEntry as the opaque value; the pointer ceases to be
+ * valid on exit from the notifier.
+ */
+void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
+
+/**
+ * memory_region_unregister_iommu_notifier: unregister a notifier for
+ * changes to IOMMU translation entries.
+ *
+ * @n: the notifier to be removed.
+ */
+void memory_region_unregister_iommu_notifier(Notifier *n);
+
+/**
+ * memory_region_name: get a memory region's name
+ *
+ * Returns the string that was used to initialize the memory region.
+ *
+ * @mr: the memory region being queried
+ */
+const char *memory_region_name(const MemoryRegion *mr);
+
+/**
+ * memory_region_is_logging: return whether a memory region is logging writes
+ *
+ * Returns %true if the memory region is logging writes for the given client
+ *
+ * @mr: the memory region being queried
+ * @client: the client being queried
+ */
+bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
+
+/**
+ * memory_region_get_dirty_log_mask: return the clients for which a
+ * memory region is logging writes.
+ *
+ * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
+ * are the bit indices.
+ *
+ * @mr: the memory region being queried
+ */
+uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
+
+/**
+ * memory_region_is_rom: check whether a memory region is ROM
+ *
+ * Returns %true if a memory region is read-only memory.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_rom(MemoryRegion *mr);
+
+/**
+ * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
+ *
+ * Returns a file descriptor backing a file-based RAM memory region,
+ * or -1 if the region is not a file-based RAM memory region.
+ *
+ * @mr: the RAM or alias memory region being queried.
+ */
+int memory_region_get_fd(MemoryRegion *mr);
+
+/**
+ * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
+ *
+ * Returns a host pointer to a RAM memory region (created with
+ * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
+ * care.
+ *
+ * @mr: the memory region being queried.
+ */
+void *memory_region_get_ram_ptr(MemoryRegion *mr);
+
+/* memory_region_ram_resize: Resize a RAM region.
+ *
+ * Only legal before guest might have detected the memory size: e.g. on
+ * incoming migration, or right after reset.
+ *
+ * @mr: a memory region created with @memory_region_init_resizeable_ram.
+ * @newsize: the new size of the region
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
+ Error **errp);
+
+/**
+ * memory_region_set_log: Turn dirty logging on or off for a region.
+ *
+ * Turns dirty logging on or off for a specified client (display, migration).
+ * Only meaningful for RAM regions.
+ *
+ * @mr: the memory region being updated.
+ * @log: whether dirty logging is to be enabled or disabled.
+ * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
+ */
+void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
+
+/**
+ * memory_region_get_dirty: Check whether a range of bytes is dirty
+ * for a specified client.
+ *
+ * Checks whether a range of bytes has been written to since the last
+ * call to memory_region_reset_dirty() with the same @client. Dirty logging
+ * must be enabled.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size, unsigned client);
+
+/**
+ * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
+ *
+ * Marks a range of bytes as dirty, after it has been dirtied outside
+ * guest code.
+ *
+ * @mr: the memory region being dirtied.
+ * @addr: the address (relative to the start of the region) being dirtied.
+ * @size: size of the range being dirtied.
+ */
+void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size);
+
+/**
+ * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
+ * for a specified client, and clear it.
+ *
+ * Checks whether a range of bytes has been written to since the last
+ * call to memory_region_reset_dirty() with the same @client. Dirty logging
+ * must be enabled.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size, unsigned client);
+/**
+ * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
+ * any external TLBs (e.g. kvm)
+ *
+ * Flushes dirty information from accelerators such as kvm and vhost-net
+ * and makes it available to users of the memory API.
+ *
+ * @mr: the region being flushed.
+ */
+void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
+
+/**
+ * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
+ * client.
+ *
+ * Marks a range of pages as no longer dirty.
+ *
+ * @mr: the region being updated.
+ * @addr: the start of the subrange being cleaned.
+ * @size: the size of the subrange being cleaned.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size, unsigned client);
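
A sketch of a display-style consumer of the dirty-logging calls above, assuming a hypothetical framebuffer region `fb` of `fb_size` bytes:

memory_region_set_log(fb, true, DIRTY_MEMORY_VGA);

/* ... later, once per display refresh: */
memory_region_sync_dirty_bitmap(fb);
for (hwaddr off = 0; off < fb_size; off += TARGET_PAGE_SIZE) {
    if (memory_region_test_and_clear_dirty(fb, off, TARGET_PAGE_SIZE,
                                           DIRTY_MEMORY_VGA)) {
        /* redraw the scanlines backed by this page */
    }
}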
+
+/**
+ * memory_region_set_readonly: Turn a memory region read-only (or read-write)
+ *
+ * Allows a memory region to be marked as read-only (turning it into a ROM).
+ * Only useful on RAM regions.
+ *
+ * @mr: the region being updated.
+ * @readonly: whether the region is to be ROM or RAM.
+ */
+void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
+
+/**
+ * memory_region_rom_device_set_romd: enable/disable ROMD mode
+ *
+ * Allows a ROM device (initialized with memory_region_init_rom_device()) to
+ * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
+ * device is mapped to guest memory and satisfies read access directly.
+ * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
+ * Writes are always handled by the #MemoryRegion.write function.
+ *
+ * @mr: the memory region to be updated
+ * @romd_mode: %true to put the region into ROMD mode
+ */
+void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
+
+/**
+ * memory_region_set_coalescing: Enable memory coalescing for the region.
+ *
+ * Enables writes to a region to be queued for later processing. MMIO ->write
+ * callbacks may be delayed until a non-coalesced MMIO is issued.
+ * Only useful for IO regions. Roughly similar to write-combining hardware.
+ *
+ * @mr: the memory region to be write coalesced
+ */
+void memory_region_set_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
+ * a region.
+ *
+ * Like memory_region_set_coalescing(), but works on a sub-range of a region.
+ * Multiple calls can be issued to coalesce disjoint ranges.
+ *
+ * @mr: the memory region to be updated.
+ * @offset: the start of the range within the region to be coalesced.
+ * @size: the size of the subrange to be coalesced.
+ */
+void memory_region_add_coalescing(MemoryRegion *mr,
+ hwaddr offset,
+ uint64_t size);
+
+/**
+ * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
+ *
+ * Disables any coalescing caused by memory_region_set_coalescing() or
+ * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
+ * hardware.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
+ * accesses.
+ *
+ * Ensure that pending coalesced MMIO requests are flushed before the memory
+ * region is accessed. This property is automatically enabled for all regions
+ * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_set_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
+ * accesses.
+ *
+ * Clear the automatic coalesced MMIO flushing enabled via
+ * memory_region_set_flush_coalesced. Note that this service has no effect on
+ * memory regions that have MMIO coalescing enabled for themselves. For them,
+ * automatic flushing will stop once coalescing is disabled.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_set_global_locking: Declares the access processing requires
+ * QEMU's global lock.
+ *
+ * When this is invoked, accesses to the memory region will be processed while
+ * holding the global lock of QEMU. This is the default behavior of memory
+ * regions.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_set_global_locking(MemoryRegion *mr);
+
+/**
+ * memory_region_clear_global_locking: Declares that access processing does
+ * not depend on the QEMU global lock.
+ *
+ * By clearing this property, accesses to the memory region will be processed
+ * outside of QEMU's global lock (unless the lock is held when issuing the
+ * access request). In this case, the device model implementing the access
+ * handlers is responsible for synchronizing concurrent accesses.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_global_locking(MemoryRegion *mr);
+
+/**
+ * memory_region_add_eventfd: Request an eventfd to be triggered when a word
+ * is written to a location.
+ *
+ * Marks a word in an IO region (initialized with memory_region_init_io())
+ * as a trigger for an eventfd event. The I/O callback will not be called.
+ * The caller must be prepared to handle failure (that is, take the required
+ * action if the callback _is_ called).
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: the #EventNotifier to be triggered when @addr, @size, and @data
+ * all match.
+ */
+void memory_region_add_eventfd(MemoryRegion *mr,
+ hwaddr addr,
+ unsigned size,
+ bool match_data,
+ uint64_t data,
+ EventNotifier *e);
+
+/**
+ * memory_region_del_eventfd: Cancel an eventfd.
+ *
+ * Cancels an eventfd trigger requested by a previous
+ * memory_region_add_eventfd() call.
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: the #EventNotifier to be triggered when @addr, @size, and @data
+ * all match.
+ */
+void memory_region_del_eventfd(MemoryRegion *mr,
+ hwaddr addr,
+ unsigned size,
+ bool match_data,
+ uint64_t data,
+ EventNotifier *e);
+
+/**
+ * memory_region_add_subregion: Add a subregion to a container.
+ *
+ * Adds a subregion at @offset. The subregion may not overlap with other
+ * subregions (except for those explicitly marked as overlapping). A region
+ * may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ * initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ */
+void memory_region_add_subregion(MemoryRegion *mr,
+ hwaddr offset,
+ MemoryRegion *subregion);
+/**
+ * memory_region_add_subregion_overlap: Add a subregion to a container
+ * with overlap.
+ *
+ * Adds a subregion at @offset. The subregion may overlap with other
+ * subregions. Conflicts are resolved by having a higher @priority hide a
+ * lower @priority. Subregions without priority are taken as @priority 0.
+ * A region may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ * initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ * @priority: used for resolving overlaps; highest priority wins.
+ */
+void memory_region_add_subregion_overlap(MemoryRegion *mr,
+ hwaddr offset,
+ MemoryRegion *subregion,
+ int priority);
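
A sketch of composing a simple memory map with these calls, assuming `ram` and `shadow` are MemoryRegion pointers allocated by the caller and the names and sizes are hypothetical:

MemoryRegion *sysmem = get_system_memory();

memory_region_init_ram(ram, NULL, "board.ram", 256 * 1024 * 1024,
                       &error_abort);
memory_region_add_subregion(sysmem, 0, ram);

/* A higher-priority alias of RAM's top shadows the first 64KB. */
memory_region_init_alias(shadow, NULL, "board.shadow", ram,
                         0x00ff0000, 0x10000);
memory_region_add_subregion_overlap(sysmem, 0, shadow, 1);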
+
+/**
+ * memory_region_get_ram_addr: Get the ram address associated with a memory
+ * region
+ *
+ * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
+ * code is being reworked.
+ */
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
+
+uint64_t memory_region_get_alignment(const MemoryRegion *mr);
+/**
+ * memory_region_del_subregion: Remove a subregion.
+ *
+ * Removes a subregion from its container.
+ *
+ * @mr: the container to be updated.
+ * @subregion: the region being removed; must be a current subregion of @mr.
+ */
+void memory_region_del_subregion(MemoryRegion *mr,
+ MemoryRegion *subregion);
+
+/*
+ * memory_region_set_enabled: dynamically enable or disable a region
+ *
+ * Enables or disables a memory region. A disabled memory region
+ * ignores all accesses to itself and its subregions. It does not
+ * obscure sibling subregions with lower priority - it simply behaves as
+ * if it was removed from the hierarchy.
+ *
+ * Regions default to being enabled.
+ *
+ * @mr: the region to be updated
+ * @enabled: whether to enable or disable the region
+ */
+void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
+
+/*
+ * memory_region_set_address: dynamically update the address of a region
+ *
+ * Dynamically updates the address of a region, relative to its container.
+ * May be used on regions that are currently part of a memory hierarchy.
+ *
+ * @mr: the region to be updated
+ * @addr: new address, relative to container region
+ */
+void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
+
+/*
+ * memory_region_set_size: dynamically update the size of a region.
+ *
+ * Dynamically updates the size of a region.
+ *
+ * @mr: the region to be updated
+ * @size: used size of the region.
+ */
+void memory_region_set_size(MemoryRegion *mr, uint64_t size);
+
+/*
+ * memory_region_set_alias_offset: dynamically update a memory alias's offset
+ *
+ * Dynamically updates the offset into the target region that an alias points
+ * to, as if the fourth argument to memory_region_init_alias() had changed.
+ *
+ * @mr: the #MemoryRegion to be updated; should be an alias.
+ * @offset: the new offset into the target memory region
+ */
+void memory_region_set_alias_offset(MemoryRegion *mr,
+ hwaddr offset);
+
+/**
+ * memory_region_present: checks if an address relative to a @container
+ * translates into a #MemoryRegion within @container
+ *
+ * Answer whether a #MemoryRegion within @container covers the address
+ * @addr.
+ *
+ * @container: a #MemoryRegion within which @addr is a relative address
+ * @addr: the area within @container to be searched
+ */
+bool memory_region_present(MemoryRegion *container, hwaddr addr);
+
+/**
+ * memory_region_is_mapped: returns true if #MemoryRegion is mapped
+ * into any address space.
+ *
+ * @mr: a #MemoryRegion which should be checked if it's mapped
+ */
+bool memory_region_is_mapped(MemoryRegion *mr);
+
+/**
+ * memory_region_find: translate an address/size relative to a
+ * MemoryRegion into a #MemoryRegionSection.
+ *
+ * Locates the first #MemoryRegion within @mr that overlaps the range
+ * given by @addr and @size.
+ *
+ * Returns a #MemoryRegionSection that describes a contiguous overlap.
+ * It will have the following characteristics:
+ * .@size = 0 iff no overlap was found
+ * .@mr is non-%NULL iff an overlap was found
+ *
+ * Remember that in the return value the @offset_within_region is
+ * relative to the returned region (in the .@mr field), not to the
+ * @mr argument.
+ *
+ * Similarly, the .@offset_within_address_space is relative to the
+ * address space that contains both regions, the passed and the
+ * returned one. However, in the special case where the @mr argument
+ * has no container (and thus is the root of the address space), the
+ * following will hold:
+ * .@offset_within_address_space >= @addr
+ * .@offset_within_address_space + .@size <= @addr + @size
+ *
+ * @mr: a MemoryRegion within which @addr is a relative address
+ * @addr: start of the area within @as to be searched
+ * @size: size of the area to be searched
+ */
+MemoryRegionSection memory_region_find(MemoryRegion *mr,
+ hwaddr addr, uint64_t size);
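
Typical use of the result, assuming `sysmem` and `addr` are in scope; note that (in current QEMU) memory_region_find() takes a reference on the returned region, which the caller is expected to drop:

MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);

if (sec.mr) {                        /* non-NULL iff an overlap exists */
    uint64_t span = int128_get64(sec.size);
    /* ... use sec.offset_within_region and span ... */
    memory_region_unref(sec.mr);     /* drop the reference find() took */
}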
+
+/**
+ * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
+ *
+ * Synchronizes the dirty page log for an entire address space.
+ * @as: the address space that contains the memory being synchronized
+ */
+void address_space_sync_dirty_bitmap(AddressSpace *as);
+
+/**
+ * memory_region_transaction_begin: Start a transaction.
+ *
+ * During a transaction, changes will be accumulated and made visible
+ * only when the transaction ends (is committed).
+ */
+void memory_region_transaction_begin(void);
+
+/**
+ * memory_region_transaction_commit: Commit a transaction and make changes
+ * visible to the guest.
+ */
+void memory_region_transaction_commit(void);
+
+/**
+ * memory_listener_register: register callbacks to be called when memory
+ * sections are mapped or unmapped into an address
+ * space
+ *
+ * @listener: an object containing the callbacks to be called
+ * @filter: if non-%NULL, only regions in this address space will be observed
+ */
+void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
+
+/**
+ * memory_listener_unregister: undo the effect of memory_listener_register()
+ *
+ * @listener: an object containing the callbacks to be removed
+ */
+void memory_listener_unregister(MemoryListener *listener);
+
+/**
+ * memory_global_dirty_log_start: begin dirty logging for all regions
+ */
+void memory_global_dirty_log_start(void);
+
+/**
+ * memory_global_dirty_log_stop: end dirty logging for all regions
+ */
+void memory_global_dirty_log_stop(void);
+
+void mtree_info(fprintf_function mon_printf, void *f);
+
+/**
+ * memory_region_dispatch_read: perform a read directly to the specified
+ * MemoryRegion.
+ *
+ * @mr: #MemoryRegion to access
+ * @addr: address within that region
+ * @pval: pointer to uint64_t which the data is written to
+ * @size: size of the access in bytes
+ * @attrs: memory transaction attributes to use for the access
+ */
+MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t *pval,
+ unsigned size,
+ MemTxAttrs attrs);
+/**
+ * memory_region_dispatch_write: perform a write directly to the specified
+ * MemoryRegion.
+ *
+ * @mr: #MemoryRegion to access
+ * @addr: address within that region
+ * @data: data to write
+ * @size: size of the access in bytes
+ * @attrs: memory transaction attributes to use for the access
+ */
+MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t data,
+ unsigned size,
+ MemTxAttrs attrs);
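+
+/* Usage sketch (illustrative only; "mr" is some #MemoryRegion at hand):
+ * a 4-byte read issued straight at the region, bypassing address space
+ * dispatch, with default transaction attributes.
+ *
+ *     uint64_t val;
+ *     MemTxResult r;
+ *
+ *     r = memory_region_dispatch_read(mr, 0x10, &val, 4,
+ *                                     MEMTXATTRS_UNSPECIFIED);
+ *     if (r != MEMTX_OK) {
+ *         // the device or the decode path rejected the access
+ *     }
+ */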
+
+/**
+ * address_space_init: initializes an address space
+ *
+ * @as: an uninitialized #AddressSpace
+ * @root: a #MemoryRegion that routes addresses for the address space
+ * @name: an address space name. The name is only used for debugging
+ * output.
+ */
+void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
+
+
+/**
+ * address_space_destroy: destroy an address space
+ *
+ * Releases all resources associated with an address space. After an address space
+ * is destroyed, its root memory region (given by address_space_init()) may be destroyed
+ * as well.
+ *
+ * @as: address space to be destroyed
+ */
+void address_space_destroy(AddressSpace *as);
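+
+/* Usage sketch (illustrative only; "dma_root" is a hypothetical
+ * #MemoryRegion owned by a device): the init/destroy lifecycle.
+ *
+ *     AddressSpace dma_as;
+ *
+ *     address_space_init(&dma_as, dma_root, "demo-dma");
+ *     // ... use the address space ...
+ *     address_space_destroy(&dma_as);   // before tearing down dma_root
+ */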
+
+/**
+ * address_space_rw: read from or write to an address space.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred, in bytes
+ * @is_write: indicates the transfer direction
+ */
+MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf,
+ int len, bool is_write);
+
+/**
+ * address_space_write: write to address space.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred, in bytes
+ */
+MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs,
+ const uint8_t *buf, int len);
+
+/**
+ * address_space_read: read from an address space.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred, in bytes
+ */
+MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
+ uint8_t *buf, int len);
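+
+/* Usage sketch (illustrative only; "base" is a hypothetical guest
+ * address): a read followed by a write, checking the MemTxResult
+ * instead of assuming success.
+ *
+ *     uint8_t buf[16];
+ *     MemTxResult r;
+ *
+ *     r = address_space_read(&address_space_memory, base,
+ *                            MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
+ *     if (r == MEMTX_OK) {
+ *         r = address_space_write(&address_space_memory, base + 0x100,
+ *                                 MEMTXATTRS_UNSPECIFIED, buf,
+ *                                 sizeof(buf));
+ *     }
+ */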
+
+/**
+ * address_space_ld*: load from an address space
+ * address_space_st*: store to an address space
+ *
+ * These functions perform a load or store of the byte, word,
+ * longword or quad to the specified address within the AddressSpace.
+ * The _le suffixed functions treat the data as little endian;
+ * _be indicates big endian; no suffix indicates "same endianness
+ * as guest CPU".
+ *
+ * The "guest CPU endianness" accessors are deprecated for use outside
+ * target-* code; devices should be CPU-agnostic and use either the LE
+ * or the BE accessors.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @val: data value, for stores
+ * @attrs: memory transaction attributes
+ * @result: location to write the success/failure of the transaction;
+ * if NULL, this information is discarded
+ */
+uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result);
+uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result);
+uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result);
+void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result);
+void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result);
+void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result);
+void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
+ MemTxAttrs attrs, MemTxResult *result);
+void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
+ MemTxAttrs attrs, MemTxResult *result);
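+
+/* Usage sketch (illustrative only; "reg_base" is hypothetical): a
+ * little-endian 32-bit register access; pass %NULL as @result when the
+ * outcome does not matter.
+ *
+ *     MemTxResult r;
+ *     uint32_t v;
+ *
+ *     v = address_space_ldl_le(&address_space_memory, reg_base,
+ *                              MEMTXATTRS_UNSPECIFIED, &r);
+ *     if (r == MEMTX_OK) {
+ *         address_space_stl_le(&address_space_memory, reg_base, v | 1,
+ *                              MEMTXATTRS_UNSPECIFIED, NULL);
+ *     }
+ */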
+
+#ifdef NEED_CPU_H
+uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result);
+uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result);
+void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
+ MemTxAttrs attrs, MemTxResult *result);
+void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
+ MemTxAttrs attrs, MemTxResult *result);
+#endif
+
+/* address_space_translate: translate an address range within an address
+ * space into a MemoryRegion and an address range within that region.
+ * Should be called from an RCU critical section, so that the last
+ * reference to the returned region does not disappear after
+ * address_space_translate returns.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @xlat: pointer to address within the returned memory region section's
+ * #MemoryRegion.
+ * @len: pointer to length
+ * @is_write: indicates the transfer direction
+ */
+MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
+ hwaddr *xlat, hwaddr *len,
+ bool is_write);
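+
+/* Usage sketch (illustrative only; "addr" is hypothetical): resolve an
+ * address under the RCU read lock so the region cannot go away while it
+ * is being inspected.
+ *
+ *     hwaddr xlat, len = 4;
+ *     MemoryRegion *mr;
+ *
+ *     rcu_read_lock();
+ *     mr = address_space_translate(&address_space_memory, addr,
+ *                                  &xlat, &len, false);
+ *     if (memory_region_is_ram(mr)) {
+ *         // mr and xlat are safe to use until rcu_read_unlock()
+ *     }
+ *     rcu_read_unlock();
+ */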
+
+/* address_space_access_valid: check for validity of accessing an address
+ * space range
+ *
+ * Check whether memory is assigned to the given address space range, and
+ * access is permitted by any IOMMU regions that are active for the address
+ * space.
+ *
+ * For now, addr and len should be aligned to a page size. This limitation
+ * will be lifted in the future.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @len: length of the area to be checked
+ * @is_write: indicates the transfer direction
+ */
+bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
+
+/* address_space_map: map a physical memory region into a host virtual address
+ *
+ * May map a subset of the requested range; @plen holds the requested
+ * length on entry and the length actually mapped on return.
+ * May return %NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ * Use cpu_register_map_client() to know when retrying the map operation is
+ * likely to succeed.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @plen: pointer to length of buffer; updated on return
+ * @is_write: indicates the transfer direction
+ */
+void *address_space_map(AddressSpace *as, hwaddr addr,
+ hwaddr *plen, bool is_write);
+
+/* address_space_unmap: unmap a memory region previously mapped by
+ * address_space_map()
+ *
+ * Will also mark the memory as dirty if @is_write == %true. @access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ *
+ * @as: #AddressSpace used
+ * @buffer: host pointer as returned by address_space_map()
+ * @len: buffer length as returned by address_space_map()
+ * @access_len: amount of data actually transferred
+ * @is_write: indicates the transfer direction
+ */
+void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
+ int is_write, hwaddr access_len);
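+
+/* Usage sketch (illustrative only; "addr", "data" and "size" are
+ * hypothetical): a zero-copy write into guest memory, tolerating a
+ * shortened mapping.
+ *
+ *     hwaddr plen = size;
+ *     void *p;
+ *
+ *     p = address_space_map(&address_space_memory, addr, &plen, true);
+ *     if (p) {
+ *         memcpy(p, data, plen);        // plen may be less than size
+ *         address_space_unmap(&address_space_memory, p, plen, true, plen);
+ *     }
+ */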
+
+#endif
+
+#endif
diff --git a/include/exec/poison.h b/include/exec/poison.h
new file mode 100644
index 00000000..a4b1eca2
--- /dev/null
+++ b/include/exec/poison.h
@@ -0,0 +1,62 @@
+/* Poison identifiers that should not be used when building
+ target independent device code. */
+
+#ifndef HW_POISON_H
+#define HW_POISON_H
+#ifdef __GNUC__
+
+#pragma GCC poison TARGET_I386
+#pragma GCC poison TARGET_X86_64
+#pragma GCC poison TARGET_ALPHA
+#pragma GCC poison TARGET_ARM
+#pragma GCC poison TARGET_CRIS
+#pragma GCC poison TARGET_LM32
+#pragma GCC poison TARGET_M68K
+#pragma GCC poison TARGET_MIPS
+#pragma GCC poison TARGET_MIPS64
+#pragma GCC poison TARGET_OPENRISC
+#pragma GCC poison TARGET_PPC
+#pragma GCC poison TARGET_PPCEMB
+#pragma GCC poison TARGET_PPC64
+#pragma GCC poison TARGET_ABI32
+#pragma GCC poison TARGET_SH4
+#pragma GCC poison TARGET_SPARC
+#pragma GCC poison TARGET_SPARC64
+
+#pragma GCC poison TARGET_WORDS_BIGENDIAN
+#pragma GCC poison BSWAP_NEEDED
+
+#pragma GCC poison TARGET_LONG_BITS
+#pragma GCC poison TARGET_FMT_lx
+#pragma GCC poison TARGET_FMT_ld
+
+#pragma GCC poison TARGET_PAGE_SIZE
+#pragma GCC poison TARGET_PAGE_MASK
+#pragma GCC poison TARGET_PAGE_BITS
+#pragma GCC poison TARGET_PAGE_ALIGN
+
+#pragma GCC poison CPUArchState
+
+#pragma GCC poison lduw_phys
+#pragma GCC poison ldl_phys
+#pragma GCC poison ldq_phys
+#pragma GCC poison stl_phys_notdirty
+#pragma GCC poison stw_phys
+#pragma GCC poison stl_phys
+#pragma GCC poison stq_phys
+
+#pragma GCC poison CPU_INTERRUPT_HARD
+#pragma GCC poison CPU_INTERRUPT_EXITTB
+#pragma GCC poison CPU_INTERRUPT_HALT
+#pragma GCC poison CPU_INTERRUPT_DEBUG
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_0
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_1
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
+
+#endif
+#endif
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
new file mode 100644
index 00000000..c113f211
--- /dev/null
+++ b/include/exec/ram_addr.h
@@ -0,0 +1,253 @@
+/*
+ * Declarations for cpu physical memory functions
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c and memory.c ONLY. Do not include it.
+ * The functions declared here will be removed soon.
+ */
+
+#ifndef RAM_ADDR_H
+#define RAM_ADDR_H
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/xen/xen.h"
+
+ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
+ bool share, const char *mem_path,
+ Error **errp);
+ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+ MemoryRegion *mr, Error **errp);
+ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
+ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
+ void (*resized)(const char*,
+ uint64_t length,
+ void *host),
+ MemoryRegion *mr, Error **errp);
+int qemu_get_ram_fd(ram_addr_t addr);
+void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
+void *qemu_get_ram_ptr(ram_addr_t addr);
+void qemu_ram_free(ram_addr_t addr);
+void qemu_ram_free_from_ptr(ram_addr_t addr);
+
+int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);
+
+#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
+#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
+
+static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
+ ram_addr_t length,
+ unsigned client)
+{
+ unsigned long end, page, next;
+
+ assert(client < DIRTY_MEMORY_NUM);
+
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+ next = find_next_bit(ram_list.dirty_memory[client], end, page);
+
+ return next < end;
+}
+
+static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
+ ram_addr_t length,
+ unsigned client)
+{
+ unsigned long end, page, next;
+
+ assert(client < DIRTY_MEMORY_NUM);
+
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+ next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);
+
+ return next >= end;
+}
+
+static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
+ unsigned client)
+{
+ return cpu_physical_memory_get_dirty(addr, 1, client);
+}
+
+/* A page counts as clean unless it is currently dirty for all three
+ * clients (VGA, code and migration) at once.
+ */
+static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
+{
+ bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
+ bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
+ bool migration =
+ cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
+ return !(vga && code && migration);
+}
+
+static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
+ ram_addr_t length,
+ uint8_t mask)
+{
+ uint8_t ret = 0;
+
+ if (mask & (1 << DIRTY_MEMORY_VGA) &&
+ !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
+ ret |= (1 << DIRTY_MEMORY_VGA);
+ }
+ if (mask & (1 << DIRTY_MEMORY_CODE) &&
+ !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
+ ret |= (1 << DIRTY_MEMORY_CODE);
+ }
+ if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
+ !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
+ ret |= (1 << DIRTY_MEMORY_MIGRATION);
+ }
+ return ret;
+}
+
+static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
+ unsigned client)
+{
+ assert(client < DIRTY_MEMORY_NUM);
+ set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
+}
+
+static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
+ ram_addr_t length,
+ uint8_t mask)
+{
+ unsigned long end, page;
+ unsigned long **d = ram_list.dirty_memory;
+
+ end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
+ page = start >> TARGET_PAGE_BITS;
+ if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+ bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+ bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+ bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
+ }
+ xen_modified_memory(start, length);
+}
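+
+/* Usage sketch (illustrative only; "ram_addr" and "len" are
+ * hypothetical): exec.c-style code marking a freshly written RAM range
+ * dirty for every client that tracks it.
+ *
+ *     cpu_physical_memory_set_dirty_range(ram_addr, len,
+ *                                         DIRTY_CLIENTS_ALL);
+ */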
+
+#if !defined(_WIN32)
+static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
+ ram_addr_t start,
+ ram_addr_t pages)
+{
+ unsigned long i, j;
+ unsigned long page_number, c;
+ hwaddr addr;
+ ram_addr_t ram_addr;
+ unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
+ unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
+ unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+
+ /* start address is aligned at the start of a word? */
+ if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
+ (hpratio == 1)) {
+ long k;
+ long nr = BITS_TO_LONGS(pages);
+
+ for (k = 0; k < nr; k++) {
+ if (bitmap[k]) {
+ unsigned long temp = leul_to_cpu(bitmap[k]);
+ unsigned long **d = ram_list.dirty_memory;
+
+ atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
+ atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
+ if (tcg_enabled()) {
+ atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
+ }
+ }
+ }
+ xen_modified_memory(start, pages << TARGET_PAGE_BITS);
+ } else {
+ uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
+ /*
+ * bitmap-traveling is faster than memory-traveling (for addr...)
+ * especially when most of the memory is not dirty.
+ */
+ for (i = 0; i < len; i++) {
+ if (bitmap[i] != 0) {
+ c = leul_to_cpu(bitmap[i]);
+ do {
+ j = ctzl(c);
+ c &= ~(1ul << j);
+ page_number = (i * HOST_LONG_BITS + j) * hpratio;
+ addr = page_number * TARGET_PAGE_SIZE;
+ ram_addr = start + addr;
+ cpu_physical_memory_set_dirty_range(ram_addr,
+ TARGET_PAGE_SIZE * hpratio, clients);
+ } while (c != 0);
+ }
+ }
+ }
+}
+#endif /* not _WIN32 */
+
+bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
+ ram_addr_t length,
+ unsigned client);
+
+static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
+ ram_addr_t length)
+{
+ cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
+ cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
+ cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
+}
+
+/* Transfer the migration dirty bits for [@start, @start + @length) into
+ * @dest, clearing them in the global bitmap, and return the number of
+ * pages that became newly dirty in @dest.
+ */
+static inline
+uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
+ ram_addr_t start,
+ ram_addr_t length)
+{
+ ram_addr_t addr;
+ unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+ uint64_t num_dirty = 0;
+
+ /* start address is aligned at the start of a word? */
+ if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
+ int k;
+ int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
+ unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
+
+ for (k = page; k < page + nr; k++) {
+ if (src[k]) {
+ unsigned long bits = atomic_xchg(&src[k], 0);
+ unsigned long new_dirty;
+ new_dirty = ~dest[k];
+ dest[k] |= bits;
+ new_dirty &= bits;
+ num_dirty += ctpopl(new_dirty);
+ }
+ }
+ } else {
+ for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+ if (cpu_physical_memory_test_and_clear_dirty(
+ start + addr,
+ TARGET_PAGE_SIZE,
+ DIRTY_MEMORY_MIGRATION)) {
+ long k = (start + addr) >> TARGET_PAGE_BITS;
+ if (!test_and_set_bit(k, dest)) {
+ num_dirty++;
+ }
+ }
+ }
+ }
+
+ return num_dirty;
+}
+
+#endif
+#endif
diff --git a/include/exec/semihost.h b/include/exec/semihost.h
new file mode 100644
index 00000000..5980939c
--- /dev/null
+++ b/include/exec/semihost.h
@@ -0,0 +1,62 @@
+/*
+ * Semihosting support
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef SEMIHOST_H
+#define SEMIHOST_H
+
+typedef enum SemihostingTarget {
+ SEMIHOSTING_TARGET_AUTO = 0,
+ SEMIHOSTING_TARGET_NATIVE,
+ SEMIHOSTING_TARGET_GDB
+} SemihostingTarget;
+
+#ifdef CONFIG_USER_ONLY
+static inline bool semihosting_enabled(void)
+{
+ return true;
+}
+
+static inline SemihostingTarget semihosting_get_target(void)
+{
+ return SEMIHOSTING_TARGET_AUTO;
+}
+
+static inline const char *semihosting_get_arg(int i)
+{
+ return NULL;
+}
+
+static inline int semihosting_get_argc(void)
+{
+ return 0;
+}
+
+static inline const char *semihosting_get_cmdline(void)
+{
+ return NULL;
+}
+#else
+bool semihosting_enabled(void);
+SemihostingTarget semihosting_get_target(void);
+const char *semihosting_get_arg(int i);
+int semihosting_get_argc(void);
+const char *semihosting_get_cmdline(void);
+#endif
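+
+/* Usage sketch (illustrative only): target code typically gates its
+ * semihosting trap handler like this.
+ *
+ *     if (semihosting_enabled()) {
+ *         if (semihosting_get_target() == SEMIHOSTING_TARGET_GDB) {
+ *             // forward the call to the gdb stub
+ *         } else {
+ *             // emulate the call natively
+ *         }
+ *     }
+ */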
+
+#endif
diff --git a/include/exec/softmmu-semi.h b/include/exec/softmmu-semi.h
new file mode 100644
index 00000000..1819cc24
--- /dev/null
+++ b/include/exec/softmmu-semi.h
@@ -0,0 +1,80 @@
+/*
+ * Helper routines to provide target memory access for semihosting
+ * syscalls in system emulation mode.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ *
+ * This code is licensed under the GPL
+ */
+#ifndef SOFTMMU_SEMI_H
+#define SOFTMMU_SEMI_H 1
+
+static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr)
+{
+ uint32_t val;
+
+ cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 0);
+ return tswap32(val);
+}
+static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr)
+{
+ uint8_t val;
+
+ cpu_memory_rw_debug(ENV_GET_CPU(env), addr, &val, 1, 0);
+ return val;
+}
+
+#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p); 0; })
+#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p); 0; })
+#define get_user_ual(arg, p) get_user_u32(arg, p)
+
+static inline void softmmu_tput32(CPUArchState *env,
+ target_ulong addr, uint32_t val)
+{
+ val = tswap32(val);
+ cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 1);
+}
+#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg); 0; })
+#define put_user_ual(arg, p) put_user_u32(arg, p)
+
+static void *softmmu_lock_user(CPUArchState *env,
+ target_ulong addr, target_ulong len, int copy)
+{
+ uint8_t *p;
+ /* TODO: Make this something that isn't fixed size. */
+ p = malloc(len);
+ if (p && copy) {
+ cpu_memory_rw_debug(ENV_GET_CPU(env), addr, p, len, 0);
+ }
+ return p;
+}
+#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
+static char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr)
+{
+ char *p;
+ char *s;
+ uint8_t c;
+ /* TODO: Make this something that isn't fixed size. */
+ s = p = malloc(1024);
+ if (!s) {
+ return NULL;
+ }
+ do {
+ cpu_memory_rw_debug(ENV_GET_CPU(env), addr, &c, 1, 0);
+ addr++;
+ *(p++) = c;
+ } while (c);
+ return s;
+}
+#define lock_user_string(p) softmmu_lock_user_string(env, p)
+static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
+ target_ulong len)
+{
+ if (len) {
+ cpu_memory_rw_debug(ENV_GET_CPU(env), addr, p, len, 1);
+ }
+ free(p);
+}
+#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)
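+
+/* Usage sketch (illustrative only; "arg_addr" is hypothetical and a
+ * local CPUArchState *env must be in scope for these macros): pulling a
+ * NUL-terminated string out of guest memory for a semihosting call.
+ *
+ *     char *s = lock_user_string(arg_addr);
+ *     if (s) {
+ *         qemu_log("%s", s);
+ *         unlock_user(s, arg_addr, 0);  // len 0: nothing written back
+ *     }
+ */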
+
+#endif
diff --git a/include/exec/spinlock.h b/include/exec/spinlock.h
new file mode 100644
index 00000000..a72edda1
--- /dev/null
+++ b/include/exec/spinlock.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+/* configure guarantees us that we have pthreads on any host except
+ * mingw32, which doesn't support any of the user-only targets.
+ * So we can simply assume we have pthread mutexes here.
+ */
+#if defined(CONFIG_USER_ONLY)
+
+#include <pthread.h>
+#define spin_lock pthread_mutex_lock
+#define spin_unlock pthread_mutex_unlock
+#define spinlock_t pthread_mutex_t
+#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
+
+#else
+
+/* Empty implementations, on the theory that system mode emulation
+ * is single-threaded. This means that these functions should only
+ * be used from code run in the TCG cpu thread, and cannot protect
+ * data structures which might also be accessed from the IO thread
+ * or from signal handlers.
+ */
+typedef int spinlock_t;
+#define SPIN_LOCK_UNLOCKED 0
+
+static inline void spin_lock(spinlock_t *lock)
+{
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+}
+
+#endif
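+
+/* Usage sketch (illustrative only; "demo_lock" is hypothetical). Note
+ * the system-mode variant is a no-op, per the comment above.
+ *
+ *     static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;
+ *
+ *     spin_lock(&demo_lock);
+ *     // touch state shared between user-mode threads
+ *     spin_unlock(&demo_lock);
+ */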
diff --git a/include/exec/tb-hash.h b/include/exec/tb-hash.h
new file mode 100644
index 00000000..0f4e8a08
--- /dev/null
+++ b/include/exec/tb-hash.h
@@ -0,0 +1,51 @@
+/*
+ * internal execution defines for qemu
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef EXEC_TB_HASH
+#define EXEC_TB_HASH
+
+/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
+ addresses on the same page. The top bits are the same. This allows
+ TLB invalidation to quickly clear a subset of the hash table. */
+#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
+#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
+#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
+#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
+
+static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
+{
+ target_ulong tmp;
+ tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
+ return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
+}
+
+static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+{
+ target_ulong tmp;
+ tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
+ return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
+ | (tmp & TB_JMP_ADDR_MASK));
+}
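+
+/* Usage sketch (illustrative only, assuming CPUState's tb_jmp_cache
+ * array as in this tree's qom/cpu.h): the fast TB lookup path indexes
+ * the per-CPU jump cache with this hash.
+ *
+ *     TranslationBlock *tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
+ *     if (tb && tb->pc == pc) {
+ *         // jump cache hit
+ *     }
+ */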
+
+static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
+{
+ return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
+}
+
+#endif
diff --git a/include/exec/user/abitypes.h b/include/exec/user/abitypes.h
new file mode 100644
index 00000000..80eedacc
--- /dev/null
+++ b/include/exec/user/abitypes.h
@@ -0,0 +1,66 @@
+#ifndef QEMU_TYPES_H
+#define QEMU_TYPES_H
+#include "cpu.h"
+
+#ifdef TARGET_ABI32
+#define TARGET_ABI_BITS 32
+#else
+#define TARGET_ABI_BITS TARGET_LONG_BITS
+#endif
+
+#ifdef TARGET_M68K
+#define ABI_INT_ALIGNMENT 2
+#define ABI_LONG_ALIGNMENT 2
+#define ABI_LLONG_ALIGNMENT 2
+#endif
+
+#ifndef ABI_SHORT_ALIGNMENT
+#define ABI_SHORT_ALIGNMENT 2
+#endif
+#ifndef ABI_INT_ALIGNMENT
+#define ABI_INT_ALIGNMENT 4
+#endif
+#ifndef ABI_LONG_ALIGNMENT
+#define ABI_LONG_ALIGNMENT (TARGET_ABI_BITS / 8)
+#endif
+#ifndef ABI_LLONG_ALIGNMENT
+#define ABI_LLONG_ALIGNMENT 8
+#endif
+
+typedef int16_t abi_short __attribute__ ((aligned(ABI_SHORT_ALIGNMENT)));
+typedef uint16_t abi_ushort __attribute__((aligned(ABI_SHORT_ALIGNMENT)));
+typedef int32_t abi_int __attribute__((aligned(ABI_INT_ALIGNMENT)));
+typedef uint32_t abi_uint __attribute__((aligned(ABI_INT_ALIGNMENT)));
+typedef int64_t abi_llong __attribute__((aligned(ABI_LLONG_ALIGNMENT)));
+typedef uint64_t abi_ullong __attribute__((aligned(ABI_LLONG_ALIGNMENT)));
+
+#ifdef TARGET_ABI32
+typedef uint32_t abi_ulong __attribute__((aligned(ABI_LONG_ALIGNMENT)));
+typedef int32_t abi_long __attribute__((aligned(ABI_LONG_ALIGNMENT)));
+#define TARGET_ABI_FMT_lx "%08x"
+#define TARGET_ABI_FMT_ld "%d"
+#define TARGET_ABI_FMT_lu "%u"
+
+static inline abi_ulong tswapal(abi_ulong v)
+{
+ return tswap32(v);
+}
+
+#else
+typedef target_ulong abi_ulong __attribute__((aligned(ABI_LONG_ALIGNMENT)));
+typedef target_long abi_long __attribute__((aligned(ABI_LONG_ALIGNMENT)));
+#define TARGET_ABI_FMT_lx TARGET_FMT_lx
+#define TARGET_ABI_FMT_ld TARGET_FMT_ld
+#define TARGET_ABI_FMT_lu TARGET_FMT_lu
+/* for consistency, define ABI32 too */
+#if TARGET_ABI_BITS == 32
+#define TARGET_ABI32 1
+#endif
+
+static inline abi_ulong tswapal(abi_ulong v)
+{
+ return tswapl(v);
+}
+
+#endif
+#endif
diff --git a/include/exec/user/thunk.h b/include/exec/user/thunk.h
new file mode 100644
index 00000000..3b674627
--- /dev/null
+++ b/include/exec/user/thunk.h
@@ -0,0 +1,191 @@
+/*
+ * Generic thunking code to convert data between host and target CPU
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef THUNK_H
+#define THUNK_H
+
+#include <inttypes.h>
+#include "cpu.h"
+
+/* type enum definitions */
+
+typedef enum argtype {
+ TYPE_NULL,
+ TYPE_CHAR,
+ TYPE_SHORT,
+ TYPE_INT,
+ TYPE_LONG,
+ TYPE_ULONG,
+    TYPE_PTRVOID, /* pointer to unknown data */
+ TYPE_LONGLONG,
+ TYPE_ULONGLONG,
+ TYPE_PTR,
+ TYPE_ARRAY,
+ TYPE_STRUCT,
+ TYPE_OLDDEVT,
+} argtype;
+
+#define MK_PTR(type) TYPE_PTR, type
+#define MK_ARRAY(type, size) TYPE_ARRAY, size, type
+#define MK_STRUCT(id) TYPE_STRUCT, id
+
+#define THUNK_TARGET 0
+#define THUNK_HOST 1
+
+typedef struct {
+ /* standard struct handling */
+ const argtype *field_types;
+ int nb_fields;
+ int *field_offsets[2];
+ /* special handling */
+ void (*convert[2])(void *dst, const void *src);
+ int size[2];
+ int align[2];
+ const char *name;
+} StructEntry;
+
+/* Translation table for bitmasks... */
+typedef struct bitmask_transtbl {
+ unsigned int x86_mask;
+ unsigned int x86_bits;
+ unsigned int alpha_mask;
+ unsigned int alpha_bits;
+} bitmask_transtbl;
+
+void thunk_register_struct(int id, const char *name, const argtype *types);
+void thunk_register_struct_direct(int id, const char *name,
+ const StructEntry *se1);
+const argtype *thunk_convert(void *dst, const void *src,
+ const argtype *type_ptr, int to_host);
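+
+/* Usage sketch (illustrative only; STRUCT_demo, the field layout and the
+ * buffers are hypothetical): describe a target struct once, then convert
+ * instances of it in either direction.
+ *
+ *     static const argtype demo_field_types[] = {
+ *         TYPE_INT, TYPE_LONG, MK_ARRAY(TYPE_CHAR, 16), TYPE_NULL
+ *     };
+ *     const argtype arg_type[] = { MK_STRUCT(STRUCT_demo) };
+ *
+ *     thunk_register_struct(STRUCT_demo, "demo", demo_field_types);
+ *     thunk_convert(host_buf, target_buf, arg_type, THUNK_HOST);
+ */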
+#ifndef NO_THUNK_TYPE_SIZE
+
+extern StructEntry *struct_entries;
+
+int thunk_type_size_array(const argtype *type_ptr, int is_host);
+int thunk_type_align_array(const argtype *type_ptr, int is_host);
+
+static inline int thunk_type_size(const argtype *type_ptr, int is_host)
+{
+ int type, size;
+ const StructEntry *se;
+
+ type = *type_ptr;
+ switch(type) {
+ case TYPE_CHAR:
+ return 1;
+ case TYPE_SHORT:
+ return 2;
+ case TYPE_INT:
+ return 4;
+ case TYPE_LONGLONG:
+ case TYPE_ULONGLONG:
+ return 8;
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ case TYPE_PTR:
+ if (is_host) {
+ return sizeof(void *);
+ } else {
+ return TARGET_ABI_BITS / 8;
+ }
+ break;
+ case TYPE_OLDDEVT:
+ if (is_host) {
+#if defined(HOST_X86_64)
+ return 8;
+#elif defined(HOST_ALPHA) || defined(HOST_IA64) || defined(HOST_MIPS) || \
+ defined(HOST_PARISC) || defined(HOST_SPARC64)
+ return 4;
+#elif defined(HOST_PPC)
+ return sizeof(void *);
+#else
+ return 2;
+#endif
+ } else {
+#if defined(TARGET_X86_64)
+ return 8;
+#elif defined(TARGET_ALPHA) || defined(TARGET_IA64) || defined(TARGET_MIPS) || \
+ defined(TARGET_PARISC) || defined(TARGET_SPARC64)
+ return 4;
+#elif defined(TARGET_PPC)
+ return TARGET_ABI_BITS / 8;
+#else
+ return 2;
+#endif
+ }
+ break;
+ case TYPE_ARRAY:
+ size = type_ptr[1];
+ return size * thunk_type_size_array(type_ptr + 2, is_host);
+ case TYPE_STRUCT:
+ se = struct_entries + type_ptr[1];
+ return se->size[is_host];
+ default:
+ return -1;
+ }
+}
+
+static inline int thunk_type_align(const argtype *type_ptr, int is_host)
+{
+ int type;
+ const StructEntry *se;
+
+ type = *type_ptr;
+ switch(type) {
+ case TYPE_CHAR:
+ return 1;
+ case TYPE_SHORT:
+ return 2;
+ case TYPE_INT:
+ return 4;
+ case TYPE_LONGLONG:
+ case TYPE_ULONGLONG:
+ return 8;
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ case TYPE_PTR:
+ if (is_host) {
+ return sizeof(void *);
+ } else {
+ return TARGET_ABI_BITS / 8;
+ }
+ break;
+ case TYPE_OLDDEVT:
+ return thunk_type_size(type_ptr, is_host);
+ case TYPE_ARRAY:
+ return thunk_type_align_array(type_ptr + 2, is_host);
+ case TYPE_STRUCT:
+ se = struct_entries + type_ptr[1];
+ return se->align[is_host];
+ default:
+ return -1;
+ }
+}
+
+#endif /* NO_THUNK_TYPE_SIZE */
+
+unsigned int target_to_host_bitmask(unsigned int x86_mask,
+ const bitmask_transtbl * trans_tbl);
+unsigned int host_to_target_bitmask(unsigned int alpha_mask,
+ const bitmask_transtbl * trans_tbl);
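+
+/* Usage sketch (illustrative only; the flag names are hypothetical):
+ * tables map individual bits between the two ABIs and are terminated by
+ * an all-zero entry, in the style of linux-user's fcntl_flags_tbl.
+ *
+ *     static const bitmask_transtbl flags_tbl[] = {
+ *         { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND },
+ *         { 0, 0, 0, 0 },
+ *     };
+ *
+ *     host_flags = target_to_host_bitmask(target_flags, flags_tbl);
+ */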
+
+void thunk_init(unsigned int max_structs);
+
+#endif