Commit 4acb54baa6557244fd9ea60d8119abfbefae6777

Authored by Edgar E. Iglesias
1 parent 370ab986

microblaze: Add translation routines.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@gmail.com>
target-microblaze/cpu.h 0 → 100644
  1 +/*
  2 + * MicroBlaze virtual CPU header
  3 + *
  4 + * Copyright (c) 2009 Edgar E. Iglesias
  5 + *
  6 + * This library is free software; you can redistribute it and/or
  7 + * modify it under the terms of the GNU Lesser General Public
  8 + * License as published by the Free Software Foundation; either
  9 + * version 2 of the License, or (at your option) any later version.
  10 + *
  11 + * This library is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14 + * General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU Lesser General Public
  17 + * License along with this library; if not, write to the Free Software
  18 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
  19 + */
#ifndef CPU_MICROBLAZE_H
#define CPU_MICROBLAZE_H

/* MicroBlaze is a 32-bit target. */
#define TARGET_LONG_BITS 32

/* Map the generic CPUState name used by common QEMU code onto this
   target's state structure. */
#define CPUState struct CPUMBState

#include "cpu-defs.h"
struct CPUMBState;
#if !defined(CONFIG_USER_ONLY)
#include "mmu.h"
#endif

/* Target supports gdb-stub breakpoints. */
#define TARGET_HAS_ICE 1

#define ELF_MACHINE EM_XILINX_MICROBLAZE

/* Values delivered through env->exception_index (see do_interrupt). */
#define EXCP_NMI 1
#define EXCP_MMU 2
#define EXCP_IRQ 3
#define EXCP_BREAK 4
#define EXCP_HW_BREAK 5
  42 +
/* Register aliases. R0 - R15 */
#define R_SP 1
/* Indices into env->sregs[] for the special registers. */
#define SR_PC 0
#define SR_MSR 1
#define SR_EAR 3
#define SR_ESR 5
#define SR_FSR 7
#define SR_BTR 0xb
#define SR_EDR 0xd

/* MSR flags. */
#define MSR_BE (1<<0) /* 0x001 */
#define MSR_IE (1<<1) /* 0x002 */
#define MSR_C (1<<2) /* 0x004 */
#define MSR_BIP (1<<3) /* 0x008 */
#define MSR_FSL (1<<4) /* 0x010 */
#define MSR_ICE (1<<5) /* 0x020 */
#define MSR_DZ (1<<6) /* 0x040 */
#define MSR_DCE (1<<7) /* 0x080 */
#define MSR_EE (1<<8) /* 0x100 */
#define MSR_EIP (1<<9) /* 0x200 */
#define MSR_CC (1<<31)

/* Machine State Register (MSR) Fields */
#define MSR_UM (1<<11) /* User Mode */
#define MSR_UMS (1<<12) /* User Mode Save */
#define MSR_VM (1<<13) /* Virtual Mode */
#define MSR_VMS (1<<14) /* Virtual Mode Save */

/* Composite masks.  Parenthesized so they expand safely inside larger
   expressions: the bare form "MSR_EE|MSR_VM" miscombines under any
   operator with higher precedence than '|'. */
#define MSR_KERNEL (MSR_EE | MSR_VM)
//#define MSR_USER (MSR_KERNEL | MSR_UM | MSR_IE)
#define MSR_KERNEL_VMS (MSR_EE | MSR_VMS)
//#define MSR_USER_VMS (MSR_KERNEL_VMS | MSR_UMS | MSR_IE)

/* Exception State Register (ESR) Fields */
#define ESR_DIZ (1<<11) /* Zone Protection */
#define ESR_S (1<<10) /* Store instruction */
  81 +
  82 +
/* Version reg. */
/* Basic PVR mask */
#define PVR0_PVR_FULL_MASK 0x80000000
#define PVR0_USE_BARREL_MASK 0x40000000
#define PVR0_USE_DIV_MASK 0x20000000
#define PVR0_USE_HW_MUL_MASK 0x10000000
#define PVR0_USE_FPU_MASK 0x08000000
#define PVR0_USE_EXC_MASK 0x04000000
#define PVR0_USE_ICACHE_MASK 0x02000000
#define PVR0_USE_DCACHE_MASK 0x01000000
#define PVR0_USE_MMU 0x00800000 /* new */
#define PVR0_VERSION_MASK 0x0000FF00
#define PVR0_USER1_MASK 0x000000FF

/* User 2 PVR mask */
#define PVR1_USER2_MASK 0xFFFFFFFF

/* Configuration PVR masks */
#define PVR2_D_OPB_MASK 0x80000000
#define PVR2_D_LMB_MASK 0x40000000
#define PVR2_I_OPB_MASK 0x20000000
#define PVR2_I_LMB_MASK 0x10000000
#define PVR2_INTERRUPT_IS_EDGE_MASK 0x08000000
#define PVR2_EDGE_IS_POSITIVE_MASK 0x04000000
#define PVR2_D_PLB_MASK 0x02000000 /* new */
#define PVR2_I_PLB_MASK 0x01000000 /* new */
#define PVR2_INTERCONNECT 0x00800000 /* new */
#define PVR2_USE_EXTEND_FSL 0x00080000 /* new */
#define PVR2_USE_FSL_EXC 0x00040000 /* new */
#define PVR2_USE_MSR_INSTR 0x00020000
#define PVR2_USE_PCMP_INSTR 0x00010000
#define PVR2_AREA_OPTIMISED 0x00008000
#define PVR2_USE_BARREL_MASK 0x00004000
#define PVR2_USE_DIV_MASK 0x00002000
#define PVR2_USE_HW_MUL_MASK 0x00001000
#define PVR2_USE_FPU_MASK 0x00000800
#define PVR2_USE_MUL64_MASK 0x00000400
#define PVR2_USE_FPU2_MASK 0x00000200 /* new */
#define PVR2_USE_IPLBEXC 0x00000100
#define PVR2_USE_DPLBEXC 0x00000080
#define PVR2_OPCODE_0x0_ILL_MASK 0x00000040
#define PVR2_UNALIGNED_EXC_MASK 0x00000020
#define PVR2_ILL_OPCODE_EXC_MASK 0x00000010
#define PVR2_IOPB_BUS_EXC_MASK 0x00000008
#define PVR2_DOPB_BUS_EXC_MASK 0x00000004
#define PVR2_DIV_ZERO_EXC_MASK 0x00000002
#define PVR2_FPU_EXC_MASK 0x00000001

/* Debug and exception PVR masks */
#define PVR3_DEBUG_ENABLED_MASK 0x80000000
#define PVR3_NUMBER_OF_PC_BRK_MASK 0x1E000000
#define PVR3_NUMBER_OF_RD_ADDR_BRK_MASK 0x00380000
#define PVR3_NUMBER_OF_WR_ADDR_BRK_MASK 0x0000E000
#define PVR3_FSL_LINKS_MASK 0x00000380

/* ICache config PVR masks */
#define PVR4_USE_ICACHE_MASK 0x80000000
#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000
#define PVR4_ICACHE_USE_FSL_MASK 0x02000000
#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000
#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000
#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000

/* DCache config PVR masks */
#define PVR5_USE_DCACHE_MASK 0x80000000
#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000
#define PVR5_DCACHE_USE_FSL_MASK 0x02000000
#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000
#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000
#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000

/* ICache base address PVR mask */
#define PVR6_ICACHE_BASEADDR_MASK 0xFFFFFFFF

/* ICache high address PVR mask */
#define PVR7_ICACHE_HIGHADDR_MASK 0xFFFFFFFF

/* DCache base address PVR mask */
#define PVR8_DCACHE_BASEADDR_MASK 0xFFFFFFFF

/* DCache high address PVR mask */
#define PVR9_DCACHE_HIGHADDR_MASK 0xFFFFFFFF

/* Target family PVR mask */
#define PVR10_TARGET_FAMILY_MASK 0xFF000000

/* MMU description */
#define PVR11_USE_MMU 0xC0000000
#define PVR11_MMU_ITLB_SIZE 0x38000000
#define PVR11_MMU_DTLB_SIZE 0x07000000
#define PVR11_MMU_TLB_ACCESS 0x00C00000
#define PVR11_MMU_ZONES 0x003C0000
/* MSR Reset value PVR mask */
#define PVR11_MSR_RESET_VALUE_MASK 0x000007FF



/* CPU flags. */

/* Condition codes. */
#define CC_GE 5
#define CC_GT 4
#define CC_LE 3
#define CC_LT 2
#define CC_NE 1
#define CC_EQ 0
  189 +
#define NB_MMU_MODES 3
/* Per-CPU state for the MicroBlaze target. */
typedef struct CPUMBState {
    uint32_t debug;     /* Scratch exposed to TCG as env_debug; exact use
                           not visible in this patch. */
    uint32_t btaken;    /* Non-zero when the pending branch is taken. */
    uint32_t btarget;   /* Pending branch target (copied to SR_BTR on
                           exceptions in a delay slot). */
    uint32_t bimm;      /* Branch was imm-prefixed (see do_interrupt). */

    uint32_t imm;       /* Latched immediate from an imm prefix insn --
                           presumably; confirm against the decoder. */
    uint32_t regs[33];  /* GPRs; architecturally 32 -- the extra slot
                           looks like scratch, TODO confirm. */
    uint32_t sregs[24]; /* Special registers, indexed by SR_* above. */

    /* Internal flags. */
#define IMM_FLAG 4
#define DRTI_FLAG (1 << 16)
#define DRTE_FLAG (1 << 17)
#define DRTB_FLAG (1 << 18)
#define D_FLAG (1 << 19) /* Bit in ESR. */
/* TB dependant CPUState. */
#define IFLAGS_TB_MASK (D_FLAG | IMM_FLAG | DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)
    uint32_t iflags;

    struct {
        uint32_t regs[16]; /* Processor Version Registers PVR0-PVR15. */
    } pvr;

#if !defined(CONFIG_USER_ONLY)
    /* Unified MMU. */
    struct microblaze_mmu mmu;
#endif

    CPU_COMMON
} CPUMBState;
  222 +
/* Public entry points implemented elsewhere in the target. */
CPUState *cpu_mb_init(const char *cpu_model);
int cpu_mb_exec(CPUState *s);
void cpu_mb_close(CPUState *s);
void do_interrupt(CPUState *env);
/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_mb_signal_handler(int host_signum, void *pinfo,
                          void *puc);

/* Condition-code evaluation strategies (cc_op values). */
enum {
    CC_OP_DYNAMIC, /* Use env->cc_op */
    CC_OP_FLAGS,
    CC_OP_CMP,
};

/* FIXME: MB uses variable pages down to 1K but linux only uses 4k. */
#define TARGET_PAGE_BITS 12
#define MMAP_SHIFT TARGET_PAGE_BITS

/* Map the generic QEMU entry-point names onto the MB implementations. */
#define cpu_init cpu_mb_init
#define cpu_exec cpu_mb_exec
#define cpu_gen_code cpu_mb_gen_code
#define cpu_signal_handler cpu_mb_signal_handler

#define CPU_SAVE_VERSION 1

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _nommu
#define MMU_MODE1_SUFFIX _kernel
#define MMU_MODE2_SUFFIX _user
#define MMU_NOMMU_IDX 0
#define MMU_KERNEL_IDX 1
#define MMU_USER_IDX 2
/* See NB_MMU_MODES further up the file. */
  258 +
  259 +static inline int cpu_mmu_index (CPUState *env)
  260 +{
  261 + /* Are we in nommu mode?. */
  262 + if (!(env->sregs[SR_MSR] & MSR_VM))
  263 + return MMU_NOMMU_IDX;
  264 +
  265 + if (env->sregs[SR_MSR] & MSR_UM)
  266 + return MMU_USER_IDX;
  267 + return MMU_KERNEL_IDX;
  268 +}
  269 +
int cpu_mb_handle_mmu_fault(CPUState *env, target_ulong address, int rw,
                            int mmu_idx, int is_softmmu);

#if defined(CONFIG_USER_ONLY)
/* Set up the child's registers after clone(): install the new stack
   pointer when one was given, and zero r3 (presumably the syscall
   return value register for the child -- confirm against the MB ABI). */
static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
{
    if (newsp)
        env->regs[R_SP] = newsp;
    env->regs[3] = 0;
}
#endif
  281 +
/* TLS is not wired up for MicroBlaze; deliberately a no-op. */
static inline void cpu_set_tls(CPUState *env, target_ulong newtls)
{
}

/* True when the MSR interrupt-enable bit is set. */
static inline int cpu_interrupts_enabled(CPUState *env)
{
    return env->sregs[SR_MSR] & MSR_IE;
}

#include "cpu-all.h"
#include "exec-all.h"

/* Load the PC from a translation block's start address. */
static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
{
    env->sregs[SR_PC] = tb->pc;
}

static inline target_ulong cpu_get_pc(CPUState *env)
{
    return env->sregs[SR_PC];
}

/* Gather the state that identifies a translation block: PC, cs_base
   (always 0 on this target) and the TB-relevant iflags bits. */
static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    *pc = env->sregs[SR_PC];
    *cs_base = 0;
    *flags = env->iflags & IFLAGS_TB_MASK;
}
#endif
... ...
target-microblaze/exec.h 0 → 100644
  1 +/*
  2 + * Microblaze execution defines
  3 + *
  4 + * Copyright (c) 2009 Edgar E. Iglesias
  5 + *
  6 + * This library is free software; you can redistribute it and/or
  7 + * modify it under the terms of the GNU Lesser General Public
  8 + * License as published by the Free Software Foundation; either
  9 + * version 2 of the License, or (at your option) any later version.
  10 + *
  11 + * This library is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14 + * General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU Lesser General Public
  17 + * License along with this library; if not, write to the Free Software
  18 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
  19 + */
#include "dyngen-exec.h"

/* Dedicate a host register (AREG0) to the CPU state pointer while
   executing generated code. */
register struct CPUMBState *env asm(AREG0);

#include "cpu.h"
#include "exec-all.h"

/* This target keeps no lazily-cached register state outside env, so
   both synchronization hooks are intentionally empty. */
static inline void env_to_regs(void)
{
}

static inline void regs_to_env(void)
{
}

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif

void cpu_mb_flush_flags(CPUMBState *env, int cc_op);
  40 +
/* The CPU has work pending when a hard IRQ or NMI has been raised. */
static inline int cpu_has_work(CPUState *env)
{
    return (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI));
}
  45 +
  46 +static inline int cpu_halted(CPUState *env) {
  47 + if (!env->halted)
  48 + return 0;
  49 +
  50 + /* IRQ, NMI and GURU execeptions wakes us up. */
  51 + if (env->interrupt_request
  52 + & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI)) {
  53 + env->halted = 0;
  54 + return 0;
  55 + }
  56 + return EXCP_HALTED;
  57 +}
... ...
target-microblaze/helper.c 0 → 100644
  1 +/*
  2 + * MicroBlaze helper routines.
  3 + *
  4 + * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
  5 + *
  6 + * This library is free software; you can redistribute it and/or
  7 + * modify it under the terms of the GNU Lesser General Public
  8 + * License as published by the Free Software Foundation; either
  9 + * version 2 of the License, or (at your option) any later version.
  10 + *
  11 + * This library is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14 + * Lesser General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU Lesser General Public
  17 + * License along with this library; if not, write to the Free Software
  18 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
  19 + */
  20 +
  21 +#include <stdio.h>
  22 +#include <string.h>
  23 +#include <assert.h>
  24 +
  25 +#include "config.h"
  26 +#include "cpu.h"
  27 +#include "exec-all.h"
  28 +#include "host-utils.h"
  29 +
/* Debug trace macros -- expand to nothing by default. */
#define D(x)
#define DMMU(x)
  32 +
#if defined(CONFIG_USER_ONLY)

/* User-mode: nothing to deliver; record the PC in r14 and clear the
   pending exception. */
void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
    env->regs[14] = env->sregs[SR_PC];
}

/* User-mode has no guest MMU: a fault here is fatal for the guest.
   Dump state and report failure (non-zero) to the caller. */
int cpu_mb_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
                            int mmu_idx, int is_softmmu)
{
    env->exception_index = 0xaa;
    cpu_dump_state(env, stderr, fprintf, 0);
    return 1;
}

/* User-mode addresses are identity mapped. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
{
    return addr;
}

#else /* !CONFIG_USER_ONLY */
  55 +
/* Resolve a guest access and install the mapping into QEMU's TLB.
   On a miss or protection fault, set SR_EAR/SR_ESR and flag EXCP_MMU
   in env->exception_index; the return value is tlb_set_page()'s result
   (or the initial 1 on the fault path). */
int cpu_mb_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                             int mmu_idx, int is_softmmu)
{
    unsigned int hit;
    unsigned int mmu_available;
    int r = 1;
    int prot;

    /* The core has an MMU when PVR0 says so; with a full PVR, the
       PVR11 MMU field must also agree. */
    mmu_available = 0;
    if (env->pvr.regs[0] & PVR0_USE_MMU) {
        mmu_available = 1;
        if ((env->pvr.regs[0] & PVR0_PVR_FULL_MASK)
            && (env->pvr.regs[11] & PVR11_USE_MMU) != PVR11_USE_MMU) {
            mmu_available = 0;
        }
    }

    /* Translate if the MMU is available and enabled. */
    if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) {
        target_ulong vaddr, paddr;
        struct microblaze_mmu_lookup lu;

        hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
        if (hit) {
            vaddr = address & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;

            DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n",
                          mmu_idx, vaddr, paddr, lu.prot));
            r = tlb_set_page(env, vaddr,
                             paddr, lu.prot, mmu_idx, is_softmmu);
        } else {
            env->sregs[SR_EAR] = address;
            DMMU(qemu_log("mmu=%d miss addr=%x\n", mmu_idx, vaddr));

            /* Encode the cause in ESR.  rw == 2 is presumably an
               instruction fetch (QEMU convention -- confirm); bit 10
               (ESR_S) marks store accesses. */
            switch (lu.err) {
            case ERR_PROT:
                env->sregs[SR_ESR] = rw == 2 ? 17 : 16;
                env->sregs[SR_ESR] |= (rw == 1) << 10;
                break;
            case ERR_MISS:
                env->sregs[SR_ESR] = rw == 2 ? 19 : 18;
                env->sregs[SR_ESR] |= (rw == 1) << 10;
                break;
            default:
                abort();
                break;
            }

            /* A fault while EXCP_MMU is already pending means the
               exception path itself faulted. */
            if (env->exception_index == EXCP_MMU) {
                cpu_abort(env, "recursive faults\n");
            }

            /* TLB miss. */
            env->exception_index = EXCP_MMU;
        }
    } else {
        /* MMU disabled or not available: identity map the page. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        r = tlb_set_page(env, address, address, prot, mmu_idx, is_softmmu);
    }
    return r;
}
  120 +
/* Deliver the pending exception/interrupt in env->exception_index:
   save return state, fix up MSR mode bits and jump to the vector. */
void do_interrupt(CPUState *env)
{
    uint32_t t;

    /* IMM flag cannot propagate accross a branch and into the dslot. */
    assert(!((env->iflags & D_FLAG) && (env->iflags & IMM_FLAG)));
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));
/* assert(env->sregs[SR_MSR] & (MSR_EE)); Only for HW exceptions. */
    switch (env->exception_index) {
    case EXCP_MMU:
        /* r17 holds the exception return address. */
        env->regs[17] = env->sregs[SR_PC];

        /* Exception breaks branch + dslot sequence? */
        if (env->iflags & D_FLAG) {
            D(qemu_log("D_FLAG set at exception bimm=%d\n", env->bimm));
            /* Record the delay-slot state: ESR D bit + branch target. */
            env->sregs[SR_ESR] |= 1 << 12 ;
            env->sregs[SR_BTR] = env->btarget;

            /* Reexecute the branch. */
            env->regs[17] -= 4;
            /* was the branch immprefixed?. */
            if (env->bimm) {
                qemu_log_mask(CPU_LOG_INT,
                              "bimm exception at pc=%x iflags=%x\n",
                              env->sregs[SR_PC], env->iflags);
                /* Back up one more insn to re-run the imm prefix too. */
                env->regs[17] -= 4;
                log_cpu_state_mask(CPU_LOG_INT, env, 0);
            }
        } else if (env->iflags & IMM_FLAG) {
            D(qemu_log("IMM_FLAG set at exception\n"));
            env->regs[17] -= 4;
        }

        /* Disable the MMU.  Current VM/UM move into the save bits. */
        t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1;
        env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
        env->sregs[SR_MSR] |= t;
        /* Exception in progress. */
        env->sregs[SR_MSR] |= MSR_EIP;

        qemu_log_mask(CPU_LOG_INT,
                      "exception at pc=%x ear=%x iflags=%x\n",
                      env->sregs[SR_PC], env->sregs[SR_EAR], env->iflags);
        log_cpu_state_mask(CPU_LOG_INT, env, 0);
        env->iflags &= ~(IMM_FLAG | D_FLAG);
        /* Hardware exception vector. */
        env->sregs[SR_PC] = 0x20;
        break;

    case EXCP_IRQ:
        assert(!(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP)));
        assert(env->sregs[SR_MSR] & MSR_IE);
        assert(!(env->iflags & D_FLAG));

        t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1;

#if 0
#include "disas.h"

/* Useful instrumentation when debugging interrupt issues in either
   the models or in sw. */
        {
            const char *sym;

            sym = lookup_symbol(env->sregs[SR_PC]);
            if (sym
                && (!strcmp("netif_rx", sym)
                    || !strcmp("process_backlog", sym))) {

                qemu_log(
                    "interrupt at pc=%x msr=%x %x iflags=%x sym=%s\n",
                    env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags,
                    sym);

                log_cpu_state(env, 0);
            }
        }
#endif
        qemu_log_mask(CPU_LOG_INT,
                      "interrupt at pc=%x msr=%x %x iflags=%x\n",
                      env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags);

        /* Leave virtual/user mode and mask further interrupts. */
        env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM \
                                | MSR_UM | MSR_IE);
        env->sregs[SR_MSR] |= t;

        /* r14 holds the interrupt return address; vector is 0x10. */
        env->regs[14] = env->sregs[SR_PC];
        env->sregs[SR_PC] = 0x10;
        //log_cpu_state_mask(CPU_LOG_INT, env, 0);
        break;

    case EXCP_BREAK:
    case EXCP_HW_BREAK:
        assert(!(env->iflags & IMM_FLAG));
        assert(!(env->iflags & D_FLAG));
        t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1;
        qemu_log_mask(CPU_LOG_INT,
                      "break at pc=%x msr=%x %x iflags=%x\n",
                      env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags);
        log_cpu_state_mask(CPU_LOG_INT, env, 0);
        env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
        env->sregs[SR_MSR] |= t;
        /* Break in progress. */
        env->sregs[SR_MSR] |= MSR_BIP;
        if (env->exception_index == EXCP_HW_BREAK) {
            /* r16 holds the break return address; vector is 0x18.
               (MSR_BIP is set a second time here; harmless.) */
            env->regs[16] = env->sregs[SR_PC];
            env->sregs[SR_MSR] |= MSR_BIP;
            env->sregs[SR_PC] = 0x18;
        } else
            env->sregs[SR_PC] = env->btarget;
        break;
    default:
        cpu_abort(env, "unhandled exception type=%d\n",
                  env->exception_index);
        break;
    }
}
  236 +
  237 +target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
  238 +{
  239 + target_ulong vaddr, paddr = 0;
  240 + struct microblaze_mmu_lookup lu;
  241 + unsigned int hit;
  242 +
  243 + if (env->sregs[SR_MSR] & MSR_VM) {
  244 + hit = mmu_translate(&env->mmu, &lu, addr, 0, 0);
  245 + if (hit) {
  246 + vaddr = addr & TARGET_PAGE_MASK;
  247 + paddr = lu.paddr + vaddr - lu.vaddr;
  248 + } else
  249 + paddr = 0; /* ???. */
  250 + } else
  251 + paddr = addr & TARGET_PAGE_MASK;
  252 +
  253 + return paddr;
  254 +}
  255 +#endif
... ...
target-microblaze/helper.h 0 → 100644
#include "def-helper.h"

/* TCG helper declarations.  DEF_HELPER_n(name, ret, args...) expands
   to prototypes here, and to generator stubs when GEN_HELPER is
   defined (translate.c re-includes this file with GEN_HELPER set). */
DEF_HELPER_1(raise_exception, void, i32)
DEF_HELPER_0(debug, void)
DEF_HELPER_4(addkc, i32, i32, i32, i32, i32)
DEF_HELPER_4(subkc, i32, i32, i32, i32, i32)
DEF_HELPER_2(cmp, i32, i32, i32)
DEF_HELPER_2(cmpu, i32, i32, i32)

DEF_HELPER_2(divs, i32, i32, i32)
DEF_HELPER_2(divu, i32, i32, i32)

/* pcmpbf has no side effects; the flags let TCG reorder/elide calls. */
DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_PURE | TCG_CALL_CONST, i32, i32, i32)
#if !defined(CONFIG_USER_ONLY)
DEF_HELPER_1(mmu_read, i32, i32)
DEF_HELPER_2(mmu_write, void, i32, i32)
#endif

/* Second include cleans up the DEF_HELPER_* macro definitions. */
#include "def-helper.h"
... ...
target-microblaze/microblaze-decode.h 0 → 100644
  1 +/*
  2 + * MicroBlaze insn decoding macros.
  3 + *
  4 + * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
  5 + *
  6 + * This library is free software; you can redistribute it and/or
  7 + * modify it under the terms of the GNU Lesser General Public
  8 + * License as published by the Free Software Foundation; either
  9 + * version 2 of the License, or (at your option) any later version.
  10 + *
  11 + * This library is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14 + * Lesser General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU Lesser General Public
  17 + * License along with this library; if not, write to the Free Software
  18 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
  19 + */
  20 +
/* Convenient binary macros. */
/* B8(01100001) == 0x61: spells an 8-bit constant in binary at compile
   time by probing each hex digit of the 0x-prefixed literal. */
#define HEX__(n) 0x##n##LU
#define B8__(x) ((x&0x0000000FLU)?1:0) \
              + ((x&0x000000F0LU)?2:0) \
              + ((x&0x00000F00LU)?4:0) \
              + ((x&0x0000F000LU)?8:0) \
              + ((x&0x000F0000LU)?16:0) \
              + ((x&0x00F00000LU)?32:0) \
              + ((x&0x0F000000LU)?64:0) \
              + ((x&0xF0000000LU)?128:0)
#define B8(d) ((unsigned char)B8__(HEX__(d)))

/* Decode logic, mask and value.  Each pair is {value, mask};
   presumably an opcode matches when (opcode & mask) == value --
   confirm against the decoder's use. */
#define DEC_ADD {B8(00000000), B8(00110001)}
#define DEC_SUB {B8(00000001), B8(00110001)}
#define DEC_AND {B8(00100001), B8(00110101)}
#define DEC_XOR {B8(00100010), B8(00110111)}
#define DEC_OR {B8(00100000), B8(00110111)}
#define DEC_BIT {B8(00100100), B8(00111111)}
#define DEC_MSR {B8(00100101), B8(00111111)}

#define DEC_BARREL {B8(00010001), B8(00110111)}
#define DEC_MUL {B8(00010000), B8(00110111)}
#define DEC_DIV {B8(00010010), B8(00110111)}

#define DEC_LD {B8(00110000), B8(00110100)}
#define DEC_ST {B8(00110100), B8(00110100)}
#define DEC_IMM {B8(00101100), B8(00111111)}

#define DEC_BR {B8(00100110), B8(00110111)}
#define DEC_BCC {B8(00100111), B8(00110111)}
#define DEC_RTS {B8(00101101), B8(00111111)}
... ...
target-microblaze/op_helper.c 0 → 100644
  1 +/*
  2 + * Microblaze helper routines.
  3 + *
  4 + * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>.
  5 + *
  6 + * This library is free software; you can redistribute it and/or
  7 + * modify it under the terms of the GNU Lesser General Public
  8 + * License as published by the Free Software Foundation; either
  9 + * version 2 of the License, or (at your option) any later version.
  10 + *
  11 + * This library is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14 + * Lesser General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU Lesser General Public
  17 + * License along with this library; if not, write to the Free Software
  18 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
  19 + */
  20 +
  21 +#include <assert.h>
  22 +#include "exec.h"
  23 +#include "helper.h"
  24 +#include "host-utils.h"
  25 +
/* Debug trace macro -- expands to nothing by default. */
#define D(x)
  27 +
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _mmu
/* Instantiate the softmmu load/store templates for 1/2/4/8-byte
   accesses (SHIFT is the log2 of the access size). */
#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
  38 +
/* Try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_mb_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret)) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Longjmp out to the main loop to deliver the fault. */
        cpu_loop_exit();
    }
    env = saved_env;
}
  71 +#endif
  72 +
/* Raise a guest exception from generated code; does not return. */
void helper_raise_exception(uint32_t index)
{
    env->exception_index = index;
    cpu_loop_exit();
}

/* Dump the PC and all 32 GPRs to the qemu log, four per line. */
void helper_debug(void)
{
    int i;

    qemu_log("PC=%8.8x\n", env->sregs[SR_PC]);
    for (i = 0; i < 32; i++) {
        qemu_log("r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            qemu_log("\n");
    }
    qemu_log("\n\n");
}
  91 +
/* Carry-out of the 32-bit addition a + b + cin (cin is 0 or 1).
   Computed with a 64-bit sum, which is both simpler and provably
   correct for all inputs, replacing the original hand-rolled
   wrap-around test. */
static inline uint32_t compute_carry(uint32_t a, uint32_t b, uint32_t cin)
{
    return (uint32_t)(((uint64_t)a + b + cin) >> 32);
}
  102 +
/* Signed compare: compute b - a, and when the operand signs differ
   force the result's sign bit to b's sign so a signed test on the
   result orders the operands correctly. */
uint32_t helper_cmp(uint32_t a, uint32_t b)
{
    uint32_t diff = b - a;

    if ((a ^ b) & 0x80000000) {
        diff = (diff & 0x7fffffff) | (b & 0x80000000);
    }
    return diff;
}
  112 +
/* Unsigned compare: compute b - a, and when the operand signs differ
   force the result's sign bit to a's sign so an unsigned ordering
   falls out of a signed test on the result. */
uint32_t helper_cmpu(uint32_t a, uint32_t b)
{
    uint32_t diff = b - a;

    if ((a ^ b) & 0x80000000) {
        diff = (diff & 0x7fffffff) | (a & 0x80000000);
    }
    return diff;
}
  122 +
/* Add a + b, optionally with carry-in (c != 0 pulls carry from
   MSR_CC, bit 31 of MSR) and optionally keeping flags (k != 0
   suppresses the carry update, per the MB "K" bit). */
uint32_t helper_addkc(uint32_t a, uint32_t b, uint32_t k, uint32_t c)
{
    uint32_t d, cf = 0, ncf;

    if (c)
        cf = env->sregs[SR_MSR] >> 31;
    assert(cf == 0 || cf == 1);
    d = a + b + cf;

    if (!k) {
        /* Update both carry bits (MSR_C and its mirror MSR_CC). */
        ncf = compute_carry(a, b, cf);
        assert(ncf == 0 || ncf == 1);
        if (ncf)
            env->sregs[SR_MSR] |= MSR_C | MSR_CC;
        else
            env->sregs[SR_MSR] &= ~(MSR_C | MSR_CC);
    }
    D(qemu_log("%x = %x + %x cf=%d ncf=%d k=%d c=%d\n",
               d, a, b, cf, ncf, k, c));
    return d;
}
  144 +
/* Subtract b - a via b + ~a + cf (cf defaults to 1 so the default is
   a true subtract).  c != 0 takes cf from MSR_CC instead; k != 0
   keeps the flags untouched. */
uint32_t helper_subkc(uint32_t a, uint32_t b, uint32_t k, uint32_t c)
{
    uint32_t d, cf = 1, ncf;

    if (c)
        cf = env->sregs[SR_MSR] >> 31;
    assert(cf == 0 || cf == 1);
    d = b + ~a + cf;

    if (!k) {
        /* Carry of the equivalent addition b + ~a + cf. */
        ncf = compute_carry(b, ~a, cf);
        assert(ncf == 0 || ncf == 1);
        if (ncf)
            env->sregs[SR_MSR] |= MSR_C | MSR_CC;
        else
            env->sregs[SR_MSR] &= ~(MSR_C | MSR_CC);
    }
    D(qemu_log("%x = %x + %x cf=%d ncf=%d k=%d c=%d\n",
               d, a, b, cf, ncf, k, c));
    return d;
}
  166 +
/* Divide-by-zero check shared by divs/divu: sets MSR_DZ and returns 0
   when the divisor b is zero (the division must be skipped), clears
   MSR_DZ and returns 1 otherwise.  a is currently unused. */
static inline int div_prepare(uint32_t a, uint32_t b)
{
    if (b == 0) {
        env->sregs[SR_MSR] |= MSR_DZ;
        /* FIXME: Raise the div by zero exception. */
        return 0;
    }
    env->sregs[SR_MSR] &= ~MSR_DZ;
    return 1;
}
  177 +
/* Signed 32-bit division a / b.  Returns 0 on division by zero (after
   div_prepare has flagged MSR_DZ). */
uint32_t helper_divs(uint32_t a, uint32_t b)
{
    if (!div_prepare(a, b))
        return 0;
    /* INT_MIN / -1 is undefined behavior in C (the quotient overflows)
       and traps on some hosts; return the two's-complement wrap
       explicitly instead. */
    if (a == 0x80000000u && b == 0xffffffffu)
        return 0x80000000u;
    return (int32_t)a / (int32_t)b;
}
  184 +
/* Unsigned 32-bit division a / b.  Returns 0 on division by zero
   (after div_prepare has flagged MSR_DZ). */
uint32_t helper_divu(uint32_t a, uint32_t b)
{
    if (!div_prepare(a, b))
        return 0;
    return a / b;
}
  191 +
/* Pattern-compare byte find: return the 1-based position (counting
   from the most significant byte) of the first byte where a and b are
   equal, or 0 when no byte matches. */
uint32_t helper_pcmpbf(uint32_t a, uint32_t b)
{
    unsigned int pos;

    for (pos = 1; pos <= 4; pos++) {
        unsigned int shift = (4 - pos) * 8;

        if (((a >> shift) & 0xff) == ((b >> shift) & 0xff))
            return pos;
    }
    return 0;
}
  204 +
#if !defined(CONFIG_USER_ONLY)
/* Writes/reads to the MMU's special regs end up here.  These thin
   wrappers just forward to the mmu model with the global env. */
uint32_t helper_mmu_read(uint32_t rn)
{
    return mmu_read(env, rn);
}

void helper_mmu_write(uint32_t rn, uint32_t v)
{
    mmu_write(env, rn, v);
}
#endif
... ...
target-microblaze/translate.c 0 → 100644
  1 +/*
  2 + * Xilinx MicroBlaze emulation for qemu: main translation routines.
  3 + *
  4 + * Copyright (c) 2009 Edgar E. Iglesias.
  5 + *
  6 + * This library is free software; you can redistribute it and/or
  7 + * modify it under the terms of the GNU Lesser General Public
  8 + * License as published by the Free Software Foundation; either
  9 + * version 2 of the License, or (at your option) any later version.
  10 + *
  11 + * This library is distributed in the hope that it will be useful,
  12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14 + * Lesser General Public License for more details.
  15 + *
  16 + * You should have received a copy of the GNU Lesser General Public
  17 + * License along with this library; if not, write to the Free Software
  18 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
  19 + */
  20 +
  21 +#include <stdarg.h>
  22 +#include <stdlib.h>
  23 +#include <stdio.h>
  24 +#include <string.h>
  25 +#include <inttypes.h>
  26 +#include <assert.h>
  27 +
  28 +#include "cpu.h"
  29 +#include "exec-all.h"
  30 +#include "disas.h"
  31 +#include "tcg-op.h"
  32 +#include "helper.h"
  33 +#include "microblaze-decode.h"
  34 +#include "qemu-common.h"
  35 +
  36 +#define GEN_HELPER 1
  37 +#include "helper.h"
  38 +
  39 +#define SIM_COMPAT 0
  40 +#define DISAS_GNU 1
  41 +#define DISAS_MB 1
  42 +#if DISAS_MB && !SIM_COMPAT
  43 +# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
  44 +#else
  45 +# define LOG_DIS(...) do { } while (0)
  46 +#endif
  47 +
  48 +#define D(x)
  49 +
  50 +#define EXTRACT_FIELD(src, start, end) \
  51 + (((src) >> start) & ((1 << (end - start + 1)) - 1))
  52 +
/* Scratch value exposed to translated code for debugging. */
static TCGv env_debug;
/* Pointer to the CPU environment (CPUMBState). */
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];      /* General purpose registers r0..r31. */
static TCGv cpu_SR[18];     /* Special registers (pc, msr, ear, ...). */
/* Immediate value latched by the "imm" prefix insn. */
static TCGv env_imm;
static TCGv env_btaken;     /* Nonzero when a pending branch is taken. */
static TCGv env_btarget;    /* Pending branch target address. */
static TCGv env_iflags;     /* Runtime copy of the tb-dependent flags. */

#include "gen-icount.h"
  63 +
/* This is the state at translation time. */
typedef struct DisasContext {
    CPUState *env;
    target_ulong pc, ppc;   /* Current and previous insn address. */
    target_ulong cache_pc;

    /* Decoder. */
    int type_b;             /* Nonzero for type-B (immediate operand) insns. */
    uint32_t ir;            /* Raw instruction word. */
    uint8_t opcode;         /* Major opcode, ir[26:31]. */
    uint8_t rd, ra, rb;     /* Register operand fields. */
    uint16_t imm;           /* 16-bit immediate field. */

    unsigned int cpustate_changed;  /* Forces the tb to end (e.g. MSR write). */
    unsigned int delayed_branch;    /* Insns left until the delay slot ends. */
    unsigned int tb_flags, synced_flags; /* tb dependent flags. */
    unsigned int clear_imm;         /* Drop IMM_FLAG after this insn. */
    int is_jmp;

#define JMP_NOJMP 0
#define JMP_DIRECT 1
#define JMP_INDIRECT 2
    unsigned int jmp;
    uint32_t jmp_pc;        /* Branch target when jmp == JMP_DIRECT. */

    int abort_at_next_insn; /* Set by dec_null on an undecodable insn. */
    int nr_nops;            /* Consecutive all-zero insns fetched. */
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
  94 +
/* Printable names for the 32 general purpose registers.
 * ("static const" instead of the unconventional "const static".) */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
  102 +
/* Printable names for the special registers (pc, msr, then sr2..sr18).
 * ("static const" instead of the unconventional "const static".) */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
  109 +
/* Sign extend at translation time.
 *
 * Interpret the low (width + 1) bits of VAL as a two's complement number
 * whose sign bit is bit WIDTH, and return it as a signed int.  Bits above
 * WIDTH are ignored.  Valid for width 0..31.
 *
 * Uses the portable xor/subtract idiom instead of the previous
 * shift-left / arithmetic-shift-right pair: right-shifting a negative
 * int is implementation-defined behavior in C. */
static inline int sign_extend(unsigned int val, unsigned int width)
{
    unsigned long long sign = 1ull << width;
    unsigned long long bits = val & (2 * sign - 1);

    return (int)((long long)(bits ^ sign) - (long long)sign);
}
  122 +
  123 +static inline void t_sync_flags(DisasContext *dc)
  124 +{
  125 + /* Synch the tb dependant flags between translator and runtime. */
  126 + if (dc->tb_flags != dc->synced_flags) {
  127 + tcg_gen_movi_tl(env_iflags, dc->tb_flags);
  128 + dc->synced_flags = dc->tb_flags;
  129 + }
  130 +}
  131 +
/* Emit code that raises exception INDEX at the current insn.
 * Flags and SR_PC are synced first so the exception helper sees a
 * consistent CPU state; the tb is then forced to end. */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
  142 +
  143 +static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
  144 +{
  145 + TranslationBlock *tb;
  146 + tb = dc->tb;
  147 + if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
  148 + tcg_gen_goto_tb(n);
  149 + tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
  150 + tcg_gen_exit_tb((long)tb + n);
  151 + } else {
  152 + tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
  153 + tcg_gen_exit_tb(0);
  154 + }
  155 +}
  156 +
  157 +static inline TCGv *dec_alu_op_b(DisasContext *dc)
  158 +{
  159 + if (dc->type_b) {
  160 + if (dc->tb_flags & IMM_FLAG)
  161 + tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
  162 + else
  163 + tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
  164 + return &env_imm;
  165 + } else
  166 + return &cpu_R[dc->rb];
  167 +}
  168 +
  169 +static void dec_add(DisasContext *dc)
  170 +{
  171 + unsigned int k, c;
  172 +
  173 + k = dc->opcode & 4;
  174 + c = dc->opcode & 2;
  175 +
  176 + LOG_DIS("add%s%s%s r%d r%d r%d\n",
  177 + dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
  178 + dc->rd, dc->ra, dc->rb);
  179 +
  180 + if (k && !c && dc->rd)
  181 + tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
  182 + else if (dc->rd)
  183 + gen_helper_addkc(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)),
  184 + tcg_const_tl(k), tcg_const_tl(c));
  185 + else {
  186 + TCGv d = tcg_temp_new();
  187 + gen_helper_addkc(d, cpu_R[dc->ra], *(dec_alu_op_b(dc)),
  188 + tcg_const_tl(k), tcg_const_tl(c));
  189 + tcg_temp_free(d);
  190 + }
  191 +}
  192 +
  193 +static void dec_sub(DisasContext *dc)
  194 +{
  195 + unsigned int u, cmp, k, c;
  196 +
  197 + u = dc->imm & 2;
  198 + k = dc->opcode & 4;
  199 + c = dc->opcode & 2;
  200 + cmp = (dc->imm & 1) && (!dc->type_b) && k;
  201 +
  202 + if (cmp) {
  203 + LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
  204 + if (dc->rd) {
  205 + if (u)
  206 + gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
  207 + else
  208 + gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
  209 + }
  210 + } else {
  211 + LOG_DIS("sub%s%s r%d, r%d r%d\n",
  212 + k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
  213 +
  214 + if (!k || c) {
  215 + TCGv t;
  216 + t = tcg_temp_new();
  217 + if (dc->rd)
  218 + gen_helper_subkc(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)),
  219 + tcg_const_tl(k), tcg_const_tl(c));
  220 + else
  221 + gen_helper_subkc(t, cpu_R[dc->ra], *(dec_alu_op_b(dc)),
  222 + tcg_const_tl(k), tcg_const_tl(c));
  223 + tcg_temp_free(t);
  224 + }
  225 + else if (dc->rd)
  226 + tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
  227 + }
  228 +}
  229 +
  230 +static void dec_pattern(DisasContext *dc)
  231 +{
  232 + unsigned int mode;
  233 + int l1;
  234 +
  235 + mode = dc->opcode & 3;
  236 + switch (mode) {
  237 + case 0:
  238 + /* pcmpbf. */
  239 + LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
  240 + if (dc->rd)
  241 + gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
  242 + break;
  243 + case 2:
  244 + LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
  245 + if (dc->rd) {
  246 + TCGv t0 = tcg_temp_local_new();
  247 + l1 = gen_new_label();
  248 + tcg_gen_movi_tl(t0, 1);
  249 + tcg_gen_brcond_tl(TCG_COND_EQ,
  250 + cpu_R[dc->ra], cpu_R[dc->rb], l1);
  251 + tcg_gen_movi_tl(t0, 0);
  252 + gen_set_label(l1);
  253 + tcg_gen_mov_tl(cpu_R[dc->rd], t0);
  254 + tcg_temp_free(t0);
  255 + }
  256 + break;
  257 + case 3:
  258 + LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
  259 + l1 = gen_new_label();
  260 + if (dc->rd) {
  261 + TCGv t0 = tcg_temp_local_new();
  262 + tcg_gen_movi_tl(t0, 1);
  263 + tcg_gen_brcond_tl(TCG_COND_NE,
  264 + cpu_R[dc->ra], cpu_R[dc->rb], l1);
  265 + tcg_gen_movi_tl(t0, 0);
  266 + gen_set_label(l1);
  267 + tcg_gen_mov_tl(cpu_R[dc->rd], t0);
  268 + tcg_temp_free(t0);
  269 + }
  270 + break;
  271 + default:
  272 + cpu_abort(dc->env,
  273 + "unsupported pattern insn opcode=%x\n", dc->opcode);
  274 + break;
  275 + }
  276 +}
  277 +
  278 +static void dec_and(DisasContext *dc)
  279 +{
  280 + unsigned int not;
  281 +
  282 + if (!dc->type_b && (dc->imm & (1 << 10))) {
  283 + dec_pattern(dc);
  284 + return;
  285 + }
  286 +
  287 + not = dc->opcode & (1 << 1);
  288 + LOG_DIS("and%s\n", not ? "n" : "");
  289 +
  290 + if (!dc->rd)
  291 + return;
  292 +
  293 + if (not) {
  294 + TCGv t = tcg_temp_new();
  295 + tcg_gen_not_tl(t, *(dec_alu_op_b(dc)));
  296 + tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], t);
  297 + tcg_temp_free(t);
  298 + } else
  299 + tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
  300 +}
  301 +
  302 +static void dec_or(DisasContext *dc)
  303 +{
  304 + if (!dc->type_b && (dc->imm & (1 << 10))) {
  305 + dec_pattern(dc);
  306 + return;
  307 + }
  308 +
  309 + LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
  310 + if (dc->rd)
  311 + tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
  312 +}
  313 +
  314 +static void dec_xor(DisasContext *dc)
  315 +{
  316 + if (!dc->type_b && (dc->imm & (1 << 10))) {
  317 + dec_pattern(dc);
  318 + return;
  319 + }
  320 +
  321 + LOG_DIS("xor r%d\n", dc->rd);
  322 + if (dc->rd)
  323 + tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
  324 +}
  325 +
/* Load the carry flag into D, taken from MSR bit 31 (the MSR_CC copy
 * that write_carry keeps in sync with MSR_C). */
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
  330 +
  331 +static void write_carry(DisasContext *dc, TCGv v)
  332 +{
  333 + TCGv t0 = tcg_temp_new();
  334 + tcg_gen_shli_tl(t0, v, 31);
  335 + tcg_gen_sari_tl(t0, t0, 31);
  336 + tcg_gen_mov_tl(env_debug, t0);
  337 + tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
  338 + tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
  339 + ~(MSR_C | MSR_CC));
  340 + tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
  341 + tcg_temp_free(t0);
  342 +}
  343 +
  344 +
/* Copy the current MSR value into D. */
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
  349 +
/* Store V into MSR.  Marks the cpustate changed so the tb ends, since
 * MSR contents affect how subsequent insns must be translated. */
static inline void msr_write(DisasContext *dc, TCGv v)
{
    dc->cpustate_changed = 1;
    tcg_gen_mov_tl(cpu_SR[SR_MSR], v);
    /* PVR, we have a processor version register: bit 10 is forced on. */
    tcg_gen_ori_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], (1 << 10));
}
  357 +
/* Decode the special-register access group: msrclr/msrset and mts/mfs.
 *
 * imm[13:0] selects the special register, imm bit 14 selects the
 * direction (set: "to" special reg), imm bit 15 separates mts/mfs from
 * msrclr/msrset.
 */
static void dec_msr(DisasContext *dc)
{
    TCGv t0, t1;
    unsigned int sr, to, rn;

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    /* These insns always go through the immediate operand path. */
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset. */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);
        /* rd receives the pre-modification MSR value. */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        /* End the tb here: following insns depend on the new MSR. */
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block. */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            /* NOTE(review): this writes cpu_R[rd] even when rd == 0;
               confirm r0 cannot be clobbered on this path. */
            gen_helper_mmu_read(cpu_R[dc->rd], tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        /* mts: move register into a special register. */
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                /* Writes to the PC slot are silently ignored. */
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                /* FSR: Ignored at the moment. */
                break;
            default:
                cpu_abort(dc->env, "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        /* mfs: move a special register into rd. */
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                /* FSR reads back as zero for now. */
                tcg_gen_movi_tl(cpu_R[dc->rd], 0);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            /* 0x2000-0x200c: the Processor Version Registers. */
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
                break;
        }
    }
}
  475 +
  476 +/* 64-bit signed mul, lower result in d and upper in d2. */
  477 +static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
  478 +{
  479 + TCGv_i64 t0, t1;
  480 +
  481 + t0 = tcg_temp_new_i64();
  482 + t1 = tcg_temp_new_i64();
  483 +
  484 + tcg_gen_ext_i32_i64(t0, a);
  485 + tcg_gen_ext_i32_i64(t1, b);
  486 + tcg_gen_mul_i64(t0, t0, t1);
  487 +
  488 + tcg_gen_trunc_i64_i32(d, t0);
  489 + tcg_gen_shri_i64(t0, t0, 32);
  490 + tcg_gen_trunc_i64_i32(d2, t0);
  491 +
  492 + tcg_temp_free_i64(t0);
  493 + tcg_temp_free_i64(t1);
  494 +}
  495 +
  496 +/* 64-bit unsigned muls, lower result in d and upper in d2. */
  497 +static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
  498 +{
  499 + TCGv_i64 t0, t1;
  500 +
  501 + t0 = tcg_temp_new_i64();
  502 + t1 = tcg_temp_new_i64();
  503 +
  504 + tcg_gen_extu_i32_i64(t0, a);
  505 + tcg_gen_extu_i32_i64(t1, b);
  506 + tcg_gen_mul_i64(t0, t0, t1);
  507 +
  508 + tcg_gen_trunc_i64_i32(d, t0);
  509 + tcg_gen_shri_i64(t0, t0, 32);
  510 + tcg_gen_trunc_i64_i32(d2, t0);
  511 +
  512 + tcg_temp_free_i64(t0);
  513 + tcg_temp_free_i64(t1);
  514 +}
  515 +
  516 +/* Multiplier unit. */
  517 +static void dec_mul(DisasContext *dc)
  518 +{
  519 + TCGv d[2];
  520 + unsigned int subcode;
  521 +
  522 + subcode = dc->imm & 3;
  523 + d[0] = tcg_temp_new();
  524 + d[1] = tcg_temp_new();
  525 +
  526 + if (dc->type_b) {
  527 + LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
  528 + t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
  529 + goto done;
  530 + }
  531 +
  532 + switch (subcode) {
  533 + case 0:
  534 + LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
  535 + t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
  536 + break;
  537 + case 1:
  538 + LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
  539 + t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
  540 + break;
  541 + case 2:
  542 + LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
  543 + t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
  544 + break;
  545 + case 3:
  546 + LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
  547 + t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
  548 + break;
  549 + default:
  550 + cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
  551 + break;
  552 + }
  553 +done:
  554 + tcg_temp_free(d[0]);
  555 + tcg_temp_free(d[1]);
  556 +}
  557 +
/* Div unit. */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;   /* Unsigned variant (idivu). */
    LOG_DIS("div\n");

    /* FIXME: support div by zero exceptions. */
    /* Operand order: rd = op_b / ra. */
    if (u)
        gen_helper_divu(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
    /* rd == r0: undo the write so r0 stays zero. */
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}
  574 +
  575 +static void dec_barrel(DisasContext *dc)
  576 +{
  577 + TCGv t0;
  578 + unsigned int s, t;
  579 +
  580 + s = dc->imm & (1 << 10);
  581 + t = dc->imm & (1 << 9);
  582 +
  583 + LOG_DIS("bs%s%s r%d r%d r%d\n",
  584 + s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
  585 +
  586 + t0 = tcg_temp_new();
  587 +
  588 + tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
  589 + tcg_gen_andi_tl(t0, t0, 31);
  590 +
  591 + if (s)
  592 + tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
  593 + else {
  594 + if (t)
  595 + tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
  596 + else
  597 + tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
  598 + }
  599 +}
  600 +
  601 +static void dec_bit(DisasContext *dc)
  602 +{
  603 + TCGv t0, t1;
  604 + unsigned int op;
  605 +
  606 + op = dc->ir & ((1 << 8) - 1);
  607 + switch (op) {
  608 + case 0x21:
  609 + /* src. */
  610 + t0 = tcg_temp_new();
  611 +
  612 + LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
  613 + tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);
  614 + if (dc->rd) {
  615 + t1 = tcg_temp_new();
  616 + read_carry(dc, t1);
  617 + tcg_gen_shli_tl(t1, t1, 31);
  618 +
  619 + tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
  620 + tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t1);
  621 + tcg_temp_free(t1);
  622 + }
  623 +
  624 + /* Update carry. */
  625 + write_carry(dc, t0);
  626 + tcg_temp_free(t0);
  627 + break;
  628 +
  629 + case 0x1:
  630 + case 0x41:
  631 + /* srl. */
  632 + t0 = tcg_temp_new();
  633 + LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
  634 +
  635 + /* Update carry. */
  636 + tcg_gen_andi_tl(t0, cpu_R[dc->ra], 1);
  637 + write_carry(dc, t0);
  638 + tcg_temp_free(t0);
  639 + if (dc->rd) {
  640 + if (op == 0x41)
  641 + tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
  642 + else
  643 + tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
  644 + }
  645 + break;
  646 + case 0x60:
  647 + LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
  648 + tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
  649 + break;
  650 + case 0x61:
  651 + LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
  652 + tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
  653 + break;
  654 + case 0x64:
  655 + /* wdc. */
  656 + LOG_DIS("wdc r%d\n", dc->ra);
  657 + break;
  658 + case 0x68:
  659 + /* wic. */
  660 + LOG_DIS("wic r%d\n", dc->ra);
  661 + break;
  662 + default:
  663 + cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
  664 + dc->pc, op, dc->rd, dc->ra, dc->rb);
  665 + break;
  666 + }
  667 +}
  668 +
/* Materialize a pending direct branch into the runtime state
 * (env_btaken/env_btarget), downgrading it to an indirect branch.
 * Needed before anything that can fault mid-dslot. */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT) {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
  677 +
  678 +static void dec_imm(DisasContext *dc)
  679 +{
  680 + LOG_DIS("imm %x\n", dc->imm << 16);
  681 + tcg_gen_movi_tl(env_imm, (dc->imm << 16));
  682 + dc->tb_flags |= IMM_FLAG;
  683 + dc->clear_imm = 0;
  684 +}
  685 +
  686 +static inline void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
  687 + unsigned int size)
  688 +{
  689 + int mem_index = cpu_mmu_index(dc->env);
  690 +
  691 + if (size == 1) {
  692 + tcg_gen_qemu_ld8u(dst, addr, mem_index);
  693 + } else if (size == 2) {
  694 + tcg_gen_qemu_ld16u(dst, addr, mem_index);
  695 + } else if (size == 4) {
  696 + tcg_gen_qemu_ld32u(dst, addr, mem_index);
  697 + } else
  698 + cpu_abort(dc->env, "Incorrect load size %d\n", size);
  699 +}
  700 +
/* Compute the effective address for a load/store.
 *
 * Returns either &cpu_R[ra] (no temp allocated) or T, in which case *t
 * has been set to a freshly allocated temp that the CALLER must free
 * (dec_load/dec_store compare "addr == &t" to decide). */
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;

    /* Treat the fast cases first. */
    if (!dc->type_b) {
        /* Register + register addressing. */
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
        return t;
    }
    /* Immediate. */
    if (!extimm) {
        if (dc->imm == 0) {
            /* Zero offset: use ra directly, nothing to free. */
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        /* 32-bit immediate extended by a preceding "imm" insn. */
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    return t;
}
  726 +
/* Decode load insns (lbu/lhu/lw and immediate forms). */
static void dec_load(DisasContext *dc)
{
    TCGv t, *addr;
    unsigned int size;

    size = 1 << (dc->opcode & 3);   /* 1, 2 or 4 bytes. */

    LOG_DIS("l %x %d\n", dc->opcode, size);
    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    if (dc->rd)
        gen_load(dc, cpu_R[dc->rd], *addr, size);
    else {
        /* rd == r0: perform the access (for its fault side effects)
           but discard the data; env_imm is used as scratch here. */
        gen_load(dc, env_imm, *addr, size);
    }

    /* Free the temp unless compute_ldst_addr returned a plain register. */
    if (addr == &t)
        tcg_temp_free(t);
}
  749 +
  750 +static void gen_store(DisasContext *dc, TCGv addr, TCGv val,
  751 + unsigned int size)
  752 +{
  753 + int mem_index = cpu_mmu_index(dc->env);
  754 +
  755 + if (size == 1)
  756 + tcg_gen_qemu_st8(val, addr, mem_index);
  757 + else if (size == 2) {
  758 + tcg_gen_qemu_st16(val, addr, mem_index);
  759 + } else if (size == 4) {
  760 + tcg_gen_qemu_st32(val, addr, mem_index);
  761 + } else
  762 + cpu_abort(dc->env, "Incorrect store size %d\n", size);
  763 +}
  764 +
/* Decode store insns (sb/sh/sw and immediate forms). */
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr;
    unsigned int size;

    size = 1 << (dc->opcode & 3);   /* 1, 2 or 4 bytes. */

    LOG_DIS("s%d%s\n", size, dc->type_b ? "i" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);
    /* rd is used unguarded: cpu_R[0] is assumed to always hold 0. */
    gen_store(dc, *addr, cpu_R[dc->rd], size);
    /* Free the temp unless compute_ldst_addr returned a plain register. */
    if (addr == &t)
        tcg_temp_free(t);
}
  781 +
  782 +static inline void eval_cc(DisasContext *dc, unsigned int cc,
  783 + TCGv d, TCGv a, TCGv b)
  784 +{
  785 + int l1;
  786 +
  787 + switch (cc) {
  788 + case CC_EQ:
  789 + l1 = gen_new_label();
  790 + tcg_gen_movi_tl(env_btaken, 1);
  791 + tcg_gen_brcond_tl(TCG_COND_EQ, a, b, l1);
  792 + tcg_gen_movi_tl(env_btaken, 0);
  793 + gen_set_label(l1);
  794 + break;
  795 + case CC_NE:
  796 + l1 = gen_new_label();
  797 + tcg_gen_movi_tl(env_btaken, 1);
  798 + tcg_gen_brcond_tl(TCG_COND_NE, a, b, l1);
  799 + tcg_gen_movi_tl(env_btaken, 0);
  800 + gen_set_label(l1);
  801 + break;
  802 + case CC_LT:
  803 + l1 = gen_new_label();
  804 + tcg_gen_movi_tl(env_btaken, 1);
  805 + tcg_gen_brcond_tl(TCG_COND_LT, a, b, l1);
  806 + tcg_gen_movi_tl(env_btaken, 0);
  807 + gen_set_label(l1);
  808 + break;
  809 + case CC_LE:
  810 + l1 = gen_new_label();
  811 + tcg_gen_movi_tl(env_btaken, 1);
  812 + tcg_gen_brcond_tl(TCG_COND_LE, a, b, l1);
  813 + tcg_gen_movi_tl(env_btaken, 0);
  814 + gen_set_label(l1);
  815 + break;
  816 + case CC_GE:
  817 + l1 = gen_new_label();
  818 + tcg_gen_movi_tl(env_btaken, 1);
  819 + tcg_gen_brcond_tl(TCG_COND_GE, a, b, l1);
  820 + tcg_gen_movi_tl(env_btaken, 0);
  821 + gen_set_label(l1);
  822 + break;
  823 + case CC_GT:
  824 + l1 = gen_new_label();
  825 + tcg_gen_movi_tl(env_btaken, 1);
  826 + tcg_gen_brcond_tl(TCG_COND_GT, a, b, l1);
  827 + tcg_gen_movi_tl(env_btaken, 0);
  828 + gen_set_label(l1);
  829 + break;
  830 + default:
  831 + cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
  832 + break;
  833 + }
  834 +}
  835 +
/* Set SR_PC to PC_TRUE when env_btaken is nonzero, else to PC_FALSE. */
static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp: write the fall-through pc first, then overwrite
       it with the target unless the branch was not taken. */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
  847 +
  848 +static void dec_bcc(DisasContext *dc)
  849 +{
  850 + unsigned int cc;
  851 + unsigned int dslot;
  852 +
  853 + cc = EXTRACT_FIELD(dc->ir, 21, 23);
  854 + dslot = dc->ir & (1 << 25);
  855 + LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
  856 +
  857 + dc->delayed_branch = 1;
  858 + if (dslot) {
  859 + dc->delayed_branch = 2;
  860 + dc->tb_flags |= D_FLAG;
  861 + tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
  862 + cpu_env, offsetof(CPUState, bimm));
  863 + }
  864 +
  865 + tcg_gen_movi_tl(env_btarget, dc->pc);
  866 + tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
  867 + eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
  868 + dc->jmp = JMP_INDIRECT;
  869 +}
  870 +
/* Unconditional branches: br/bra/brl/bri/... with optional delay slot,
 * absolute addressing and link register. */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);
    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Remember whether the dslot insn carries an imm prefix. */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        /* brki to vectors 0x8/0x18 is the break insn; treated as an
           exception here (presumably the user/debug break vectors —
           confirm against the MicroBlaze reference guide). */
        if (link && !(dc->tb_flags & IMM_FLAG)
            && (dc->imm == 8 || dc->imm == 0x18))
            t_gen_raise_exception(dc, EXCP_BREAK);
        /* An absolute branch to 0 is treated as a debug trap. */
        if (dc->imm == 0)
            t_gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        if (dc->tb_flags & IMM_FLAG) {
            /* Extended imm: target only known at runtime. */
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        } else {
            /* Target known at translation time: allow direct chaining. */
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        }
    }
}
  913 +
/* Return from interrupt: re-enable IE and restore the mode bits.
 * The shift-right-by-one moves the saved mode bits into the UM/VM
 * positions (assumes the saved copies sit one bit above UM/VM in MSR —
 * confirm against the MSR layout). */
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
  930 +
/* Return from break: clear BIP and restore the mode bits (saved copies
 * are shifted down into the UM/VM positions; see do_rti). */
static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}
  947 +
/* Return from exception: re-enable EE, clear EIP and restore the mode
 * bits (saved copies shifted down into UM/VM; see do_rti). */
static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
  966 +
  967 +static void dec_rts(DisasContext *dc)
  968 +{
  969 + unsigned int b_bit, i_bit, e_bit;
  970 +
  971 + i_bit = dc->ir & (1 << 21);
  972 + b_bit = dc->ir & (1 << 22);
  973 + e_bit = dc->ir & (1 << 23);
  974 +
  975 + dc->delayed_branch = 2;
  976 + dc->tb_flags |= D_FLAG;
  977 + tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
  978 + cpu_env, offsetof(CPUState, bimm));
  979 +
  980 + if (i_bit) {
  981 + LOG_DIS("rtid ir=%x\n", dc->ir);
  982 + dc->tb_flags |= DRTI_FLAG;
  983 + } else if (b_bit) {
  984 + LOG_DIS("rtbd ir=%x\n", dc->ir);
  985 + dc->tb_flags |= DRTB_FLAG;
  986 + } else if (e_bit) {
  987 + LOG_DIS("rted ir=%x\n", dc->ir);
  988 + dc->tb_flags |= DRTE_FLAG;
  989 + } else
  990 + LOG_DIS("rts ir=%x\n", dc->ir);
  991 +
  992 + tcg_gen_movi_tl(env_btaken, 1);
  993 + tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
  994 +}
  995 +
  996 +static void dec_null(DisasContext *dc)
  997 +{
  998 + qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
  999 + dc->abort_at_next_insn = 1;
  1000 +}
  1001 +
/* Decoder dispatch table.  An insn matches an entry when
 * (opcode & mask) == bits; the first match wins.  Each DEC_* macro
 * (microblaze-decode.h) expands to a "bits, mask" pair filling the
 * anonymous struct.  The final {0, 0} entry matches everything and
 * routes unknown insns to dec_null. */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {{0, 0}, dec_null}
};
  1027 +
/* Fetch, crack and dispatch one instruction at dc->pc. */
static inline void decode(DisasContext *dc)
{
    uint32_t ir;
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
        tcg_gen_debug_insn_start(dc->pc);

    dc->ir = ir = ldl_code(dc->pc);
    LOG_DIS("%8.8x\t", dc->ir);

    /* Guard against runaway translation through zeroed memory: abort
       after more than 4 consecutive all-zero insns. */
    if (dc->ir)
        dc->nr_nops = 0;
    else {
        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4)
            cpu_abort(dc->env, "fetching nop sequence\n");
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    /* Crack the common operand fields. */
    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Dispatch to the first decoder whose bits/mask matches; the
       table's catch-all tail entry handles unknown insns. */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
  1064 +
  1065 +
  1066 +static void check_breakpoint(CPUState *env, DisasContext *dc)
  1067 +{
  1068 + CPUBreakpoint *bp;
  1069 +
  1070 + if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
  1071 + TAILQ_FOREACH(bp, &env->breakpoints, entry) {
  1072 + if (bp->pc == dc->pc) {
  1073 + t_gen_raise_exception(dc, EXCP_DEBUG);
  1074 + dc->is_jmp = DISAS_UPDATE;
  1075 + }
  1076 + }
  1077 + }
  1078 +}
  1079 +
/* generate intermediate code for basic block 'tb'.
 *
 * env       - CPU state the code is translated for.
 * tb        - the translation block being filled in.
 * search_pc - when non-zero, we are re-translating an already executed
 *             tb purely to rebuild the gen_opc_pc/gen_opc_instr_start/
 *             gen_opc_icount side tables (used to recover the guest PC
 *             after an exception); tb->size/tb->icount are not updated
 *             in that mode.
 */
static void
gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
                               int search_pc)
{
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    qemu_log_try_set_file(stderr);

    pc_start = tb->pc;
    dc->env = env;
    dc->tb = tb;
    /* Keep the entry flags so we can detect whether the per-tb cpu
       state changed during translation (forces a PC update below).  */
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* Non-zero if this tb starts inside a branch delay slot (D_FLAG
       carried over from the previous tb).  */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    dc->ppc = pc_start;
    dc->pc = pc_start;
    dc->cache_pc = -1;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    /* MicroBlaze instructions are 32-bit; the PC must be word aligned.  */
    if (pc_start & 3)
        cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(env, 0);
#endif
    }

    /* Never translate across a guest page boundary.  */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do
    {
#if SIM_COMPAT
        /* In simulator-compat mode, trap to the debugger before every
           insn so state can be compared step by step.  */
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            /* Record opcode-buffer offset -> guest PC mapping; pad any
               gap in gen_opc_instr_start with zeros.  */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas. */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        /* decode() clears clear_imm when the insn was an imm prefix;
           otherwise any pending IMM state is consumed here.  */
        dc->clear_imm = 1;
        decode(dc);
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->ppc = dc->pc;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* The delay-slot insn has been emitted: complete any
                   pending rti/rtb/rte side effects, then the branch.  */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag. */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining. */
                if (dc->jmp != JMP_DIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                }
                /* A resolved branch always ends the tb.  */
                break;
            }
        }
        if (env->singlestep_enabled)
            break;
    } while (!dc->is_jmp && !dc->cpustate_changed
             && gen_opc_ptr < gen_opc_end
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT) {
        if (dc->tb_flags & D_FLAG) {
            /* tb ended with the delay slot still pending; force a full
               PC/branch-state sync so the next tb can finish it.  */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed. */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(env->singlestep_enabled)) {
        t_gen_raise_exception(dc, EXCP_DEBUG);
        if (dc->is_jmp == DISAS_NEXT)
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the tail of the instr_start table.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%zd\n",
                 dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
  1263 +
  1264 +void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
  1265 +{
  1266 + gen_intermediate_code_internal(env, tb, 0);
  1267 +}
  1268 +
  1269 +void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
  1270 +{
  1271 + gen_intermediate_code_internal(env, tb, 1);
  1272 +}
  1273 +
  1274 +void cpu_dump_state (CPUState *env, FILE *f,
  1275 + int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
  1276 + int flags)
  1277 +{
  1278 + int i;
  1279 +
  1280 + if (!env || !f)
  1281 + return;
  1282 +
  1283 + cpu_fprintf(f, "IN: PC=%x %s\n",
  1284 + env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
  1285 + cpu_fprintf(f, "rmsr=%x resr=%x debug[%x] imm=%x iflags=%x\n",
  1286 + env->sregs[SR_MSR], env->sregs[SR_ESR],
  1287 + env->debug, env->imm, env->iflags);
  1288 + cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s)\n",
  1289 + env->btaken, env->btarget,
  1290 + (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
  1291 + (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel");
  1292 + for (i = 0; i < 32; i++) {
  1293 + cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
  1294 + if ((i + 1) % 4 == 0)
  1295 + cpu_fprintf(f, "\n");
  1296 + }
  1297 + cpu_fprintf(f, "\n\n");
  1298 +}
  1299 +
/* Allocate and initialize a MicroBlaze CPU state.
 *
 * cpu_model is currently ignored: a single fixed feature set is
 * advertised through the PVR registers.  On the first call the TCG
 * globals (env pointer, register file, special registers, helpers)
 * are also registered; subsequent calls skip that step.
 * Returns the newly allocated CPUState (ownership to the caller).
 */
CPUState *cpu_mb_init (const char *cpu_model)
{
    CPUState *env;
    /* TCG globals may only be registered once per process, even when
       several CPUs are instantiated.  */
    static int tcg_initialized = 0;
    int i;

    env = qemu_mallocz(sizeof(CPUState));

    cpu_exec_init(env);
    cpu_reset(env);

    /* Advertise the emulated feature set: barrel shifter, divider,
       hw multiplier, exceptions, caches and MMU.  (0xb << 8) is the
       version field in PVR0.  */
    env->pvr.regs[0] = PVR0_PVR_FULL_MASK \
                       | PVR0_USE_BARREL_MASK \
                       | PVR0_USE_DIV_MASK \
                       | PVR0_USE_HW_MUL_MASK \
                       | PVR0_USE_EXC_MASK \
                       | PVR0_USE_ICACHE_MASK \
                       | PVR0_USE_DCACHE_MASK \
                       | PVR0_USE_MMU \
                       | (0xb << 8);
    env->pvr.regs[2] = PVR2_D_OPB_MASK \
                       | PVR2_D_LMB_MASK \
                       | PVR2_I_OPB_MASK \
                       | PVR2_I_LMB_MASK \
                       | PVR2_USE_MSR_INSTR \
                       | PVR2_USE_PCMP_INSTR \
                       | PVR2_USE_BARREL_MASK \
                       | PVR2_USE_DIV_MASK \
                       | PVR2_USE_HW_MUL_MASK \
                       | PVR2_USE_MUL64_MASK \
                       | 0;
    env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family. */
    env->pvr.regs[11] = PVR11_USE_MMU;

    if (tcg_initialized)
        return env;

    tcg_initialized = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* Expose selected CPUState fields as named TCG globals so the
       translator can read/write them directly.  */
    env_debug = tcg_global_mem_new(TCG_AREG0,
                                   offsetof(CPUState, debug),
                                   "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUState, iflags),
                                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUState, imm),
                                 "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUState, btarget),
                                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                                    offsetof(CPUState, btaken),
                                    "btaken");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                                      offsetof(CPUState, regs[i]),
                                      regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUState, sregs[i]),
                                       special_regnames[i]);
    }
    /* Presumably GEN_HELPER == 2 selects the helper-registration
       expansion of helper.h (QEMU helper macro convention) — the
       helpers become known to TCG here.  */
#define GEN_HELPER 2
#include "helper.h"

    return env;
}
  1371 +
/* Reset the CPU to its power-on state.
 * Logs the pre-reset state (when CPU_LOG_RESET is enabled), then
 * clears the architectural part of CPUMBState and flushes the TLB.
 */
void cpu_reset (CPUState *env)
{
    /* Log before clearing so the pre-reset state is visible.  */
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    /* Only clear up to (not including) 'breakpoints' so that QEMU
       bookkeeping fields placed at/after it survive the reset.  */
    memset(env, 0, offsetof(CPUMBState, breakpoints));
    tlb_flush(env, 1);

    env->sregs[SR_MSR] = 0;  /* already zero from the memset; explicit for clarity */
#if defined(CONFIG_USER_ONLY)
    /* start in user mode with interrupts enabled. */
    /* NOTE(review): no MSR user-mode/IE bits are actually set here,
       only the PVR family register — confirm intended behavior.  */
    env->pvr.regs[10] = 0x0c000000; /* Spartan 3a dsp. */
#else
    mmu_init(&env->mmu);
#endif
}
  1390 +
  1391 +void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
  1392 + unsigned long searched_pc, int pc_pos, void *puc)
  1393 +{
  1394 + env->sregs[SR_PC] = gen_opc_pc[pc_pos];
  1395 +}
... ...