Commit 811d4cf4b08ba141b7f9c3481c5ab50d47123499
1 parent
6b4c11cd
ARM host support for TCG targets.
Updated from previous version to use the tcg prologue/epilogue mechanism, may be slower than direct call. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4500 c046a42c-6fe2-441c-8c8c-71466251a162
Showing
4 changed files
with
1639 additions
and
2 deletions
exec-all.h
| ... | ... | @@ -142,7 +142,7 @@ static inline int tlb_set_page(CPUState *env1, target_ulong vaddr, |
| 142 | 142 | |
| 143 | 143 | #define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE) |
| 144 | 144 | |
| 145 | -#if defined(__powerpc__) || defined(__x86_64__) | |
| 145 | +#if defined(__powerpc__) || defined(__x86_64__) || defined(__arm__) | |
| 146 | 146 | #define USE_DIRECT_JUMP |
| 147 | 147 | #endif |
| 148 | 148 | #if defined(__i386__) && !defined(_WIN32) |
| ... | ... | @@ -240,6 +240,22 @@ static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr |
| 240 | 240 | *(uint32_t *)jmp_addr = addr - (jmp_addr + 4); |
| 241 | 241 | /* no need to flush icache explicitly */ |
| 242 | 242 | } |
| 243 | +#elif defined(__arm__) | |
| 244 | +static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr) | |
| 245 | +{ | |
| 246 | + register unsigned long _beg __asm ("a1"); | |
| 247 | + register unsigned long _end __asm ("a2"); | |
| 248 | + register unsigned long _flg __asm ("a3"); | |
| 249 | + | |
| 250 | + /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */ | |
| 251 | + *(uint32_t *)jmp_addr |= ((addr - (jmp_addr + 8)) >> 2) & 0xffffff; | |
| 252 | + | |
| 253 | + /* flush icache */ | |
| 254 | + _beg = jmp_addr; | |
| 255 | + _end = jmp_addr + 4; | |
| 256 | + _flg = 0; | |
| 257 | + __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg)); | |
| 258 | +} | |
| 243 | 259 | #endif |
| 244 | 260 | |
| 245 | 261 | static inline void tb_set_jmp_target(TranslationBlock *tb, | ... | ... |
tcg/README
| ... | ... | @@ -75,7 +75,7 @@ depending on their declarations). |
| 75 | 75 | * Helpers: |
| 76 | 76 | |
| 77 | 77 | Using the tcg_gen_helper_x_y it is possible to call any function |
| 78 | -taking i32, i64 or pointer types types. Before calling an helper, all | |
| 78 | +taking i32, i64 or pointer types. Before calling an helper, all | |
| 79 | 79 | globals are stored at their canonical location and it is assumed that |
| 80 | 80 | the function can modify them. In the future, function modifiers will |
| 81 | 81 | be allowed to tell that the helper does not read or write some globals. | ... | ... |
tcg/arm/tcg-target.c
0 → 100644
| 1 | +/* | |
| 2 | + * Tiny Code Generator for QEMU | |
| 3 | + * | |
| 4 | + * Copyright (c) 2008 Andrzej Zaborowski | |
| 5 | + * | |
| 6 | + * Permission is hereby granted, free of charge, to any person obtaining a copy | |
| 7 | + * of this software and associated documentation files (the "Software"), to deal | |
| 8 | + * in the Software without restriction, including without limitation the rights | |
| 9 | + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
| 10 | + * copies of the Software, and to permit persons to whom the Software is | |
| 11 | + * furnished to do so, subject to the following conditions: | |
| 12 | + * | |
| 13 | + * The above copyright notice and this permission notice shall be included in | |
| 14 | + * all copies or substantial portions of the Software. | |
| 15 | + * | |
| 16 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
| 17 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
| 18 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
| 19 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
| 20 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
| 21 | + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
| 22 | + * THE SOFTWARE. | |
| 23 | + */ | |
| 24 | +const char *tcg_target_reg_names[TCG_TARGET_NB_REGS] = { | |
| 25 | + "%r0", | |
| 26 | + "%r1", | |
| 27 | + "%r2", | |
| 28 | + "%r3", | |
| 29 | + "%r4", | |
| 30 | + "%r5", | |
| 31 | + "%r6", | |
| 32 | + "%r7", | |
| 33 | + "%r8", | |
| 34 | + "%r9", | |
| 35 | + "%r10", | |
| 36 | + "%r11", | |
| 37 | + "%r12", | |
| 38 | + "%r13", | |
| 39 | + "%r14", | |
| 40 | +}; | |
| 41 | + | |
| 42 | +int tcg_target_reg_alloc_order[] = { | |
| 43 | + TCG_REG_R0, | |
| 44 | + TCG_REG_R1, | |
| 45 | + TCG_REG_R2, | |
| 46 | + TCG_REG_R3, | |
| 47 | + TCG_REG_R4, | |
| 48 | + TCG_REG_R5, | |
| 49 | + TCG_REG_R6, | |
| 50 | + TCG_REG_R7, | |
| 51 | + TCG_REG_R8, | |
| 52 | + TCG_REG_R9, | |
| 53 | + TCG_REG_R10, | |
| 54 | + TCG_REG_R11, | |
| 55 | + TCG_REG_R12, | |
| 56 | + TCG_REG_R13, | |
| 57 | + TCG_REG_R14, | |
| 58 | +}; | |
| 59 | + | |
| 60 | +const int tcg_target_call_iarg_regs[4] = { | |
| 61 | + TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3 | |
| 62 | +}; | |
| 63 | +const int tcg_target_call_oarg_regs[2] = { | |
| 64 | + TCG_REG_R0, TCG_REG_R1 | |
| 65 | +}; | |
| 66 | + | |
| 67 | +static void patch_reloc(uint8_t *code_ptr, int type, | |
| 68 | + tcg_target_long value, tcg_target_long addend) | |
| 69 | +{ | |
| 70 | + switch (type) { | |
| 71 | + case R_ARM_ABS32: | |
| 72 | + *(uint32_t *) code_ptr = value; | |
| 73 | + break; | |
| 74 | + | |
| 75 | + case R_ARM_CALL: | |
| 76 | + case R_ARM_JUMP24: | |
| 77 | + default: | |
| 78 | + tcg_abort(); | |
| 79 | + | |
| 80 | + case R_ARM_PC24: | |
| 81 | + *(uint32_t *) code_ptr |= | |
| 82 | + ((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff; | |
| 83 | + break; | |
| 84 | + } | |
| 85 | +} | |
| 86 | + | |
/* Maximum number of registers used for input function arguments
 * (r0-r3, independent of the call flags). */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}

#define USE_TLB
| 94 | + | |
| 95 | +/* parse target specific constraints */ | |
| 96 | +int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) | |
| 97 | +{ | |
| 98 | + const char *ct_str; | |
| 99 | + | |
| 100 | + ct_str = *pct_str; | |
| 101 | + switch (ct_str[0]) { | |
| 102 | + case 'r': | |
| 103 | +#ifndef CONFIG_SOFTMMU | |
| 104 | + case 'd': | |
| 105 | + case 'D': | |
| 106 | + case 'x': | |
| 107 | + case 'X': | |
| 108 | +#endif | |
| 109 | + ct->ct |= TCG_CT_REG; | |
| 110 | + tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
| 111 | + break; | |
| 112 | + | |
| 113 | +#ifdef CONFIG_SOFTMMU | |
| 114 | + /* qemu_ld/st inputs (unless 'd', 'D' or 'X') */ | |
| 115 | + case 'x': | |
| 116 | + ct->ct |= TCG_CT_REG; | |
| 117 | + tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
| 118 | +# ifdef USE_TLB | |
| 119 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
| 120 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | |
| 121 | +# endif | |
| 122 | + break; | |
| 123 | + | |
| 124 | + /* qemu_ld/st data_reg */ | |
| 125 | + case 'd': | |
| 126 | + ct->ct |= TCG_CT_REG; | |
| 127 | + tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
| 128 | + /* r0 and optionally r1 will be overwritten by the address | |
| 129 | + * so don't use these. */ | |
| 130 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
| 131 | +# if TARGET_LONG_BITS == 64 || defined(USE_TLB) | |
| 132 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | |
| 133 | +# endif | |
| 134 | + break; | |
| 135 | + | |
| 136 | + /* qemu_ld/st64 data_reg2 */ | |
| 137 | + case 'D': | |
| 138 | + ct->ct |= TCG_CT_REG; | |
| 139 | + tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
| 140 | + /* r0, r1 and optionally r2 will be overwritten by the address | |
| 141 | + * and the low word of data, so don't use these. */ | |
| 142 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
| 143 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | |
| 144 | +# if TARGET_LONG_BITS == 64 | |
| 145 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); | |
| 146 | +# endif | |
| 147 | + break; | |
| 148 | + | |
| 149 | +# if TARGET_LONG_BITS == 64 | |
| 150 | + /* qemu_ld/st addr_reg2 */ | |
| 151 | + case 'X': | |
| 152 | + ct->ct |= TCG_CT_REG; | |
| 153 | + tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
| 154 | + /* r0 will be overwritten by the low word of base, so don't use it. */ | |
| 155 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
| 156 | +# ifdef USE_TLB | |
| 157 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | |
| 158 | +# endif | |
| 159 | + break; | |
| 160 | +# endif | |
| 161 | +#endif | |
| 162 | + | |
| 163 | + case '1': | |
| 164 | + ct->ct |= TCG_CT_REG; | |
| 165 | + tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
| 166 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
| 167 | + break; | |
| 168 | + | |
| 169 | + case '2': | |
| 170 | + ct->ct |= TCG_CT_REG; | |
| 171 | + tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
| 172 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
| 173 | + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | |
| 174 | + break; | |
| 175 | + | |
| 176 | + default: | |
| 177 | + return -1; | |
| 178 | + } | |
| 179 | + ct_str++; | |
| 180 | + *pct_str = ct_str; | |
| 181 | + | |
| 182 | + return 0; | |
| 183 | +} | |
| 184 | + | |
| 185 | +/* Test if a constant matches the constraint. | |
| 186 | + * TODO: define constraints for: | |
| 187 | + * | |
| 188 | + * ldr/str offset: between -0xfff and 0xfff | |
| 189 | + * ldrh/strh offset: between -0xff and 0xff | |
| 190 | + * mov operand2: values represented with x << (2 * y), x < 0x100 | |
| 191 | + * add, sub, eor...: ditto | |
| 192 | + */ | |
| 193 | +static inline int tcg_target_const_match(tcg_target_long val, | |
| 194 | + const TCGArgConstraint *arg_ct) | |
| 195 | +{ | |
| 196 | + int ct; | |
| 197 | + ct = arg_ct->ct; | |
| 198 | + if (ct & TCG_CT_CONST) | |
| 199 | + return 1; | |
| 200 | + else | |
| 201 | + return 0; | |
| 202 | +} | |
| 203 | + | |
| 204 | +enum arm_data_opc_e { | |
| 205 | + ARITH_AND = 0x0, | |
| 206 | + ARITH_EOR = 0x1, | |
| 207 | + ARITH_SUB = 0x2, | |
| 208 | + ARITH_RSB = 0x3, | |
| 209 | + ARITH_ADD = 0x4, | |
| 210 | + ARITH_ADC = 0x5, | |
| 211 | + ARITH_SBC = 0x6, | |
| 212 | + ARITH_RSC = 0x7, | |
| 213 | + ARITH_CMP = 0xa, | |
| 214 | + ARITH_CMN = 0xb, | |
| 215 | + ARITH_ORR = 0xc, | |
| 216 | + ARITH_MOV = 0xd, | |
| 217 | + ARITH_BIC = 0xe, | |
| 218 | + ARITH_MVN = 0xf, | |
| 219 | +}; | |
| 220 | + | |
| 221 | +#define TO_CPSR(opc) ((opc == ARITH_CMP || opc == ARITH_CMN) << 20) | |
| 222 | + | |
| 223 | +#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00) | |
| 224 | +#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20) | |
| 225 | +#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40) | |
| 226 | +#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60) | |
| 227 | +#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10) | |
| 228 | +#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30) | |
| 229 | +#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50) | |
| 230 | +#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70) | |
| 231 | + | |
| 232 | +enum arm_cond_code_e { | |
| 233 | + COND_EQ = 0x0, | |
| 234 | + COND_NE = 0x1, | |
| 235 | + COND_CS = 0x2, /* Unsigned greater or equal */ | |
| 236 | + COND_CC = 0x3, /* Unsigned less than */ | |
| 237 | + COND_MI = 0x4, /* Negative */ | |
| 238 | + COND_PL = 0x5, /* Zero or greater */ | |
| 239 | + COND_VS = 0x6, /* Overflow */ | |
| 240 | + COND_VC = 0x7, /* No overflow */ | |
| 241 | + COND_HI = 0x8, /* Unsigned greater than */ | |
| 242 | + COND_LS = 0x9, /* Unsigned less or equal */ | |
| 243 | + COND_GE = 0xa, | |
| 244 | + COND_LT = 0xb, | |
| 245 | + COND_GT = 0xc, | |
| 246 | + COND_LE = 0xd, | |
| 247 | + COND_AL = 0xe, | |
| 248 | +}; | |
| 249 | + | |
| 250 | +static const uint8_t tcg_cond_to_arm_cond[10] = { | |
| 251 | + [TCG_COND_EQ] = COND_EQ, | |
| 252 | + [TCG_COND_NE] = COND_NE, | |
| 253 | + [TCG_COND_LT] = COND_LT, | |
| 254 | + [TCG_COND_GE] = COND_GE, | |
| 255 | + [TCG_COND_LE] = COND_LE, | |
| 256 | + [TCG_COND_GT] = COND_GT, | |
| 257 | + /* unsigned */ | |
| 258 | + [TCG_COND_LTU] = COND_CC, | |
| 259 | + [TCG_COND_GEU] = COND_CS, | |
| 260 | + [TCG_COND_LEU] = COND_LS, | |
| 261 | + [TCG_COND_GTU] = COND_HI, | |
| 262 | +}; | |
| 263 | + | |
| 264 | +static inline void tcg_out_bx(TCGContext *s, int cond, int rn) | |
| 265 | +{ | |
| 266 | + tcg_out32(s, (cond << 28) | 0x012fff10 | rn); | |
| 267 | +} | |
| 268 | + | |
| 269 | +static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset) | |
| 270 | +{ | |
| 271 | + tcg_out32(s, (cond << 28) | 0x0a000000 | | |
| 272 | + (((offset - 8) >> 2) & 0x00ffffff)); | |
| 273 | +} | |
| 274 | + | |
| 275 | +static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset) | |
| 276 | +{ | |
| 277 | + tcg_out32(s, (cond << 28) | 0x0b000000 | | |
| 278 | + (((offset - 8) >> 2) & 0x00ffffff)); | |
| 279 | +} | |
| 280 | + | |
| 281 | +static inline void tcg_out_dat_reg(TCGContext *s, | |
| 282 | + int cond, int opc, int rd, int rn, int rm, int shift) | |
| 283 | +{ | |
| 284 | + tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) | | |
| 285 | + (rn << 16) | (rd << 12) | shift | rm); | |
| 286 | +} | |
| 287 | + | |
| 288 | +static inline void tcg_out_dat_reg2(TCGContext *s, | |
| 289 | + int cond, int opc0, int opc1, int rd0, int rd1, | |
| 290 | + int rn0, int rn1, int rm0, int rm1, int shift) | |
| 291 | +{ | |
| 292 | + tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) | | |
| 293 | + (rn0 << 16) | (rd0 << 12) | shift | rm0); | |
| 294 | + tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) | | |
| 295 | + (rn1 << 16) | (rd1 << 12) | shift | rm1); | |
| 296 | +} | |
| 297 | + | |
| 298 | +static inline void tcg_out_dat_imm(TCGContext *s, | |
| 299 | + int cond, int opc, int rd, int rn, int im) | |
| 300 | +{ | |
| 301 | + tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | | |
| 302 | + (rn << 16) | (rd << 12) | im); | |
| 303 | +} | |
| 304 | + | |
| 305 | +static inline void tcg_out_movi32(TCGContext *s, | |
| 306 | + int cond, int rd, int32_t arg) | |
| 307 | +{ | |
| 308 | + int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8); | |
| 309 | + | |
| 310 | + /* TODO: This is very suboptimal, we can easily have a constant | |
| 311 | + * pool somewhere after all the instructions. */ | |
| 312 | + | |
| 313 | + if (arg < 0 && arg > -0x100) | |
| 314 | + return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff); | |
| 315 | + | |
| 316 | + if (offset < 0x100 && offset > -0x100) | |
| 317 | + return offset >= 0 ? | |
| 318 | + tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) : | |
| 319 | + tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset); | |
| 320 | + | |
| 321 | + tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff); | |
| 322 | + if (arg & 0x0000ff00) | |
| 323 | + tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd, | |
| 324 | + ((arg >> 8) & 0xff) | 0xc00); | |
| 325 | + if (arg & 0x00ff0000) | |
| 326 | + tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd, | |
| 327 | + ((arg >> 16) & 0xff) | 0x800); | |
| 328 | + if (arg & 0xff000000) | |
| 329 | + tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd, | |
| 330 | + ((arg >> 24) & 0xff) | 0x400); | |
| 331 | +} | |
| 332 | + | |
| 333 | +static inline void tcg_out_mul32(TCGContext *s, | |
| 334 | + int cond, int rd, int rs, int rm) | |
| 335 | +{ | |
| 336 | + if (rd != rm) | |
| 337 | + tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) | | |
| 338 | + (rs << 8) | 0x90 | rm); | |
| 339 | + else if (rd != rs) | |
| 340 | + tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) | | |
| 341 | + (rm << 8) | 0x90 | rs); | |
| 342 | + else { | |
| 343 | + tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) | | |
| 344 | + (rs << 8) | 0x90 | rm); | |
| 345 | + tcg_out_dat_reg(s, cond, ARITH_MOV, | |
| 346 | + rd, 0, 8, SHIFT_IMM_LSL(0)); | |
| 347 | + } | |
| 348 | +} | |
| 349 | + | |
| 350 | +static inline void tcg_out_umull32(TCGContext *s, | |
| 351 | + int cond, int rd0, int rd1, int rs, int rm) | |
| 352 | +{ | |
| 353 | + if (rd0 != rm && rd1 != rm) | |
| 354 | + tcg_out32(s, (cond << 28) | 0x800090 | | |
| 355 | + (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm); | |
| 356 | + else if (rd0 != rs && rd1 != rs) | |
| 357 | + tcg_out32(s, (cond << 28) | 0x800090 | | |
| 358 | + (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs); | |
| 359 | + else { | |
| 360 | + tcg_out_dat_reg(s, cond, ARITH_MOV, | |
| 361 | + TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0)); | |
| 362 | + tcg_out32(s, (cond << 28) | 0x800098 | | |
| 363 | + (rd1 << 16) | (rd0 << 12) | (rs << 8)); | |
| 364 | + } | |
| 365 | +} | |
| 366 | + | |
| 367 | +static inline void tcg_out_smull32(TCGContext *s, | |
| 368 | + int cond, int rd0, int rd1, int rs, int rm) | |
| 369 | +{ | |
| 370 | + if (rd0 != rm && rd1 != rm) | |
| 371 | + tcg_out32(s, (cond << 28) | 0xc00090 | | |
| 372 | + (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm); | |
| 373 | + else if (rd0 != rs && rd1 != rs) | |
| 374 | + tcg_out32(s, (cond << 28) | 0xc00090 | | |
| 375 | + (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs); | |
| 376 | + else { | |
| 377 | + tcg_out_dat_reg(s, cond, ARITH_MOV, | |
| 378 | + TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0)); | |
| 379 | + tcg_out32(s, (cond << 28) | 0xc00098 | | |
| 380 | + (rd1 << 16) | (rd0 << 12) | (rs << 8)); | |
| 381 | + } | |
| 382 | +} | |
| 383 | + | |
| 384 | +static inline void tcg_out_ld32_12(TCGContext *s, int cond, | |
| 385 | + int rd, int rn, tcg_target_long im) | |
| 386 | +{ | |
| 387 | + if (im >= 0) | |
| 388 | + tcg_out32(s, (cond << 28) | 0x05900000 | | |
| 389 | + (rn << 16) | (rd << 12) | (im & 0xfff)); | |
| 390 | + else | |
| 391 | + tcg_out32(s, (cond << 28) | 0x05100000 | | |
| 392 | + (rn << 16) | (rd << 12) | ((-im) & 0xfff)); | |
| 393 | +} | |
| 394 | + | |
| 395 | +static inline void tcg_out_st32_12(TCGContext *s, int cond, | |
| 396 | + int rd, int rn, tcg_target_long im) | |
| 397 | +{ | |
| 398 | + if (im >= 0) | |
| 399 | + tcg_out32(s, (cond << 28) | 0x05800000 | | |
| 400 | + (rn << 16) | (rd << 12) | (im & 0xfff)); | |
| 401 | + else | |
| 402 | + tcg_out32(s, (cond << 28) | 0x05000000 | | |
| 403 | + (rn << 16) | (rd << 12) | ((-im) & 0xfff)); | |
| 404 | +} | |
| 405 | + | |
| 406 | +static inline void tcg_out_ld32_r(TCGContext *s, int cond, | |
| 407 | + int rd, int rn, int rm) | |
| 408 | +{ | |
| 409 | + tcg_out32(s, (cond << 28) | 0x07900000 | | |
| 410 | + (rn << 16) | (rd << 12) | rm); | |
| 411 | +} | |
| 412 | + | |
| 413 | +static inline void tcg_out_st32_r(TCGContext *s, int cond, | |
| 414 | + int rd, int rn, int rm) | |
| 415 | +{ | |
| 416 | + tcg_out32(s, (cond << 28) | 0x07800000 | | |
| 417 | + (rn << 16) | (rd << 12) | rm); | |
| 418 | +} | |
| 419 | + | |
| 420 | +static inline void tcg_out_ld16u_8(TCGContext *s, int cond, | |
| 421 | + int rd, int rn, tcg_target_long im) | |
| 422 | +{ | |
| 423 | + if (im >= 0) | |
| 424 | + tcg_out32(s, (cond << 28) | 0x01d000b0 | | |
| 425 | + (rn << 16) | (rd << 12) | | |
| 426 | + ((im & 0xf0) << 4) | (im & 0xf)); | |
| 427 | + else | |
| 428 | + tcg_out32(s, (cond << 28) | 0x015000b0 | | |
| 429 | + (rn << 16) | (rd << 12) | | |
| 430 | + (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
| 431 | +} | |
| 432 | + | |
| 433 | +static inline void tcg_out_st16u_8(TCGContext *s, int cond, | |
| 434 | + int rd, int rn, tcg_target_long im) | |
| 435 | +{ | |
| 436 | + if (im >= 0) | |
| 437 | + tcg_out32(s, (cond << 28) | 0x01c000b0 | | |
| 438 | + (rn << 16) | (rd << 12) | | |
| 439 | + ((im & 0xf0) << 4) | (im & 0xf)); | |
| 440 | + else | |
| 441 | + tcg_out32(s, (cond << 28) | 0x014000b0 | | |
| 442 | + (rn << 16) | (rd << 12) | | |
| 443 | + (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
| 444 | +} | |
| 445 | + | |
| 446 | +static inline void tcg_out_ld16u_r(TCGContext *s, int cond, | |
| 447 | + int rd, int rn, int rm) | |
| 448 | +{ | |
| 449 | + tcg_out32(s, (cond << 28) | 0x019000b0 | | |
| 450 | + (rn << 16) | (rd << 12) | rm); | |
| 451 | +} | |
| 452 | + | |
| 453 | +static inline void tcg_out_st16u_r(TCGContext *s, int cond, | |
| 454 | + int rd, int rn, int rm) | |
| 455 | +{ | |
| 456 | + tcg_out32(s, (cond << 28) | 0x018000b0 | | |
| 457 | + (rn << 16) | (rd << 12) | rm); | |
| 458 | +} | |
| 459 | + | |
| 460 | +static inline void tcg_out_ld16s_8(TCGContext *s, int cond, | |
| 461 | + int rd, int rn, tcg_target_long im) | |
| 462 | +{ | |
| 463 | + if (im >= 0) | |
| 464 | + tcg_out32(s, (cond << 28) | 0x01d000f0 | | |
| 465 | + (rn << 16) | (rd << 12) | | |
| 466 | + ((im & 0xf0) << 4) | (im & 0xf)); | |
| 467 | + else | |
| 468 | + tcg_out32(s, (cond << 28) | 0x015000f0 | | |
| 469 | + (rn << 16) | (rd << 12) | | |
| 470 | + (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
| 471 | +} | |
| 472 | + | |
| 473 | +static inline void tcg_out_st16s_8(TCGContext *s, int cond, | |
| 474 | + int rd, int rn, tcg_target_long im) | |
| 475 | +{ | |
| 476 | + if (im >= 0) | |
| 477 | + tcg_out32(s, (cond << 28) | 0x01c000f0 | | |
| 478 | + (rn << 16) | (rd << 12) | | |
| 479 | + ((im & 0xf0) << 4) | (im & 0xf)); | |
| 480 | + else | |
| 481 | + tcg_out32(s, (cond << 28) | 0x014000f0 | | |
| 482 | + (rn << 16) | (rd << 12) | | |
| 483 | + (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
| 484 | +} | |
| 485 | + | |
| 486 | +static inline void tcg_out_ld16s_r(TCGContext *s, int cond, | |
| 487 | + int rd, int rn, int rm) | |
| 488 | +{ | |
| 489 | + tcg_out32(s, (cond << 28) | 0x019000f0 | | |
| 490 | + (rn << 16) | (rd << 12) | rm); | |
| 491 | +} | |
| 492 | + | |
| 493 | +static inline void tcg_out_st16s_r(TCGContext *s, int cond, | |
| 494 | + int rd, int rn, int rm) | |
| 495 | +{ | |
| 496 | + tcg_out32(s, (cond << 28) | 0x018000f0 | | |
| 497 | + (rn << 16) | (rd << 12) | rm); | |
| 498 | +} | |
| 499 | + | |
| 500 | +static inline void tcg_out_ld8_12(TCGContext *s, int cond, | |
| 501 | + int rd, int rn, tcg_target_long im) | |
| 502 | +{ | |
| 503 | + if (im >= 0) | |
| 504 | + tcg_out32(s, (cond << 28) | 0x05d00000 | | |
| 505 | + (rn << 16) | (rd << 12) | (im & 0xfff)); | |
| 506 | + else | |
| 507 | + tcg_out32(s, (cond << 28) | 0x05500000 | | |
| 508 | + (rn << 16) | (rd << 12) | ((-im) & 0xfff)); | |
| 509 | +} | |
| 510 | + | |
| 511 | +static inline void tcg_out_st8_12(TCGContext *s, int cond, | |
| 512 | + int rd, int rn, tcg_target_long im) | |
| 513 | +{ | |
| 514 | + if (im >= 0) | |
| 515 | + tcg_out32(s, (cond << 28) | 0x05c00000 | | |
| 516 | + (rn << 16) | (rd << 12) | (im & 0xfff)); | |
| 517 | + else | |
| 518 | + tcg_out32(s, (cond << 28) | 0x05400000 | | |
| 519 | + (rn << 16) | (rd << 12) | ((-im) & 0xfff)); | |
| 520 | +} | |
| 521 | + | |
| 522 | +static inline void tcg_out_ld8_r(TCGContext *s, int cond, | |
| 523 | + int rd, int rn, int rm) | |
| 524 | +{ | |
| 525 | + tcg_out32(s, (cond << 28) | 0x07d00000 | | |
| 526 | + (rn << 16) | (rd << 12) | rm); | |
| 527 | +} | |
| 528 | + | |
| 529 | +static inline void tcg_out_st8_r(TCGContext *s, int cond, | |
| 530 | + int rd, int rn, int rm) | |
| 531 | +{ | |
| 532 | + tcg_out32(s, (cond << 28) | 0x07c00000 | | |
| 533 | + (rn << 16) | (rd << 12) | rm); | |
| 534 | +} | |
| 535 | + | |
| 536 | +static inline void tcg_out_ld8s_8(TCGContext *s, int cond, | |
| 537 | + int rd, int rn, tcg_target_long im) | |
| 538 | +{ | |
| 539 | + if (im >= 0) | |
| 540 | + tcg_out32(s, (cond << 28) | 0x01d000d0 | | |
| 541 | + (rn << 16) | (rd << 12) | | |
| 542 | + ((im & 0xf0) << 4) | (im & 0xf)); | |
| 543 | + else | |
| 544 | + tcg_out32(s, (cond << 28) | 0x015000d0 | | |
| 545 | + (rn << 16) | (rd << 12) | | |
| 546 | + (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
| 547 | +} | |
| 548 | + | |
| 549 | +static inline void tcg_out_st8s_8(TCGContext *s, int cond, | |
| 550 | + int rd, int rn, tcg_target_long im) | |
| 551 | +{ | |
| 552 | + if (im >= 0) | |
| 553 | + tcg_out32(s, (cond << 28) | 0x01c000d0 | | |
| 554 | + (rn << 16) | (rd << 12) | | |
| 555 | + ((im & 0xf0) << 4) | (im & 0xf)); | |
| 556 | + else | |
| 557 | + tcg_out32(s, (cond << 28) | 0x014000d0 | | |
| 558 | + (rn << 16) | (rd << 12) | | |
| 559 | + (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
| 560 | +} | |
| 561 | + | |
| 562 | +static inline void tcg_out_ld8s_r(TCGContext *s, int cond, | |
| 563 | + int rd, int rn, int rm) | |
| 564 | +{ | |
| 565 | + tcg_out32(s, (cond << 28) | 0x019000f0 | | |
| 566 | + (rn << 16) | (rd << 12) | rm); | |
| 567 | +} | |
| 568 | + | |
| 569 | +static inline void tcg_out_st8s_r(TCGContext *s, int cond, | |
| 570 | + int rd, int rn, int rm) | |
| 571 | +{ | |
| 572 | + tcg_out32(s, (cond << 28) | 0x018000f0 | | |
| 573 | + (rn << 16) | (rd << 12) | rm); | |
| 574 | +} | |
| 575 | + | |
| 576 | +static inline void tcg_out_ld32u(TCGContext *s, int cond, | |
| 577 | + int rd, int rn, int32_t offset) | |
| 578 | +{ | |
| 579 | + if (offset > 0xfff || offset < -0xfff) { | |
| 580 | + tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
| 581 | + tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8); | |
| 582 | + } else | |
| 583 | + tcg_out_ld32_12(s, cond, rd, rn, offset); | |
| 584 | +} | |
| 585 | + | |
| 586 | +static inline void tcg_out_st32(TCGContext *s, int cond, | |
| 587 | + int rd, int rn, int32_t offset) | |
| 588 | +{ | |
| 589 | + if (offset > 0xfff || offset < -0xfff) { | |
| 590 | + tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
| 591 | + tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8); | |
| 592 | + } else | |
| 593 | + tcg_out_st32_12(s, cond, rd, rn, offset); | |
| 594 | +} | |
| 595 | + | |
| 596 | +static inline void tcg_out_ld16u(TCGContext *s, int cond, | |
| 597 | + int rd, int rn, int32_t offset) | |
| 598 | +{ | |
| 599 | + if (offset > 0xff || offset < -0xff) { | |
| 600 | + tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
| 601 | + tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8); | |
| 602 | + } else | |
| 603 | + tcg_out_ld16u_8(s, cond, rd, rn, offset); | |
| 604 | +} | |
| 605 | + | |
| 606 | +static inline void tcg_out_ld16s(TCGContext *s, int cond, | |
| 607 | + int rd, int rn, int32_t offset) | |
| 608 | +{ | |
| 609 | + if (offset > 0xff || offset < -0xff) { | |
| 610 | + tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
| 611 | + tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8); | |
| 612 | + } else | |
| 613 | + tcg_out_ld16s_8(s, cond, rd, rn, offset); | |
| 614 | +} | |
| 615 | + | |
| 616 | +static inline void tcg_out_st16u(TCGContext *s, int cond, | |
| 617 | + int rd, int rn, int32_t offset) | |
| 618 | +{ | |
| 619 | + if (offset > 0xff || offset < -0xff) { | |
| 620 | + tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
| 621 | + tcg_out_st16u_r(s, cond, rd, rn, TCG_REG_R8); | |
| 622 | + } else | |
| 623 | + tcg_out_st16u_8(s, cond, rd, rn, offset); | |
| 624 | +} | |
| 625 | + | |
| 626 | +static inline void tcg_out_ld8u(TCGContext *s, int cond, | |
| 627 | + int rd, int rn, int32_t offset) | |
| 628 | +{ | |
| 629 | + if (offset > 0xfff || offset < -0xfff) { | |
| 630 | + tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
| 631 | + tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8); | |
| 632 | + } else | |
| 633 | + tcg_out_ld8_12(s, cond, rd, rn, offset); | |
| 634 | +} | |
| 635 | + | |
| 636 | +static inline void tcg_out_ld8s(TCGContext *s, int cond, | |
| 637 | + int rd, int rn, int32_t offset) | |
| 638 | +{ | |
| 639 | + if (offset > 0xff || offset < -0xff) { | |
| 640 | + tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
| 641 | + tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8); | |
| 642 | + } else | |
| 643 | + tcg_out_ld8s_8(s, cond, rd, rn, offset); | |
| 644 | +} | |
| 645 | + | |
| 646 | +static inline void tcg_out_st8u(TCGContext *s, int cond, | |
| 647 | + int rd, int rn, int32_t offset) | |
| 648 | +{ | |
| 649 | + if (offset > 0xfff || offset < -0xfff) { | |
| 650 | + tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
| 651 | + tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8); | |
| 652 | + } else | |
| 653 | + tcg_out_st8_12(s, cond, rd, rn, offset); | |
| 654 | +} | |
| 655 | + | |
| 656 | +static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr) | |
| 657 | +{ | |
| 658 | + int32_t val; | |
| 659 | + | |
| 660 | + val = addr - (tcg_target_long) s->code_ptr; | |
| 661 | + if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd) | |
| 662 | + tcg_out_b(s, cond, val); | |
| 663 | + else { | |
| 664 | +#if 1 | |
| 665 | + tcg_abort(); | |
| 666 | +#else | |
| 667 | + if (cond == COND_AL) { | |
| 668 | + tcg_out_ld32_12(s, COND_AL, 15, 15, -4); | |
| 669 | + tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */ | |
| 670 | + } else { | |
| 671 | + tcg_out_movi32(s, cond, TCG_REG_R8, val - 8); | |
| 672 | + tcg_out_dat_reg(s, cond, ARITH_ADD, | |
| 673 | + 15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0)); | |
| 674 | + } | |
| 675 | +#endif | |
| 676 | + } | |
| 677 | +} | |
| 678 | + | |
| 679 | +static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr) | |
| 680 | +{ | |
| 681 | + int32_t val; | |
| 682 | + | |
| 683 | +#ifdef SAVE_LR | |
| 684 | + tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0)); | |
| 685 | +#endif | |
| 686 | + | |
| 687 | + val = addr - (tcg_target_long) s->code_ptr; | |
| 688 | + if (val < 0x01fffffd && val > -0x01fffffd) | |
| 689 | + tcg_out_bl(s, cond, val); | |
| 690 | + else { | |
| 691 | +#if 1 | |
| 692 | + tcg_abort(); | |
| 693 | +#else | |
| 694 | + if (cond == COND_AL) { | |
| 695 | + tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4); | |
| 696 | + tcg_out_ld32_12(s, COND_AL, 15, 15, -4); | |
| 697 | + tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */ | |
| 698 | + } else { | |
| 699 | + tcg_out_movi32(s, cond, TCG_REG_R9, addr); | |
| 700 | + tcg_out_dat_imm(s, cond, ARITH_MOV, 14, 0, 15); | |
| 701 | + tcg_out_bx(s, cond, TCG_REG_R9); | |
| 702 | + } | |
| 703 | +#endif | |
| 704 | + } | |
| 705 | + | |
| 706 | +#ifdef SAVE_LR | |
| 707 | + tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0)); | |
| 708 | +#endif | |
| 709 | +} | |
| 710 | + | |
| 711 | +static inline void tcg_out_callr(TCGContext *s, int cond, int arg) | |
| 712 | +{ | |
| 713 | +#ifdef SAVE_LR | |
| 714 | + tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0)); | |
| 715 | +#endif | |
| 716 | + /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */ | |
| 717 | + tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0)); | |
| 718 | + tcg_out_bx(s, cond, arg); | |
| 719 | +#ifdef SAVE_LR | |
| 720 | + tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0)); | |
| 721 | +#endif | |
| 722 | +} | |
| 723 | + | |
| 724 | +static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index) | |
| 725 | +{ | |
| 726 | + TCGLabel *l = &s->labels[label_index]; | |
| 727 | + | |
| 728 | + if (l->has_value) | |
| 729 | + tcg_out_goto(s, cond, l->u.value); | |
| 730 | + else if (cond == COND_AL) { | |
| 731 | + tcg_out_ld32_12(s, COND_AL, 15, 15, -4); | |
| 732 | + tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337); | |
| 733 | + s->code_ptr += 4; | |
| 734 | + } else { | |
| 735 | + /* Probably this should be preferred even for COND_AL... */ | |
| 736 | + tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337); | |
| 737 | + tcg_out_b(s, cond, 8); | |
| 738 | + } | |
| 739 | +} | |
| 740 | + | |
/* Emit a div2/divu2 sequence by calling the 64-bit C helpers twice:
 * once for the quotient (helper_div) and once for the remainder
 * (helper_rem).  args[0]/args[1] receive quotient/remainder;
 * args[2]/args[3] are the low/high input words and args[4] the divisor.
 * 'shift' extends the divisor's high word: ASR(31) sign-extends for the
 * signed case, LSR(31) for the unsigned case.
 * NOTE(review): LSR #31 yields the divisor's top bit (0 or 1), not 0 —
 * verify this really zero-extends as intended for divu2.  */
static void tcg_out_div_helper(TCGContext *s, int cond, const TCGArg *args,
                void *helper_div, void *helper_rem, int shift)
{
    int div_reg = args[0];
    int rem_reg = args[1];

    /* stmdb sp!, { r0 - r3, ip, lr } */
    /* (Note that we need an even number of registers as per EABI) */
    tcg_out32(s, (cond << 28) | 0x092d500f);

    /* Marshal the operands into the EABI argument registers r0-r3.  */
    tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);

    tcg_out_call(s, cond, (uint32_t) helper_div);
    /* Stash the quotient in the reserved scratch register r8 across
     * the second helper call.  */
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 0, SHIFT_IMM_LSL(0));

    /* ldmia sp, { r0 - r3, ip, lr } -- no writeback: reload the saved
     * argument registers while keeping the frame on the stack for the
     * selective pops below.  */
    tcg_out32(s, (cond << 28) | 0x089d500f);

    /* Re-marshal the same operands for the remainder helper.  */
    tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);

    tcg_out_call(s, cond, (uint32_t) helper_rem);

    /* Move the results into place before popping, so the pops can skip
     * the result registers.  */
    tcg_out_dat_reg(s, cond, ARITH_MOV, rem_reg, 0, 0, SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, div_reg, 0, 8, SHIFT_IMM_LSL(0));

    /* Pop the saved registers one by one, skipping any that now hold a
     * result.
     * NOTE(review): each skipped pop also skips the sp += 4 post-index,
     * so if a result register is one of r0-r3/ip/lr the subsequent
     * loads read the wrong slots and sp ends up unbalanced — verify
     * that the register constraints make this impossible.  */
    /* ldr r0, [sp], #4 */
    if (rem_reg != 0 && div_reg != 0)
        tcg_out32(s, (cond << 28) | 0x04bd0004);
    /* ldr r1, [sp], #4 */
    if (rem_reg != 1 && div_reg != 1)
        tcg_out32(s, (cond << 28) | 0x04bd1004);
    /* ldr r2, [sp], #4 */
    if (rem_reg != 2 && div_reg != 2)
        tcg_out32(s, (cond << 28) | 0x04bd2004);
    /* ldr r3, [sp], #4 */
    if (rem_reg != 3 && div_reg != 3)
        tcg_out32(s, (cond << 28) | 0x04bd3004);
    /* ldr ip, [sp], #4 */
    if (rem_reg != 12 && div_reg != 12)
        tcg_out32(s, (cond << 28) | 0x04bdc004);
    /* ldr lr, [sp], #4 */
    if (rem_reg != 14 && div_reg != 14)
        tcg_out32(s, (cond << 28) | 0x04bde004);
}
| 791 | + | |
#ifdef CONFIG_SOFTMMU
/* Softmmu slow-path helpers, indexed by access size log2 (0 = byte,
 * 1 = halfword, 2 = word, 3 = doubleword).  They are declared as
 * void(void) here because they are only ever branched to with a
 * hand-built register argument layout, never called through this
 * prototype.  */
extern void __ldb_mmu(void);
extern void __ldw_mmu(void);
extern void __ldl_mmu(void);
extern void __ldq_mmu(void);

extern void __stb_mmu(void);
extern void __stw_mmu(void);
extern void __stl_mmu(void);
extern void __stq_mmu(void);

/* Load helpers, indexed by s_bits (opc & 3).  */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

/* Store helpers, indexed by s_bits (opc & 3).  */
static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif
| 817 | + | |
/* Emit a guest memory load.
 * opc encodes the access: bits 0-1 are the size log2, bit 2 requests
 * sign extension.  args carries the data register(s), the address
 * register(s) (two of each for 64-bit values / 64-bit guest addresses)
 * and, under CONFIG_SOFTMMU, the mem_index.
 * With USE_TLB a fast path probes the softmmu TLB inline (using r0, r1
 * and the reserved scratch r8) and falls through to the out-of-line
 * helper call only on a miss.  */
static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
# ifdef USE_TLB
    uint32_t *label_ptr;
# endif
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
#ifdef CONFIG_SOFTMMU
    mem_index = *args;
    s_bits = opc & 3;

# ifdef USE_TLB
    /* TLB probe: r8 = addr rotated so the page number sits in the low
     * bits; r0 = &env->tlb_table[mem_index][page & (CPU_TLB_SIZE-1)]
     * (TCG_AREG0 holds the CPUState pointer, per the offsetof uses
     * below); r1 = tlb entry's addr_read; compare against the page.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    8, 0, addr_reg, SHIFT_IMM_ROR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    0, 8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    tcg_out_ld32_12(s, COND_AL, 1, 0,
                    offsetof(CPUState, tlb_table[mem_index][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* TODO: alignment check?
     * if (s_bits)
     * tcg_out_data_reg(s, COND_EQ, ARITH_EOR,
     * 0, 1, 8, SHIFT_IMM_LSR(32 - s_bits));
     */
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    /* Also compare the high half of a 64-bit guest address; every
     * following COND_EQ insn executes only if both halves matched.  */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[mem_index][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    /* TLB hit: r1 = host/guest addend for this page.  */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[mem_index][0].addend));

    /* Fast-path load, conditional on the TLB hit.  */
    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1 | 4:
        tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 3:
        /* TODO: must write back */
        tcg_out_ld32_r(s, COND_EQ, data_reg, 1, addr_reg);
        tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4);
        break;
    }

    /* On a hit, branch over the slow path below; the offset is patched
     * in at the end of this function.  */
    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);
# endif

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
# endif

    /* Slow path: marshal (addr[, addr_hi], mem_index) into r0.. and
     * call the softmmu load helper.  */
    /* TODO: move this code to where the constants pool will be */
    if (addr_reg)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index);
# else
    if (addr_reg2 != 1)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
# endif
    tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);

    /* Move/extend the helper's result (r0, and r1 for 64-bit) into the
     * destination register(s); the LSL/ASR pairs perform sign
     * extension of sub-word results.  */
    switch (opc) {
    case 0 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, 0, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, 0, SHIFT_IMM_ASR(24));
        break;
    case 1 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, 0, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, 0, SHIFT_IMM_ASR(16));
        break;
    case 0:
    case 1:
    case 2:
    default:
        if (data_reg)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
        break;
    case 3:
        if (data_reg2 != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg2, 0, 1, SHIFT_IMM_LSL(0));
        if (data_reg != 0)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
        break;
    }

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
# endif

# ifdef USE_TLB
    /* Patch the fast-path branch to land here (past the slow path);
     * the 24-bit B offset is in words relative to branch address + 8.  */
    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
# endif
#else
    /* No softmmu: guest addresses are host addresses; load directly.  */
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block load */
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
        break;
    }
#endif
}
| 983 | + | |
/* Emit a guest memory store; mirror image of tcg_out_qemu_ld.
 * opc bits 0-1 give the size log2.  With USE_TLB the inline fast path
 * probes tlb_table[...].addr_write; on a miss the slow path marshals
 * (addr[, addr_hi], data[, data_hi], mem_index) into r0.. per the EABI
 * and calls the softmmu store helper.  */
static inline void tcg_out_qemu_st(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
# ifdef USE_TLB
    uint32_t *label_ptr;
# endif
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
#ifdef CONFIG_SOFTMMU
    mem_index = *args;
    s_bits = opc & 3;

# ifdef USE_TLB
    /* TLB probe, identical to the load path but checking addr_write.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    8, 0, addr_reg, SHIFT_IMM_ROR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    0, 8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    tcg_out_ld32_12(s, COND_AL, 1, 0,
                    offsetof(CPUState, tlb_table[mem_index][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* TODO: alignment check?
     * if (s_bits)
     * tcg_out_data_reg(s, COND_EQ, ARITH_EOR,
     * 0, 1, 8, SHIFT_IMM_LSR(32 - s_bits));
     */
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    /* Also check the high half of a 64-bit guest address.  */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[mem_index][0].addr_write)
                    + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    /* TLB hit: r1 = addend for this page.  */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[mem_index][0].addend));

    /* Fast-path store, conditional on the TLB hit.  */
    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 0 | 4:
        tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1:
        tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1 | 4:
        tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 2:
    default:
        tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 3:
        /* TODO: must write back */
        tcg_out_st32_r(s, COND_EQ, data_reg, 1, addr_reg);
        tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4);
        break;
    }

    /* On a hit, branch over the slow path; patched at the end.  */
    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);
# endif

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
# endif

    /* Slow path: argument marshalling.  Sub-word data is masked /
     * zero-extended via AND 0xff or an LSL/LSR pair before the call.  */
    /* TODO: move this code to where the constants pool will be */
    if (addr_reg)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 1:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, 1, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 2:
        if (data_reg != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 3:
        if (data_reg != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            1, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg2, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    }
# else
    if (addr_reg2 != 1)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 1:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, 2, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 2:
        if (data_reg != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 3:
        /* Five argument words: r0-r3 plus mem_index on the stack.
         * The str with writeback reserves 16 bytes (EABI 8-byte
         * alignment) and is matched by the "add sp, #0x10" after the
         * call below.  */
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        tcg_out32(s, (cond << 28) | 0x052d3010); /* str r3, [sp, #-0x10]! */
        if (data_reg != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != 3)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            3, 0, data_reg2, SHIFT_IMM_LSL(0));
        break;
    }
# endif

    tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);

# if TARGET_LONG_BITS == 64
    if (opc == 3)
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10);
# endif

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
# endif

# ifdef USE_TLB
    /* Patch the fast-path branch to skip the slow path.  */
    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
# endif
#else
    /* No softmmu: store directly to the host address.  */
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block store */
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        break;
    }
#endif
}
| 1180 | + | |
extern void exec_loop;
/* Address inside the prologue (set by tcg_target_qemu_prologue) that
 * exit_tb branches back to; it pops the saved registers and returns.  */
static uint8_t *tb_ret_addr;
| 1183 | + | |
/* Emit ARM host code for one TCG opcode.
 * args/const_args follow the constraints declared in arm_op_defs.  */
static inline void tcg_out_op(TCGContext *s, int opc,
                const TCGArg *args, const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Return args[0] in r0 and jump back to the prologue's epilogue.
         * Values that do not fit an 8-bit immediate (args[0] >> 8) are
         * loaded from an inline literal: "ldr r0, [pc, #0]" reads the
         * word emitted just after the following branch (pc = insn + 8).  */
#ifdef SAVE_LR
        if (args[0] >> 8)
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
        else
            tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 15, 0, 14, SHIFT_IMM_LSL(0));
        if (args[0] >> 8)
            tcg_out32(s, args[0]);
#else
        if (args[0] >> 8)
            tcg_out_ld32_12(s, COND_AL, 0, 15, 0);
        else
            tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]);
        tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
        if (args[0] >> 8)
            tcg_out32(s, args[0]);
#endif
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method: reserve a B instruction whose offset
             * is patched later by tb_set_jmp_target1().  */
#if 1
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b(s, COND_AL, 8);
#else
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method: load pc from tb_next[args[0]],
             * pc-relative when the slot is within ldr's 12-bit offset
             * range, otherwise via an absolute address in r0.  */
#if 1
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, 15, 15, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
            tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        /* Immediate target -> direct call; register target -> callr.  */
        if (const_args[0])
            tcg_out_call(s, COND_AL, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_jmp:
        if (const_args[0])
            tcg_out_goto(s, COND_AL, args[0]);
        else
            tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    /* Host (non-guest) loads and stores: args = dest/src, base, offset.  */
    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    /* Two-operand ALU ops share one emission site via gen_arith.  */
    case INDEX_op_add_i32:
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through. */
    gen_arith:
        tcg_out_dat_reg(s, COND_AL, c,
                        args[0], args[1], args[2], SHIFT_IMM_LSL(0));
        break;
    /* 64-bit add/sub as an op pair with carry (ADD/ADC, SUB/SBC).  */
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* Division via the 64-bit C helpers; the shift extends the high
     * word of the divisor.
     * NOTE(review): LSR #31 leaves the top bit (0/1) in the high word
     * rather than zero-extending — verify for the unsigned case.  */
    case INDEX_op_div2_i32:
        tcg_out_div_helper(s, COND_AL, args,
                        tcg_helper_div_i64, tcg_helper_rem_i64,
                        SHIFT_IMM_ASR(31));
        break;
    case INDEX_op_divu2_i32:
        tcg_out_div_helper(s, COND_AL, args,
                        tcg_helper_divu_i64, tcg_helper_remu_i64,
                        SHIFT_IMM_LSR(31));
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        /* A zero immediate must encode as LSL #0 (plain mov): in the
         * ARM encoding LSR/ASR #0 actually means a shift by 32.  */
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        /* Fall through. */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[0], args[1], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    --> a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;

    /* Guest memory accesses; the low bits of the last argument to
     * tcg_out_qemu_ld/st encode size, bit 2 sign extension.  */
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, COND_AL, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, COND_AL, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, COND_AL, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, COND_AL, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, COND_AL, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, COND_AL, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, COND_AL, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, COND_AL, args, 3);
        break;

    /* Sign extension by shift pair (no sxtb/sxth before ARMv6).  */
    case INDEX_op_ext8s_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[0], SHIFT_IMM_ASR(24));
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[0], SHIFT_IMM_ASR(16));
        break;

    default:
        tcg_abort();
    }
}
| 1418 | + | |
/* Register constraints for each supported opcode; "r" = any allocatable
 * register, "ri" = register or immediate, digits alias earlier operands.
 * The "x"/"X"/"d"/"D" letters are target-specific classes for the
 * qemu_ld/st operands — presumably restricted so the data/address regs
 * avoid the registers clobbered by the inline TLB probe and slow-path
 * call; verify against the constraint parser.  */
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "r" } },
    { INDEX_op_sub_i32, { "r", "r", "r" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_div2_i32, { "r", "r", "r", "1", "2" } },
    { INDEX_op_divu2_i32, { "r", "r", "r", "1", "2" } },
    { INDEX_op_and_i32, { "r", "r", "r" } },
    { INDEX_op_or_i32, { "r", "r", "r" } },
    { INDEX_op_xor_i32, { "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "r" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_qemu_ld8u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld8s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld32u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld64, { "r", "d", "x", "X" } },

    { INDEX_op_qemu_st8, { "d", "x", "X" } },
    { INDEX_op_qemu_st16, { "d", "x", "X" } },
    { INDEX_op_qemu_st32, { "d", "x", "X" } },
    { INDEX_op_qemu_st64, { "d", "D", "x", "X" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { -1 },
};
| 1477 | + | |
/* One-time backend setup: register classes, call-clobbered set,
 * reserved registers, and the per-op constraint table.  */
void tcg_target_init(TCGContext *s)
{
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();

    /* Allocatable: r0-r14 except r8, which is kept as a backend
     * scratch register (TLB probe, div helper, SAVE_LR).  */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
                    ((2 << TCG_REG_R14) - 1) & ~(1 << TCG_REG_R8));
    /* EABI call-clobbered set: r0-r3, ip (r12), lr (r14).  */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                    ((2 << TCG_REG_R3) - 1) |
                    (1 << TCG_REG_R12) | (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
#ifdef SAVE_LR
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14);
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);

    tcg_add_target_add_op_defs(arm_op_defs);
}
| 1499 | + | |
/* Generic TCG load hook: on this 32-bit-only backend every type is a
 * 32-bit word, so simply delegate to the word load.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}
| 1505 | + | |
/* Generic TCG store hook: delegate to the 32-bit word store (the only
 * width this backend handles at this level).  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
| 1511 | + | |
| 1512 | +void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val) | |
| 1513 | +{ | |
| 1514 | + if (val > 0) | |
| 1515 | + if (val < 0x100) | |
| 1516 | + tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val); | |
| 1517 | + else | |
| 1518 | + tcg_abort(); | |
| 1519 | + else if (val < 0) { | |
| 1520 | + if (val > -0x100) | |
| 1521 | + tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val); | |
| 1522 | + else | |
| 1523 | + tcg_abort(); | |
| 1524 | + } | |
| 1525 | +} | |
| 1526 | + | |
/* Generic TCG register-to-register move hook: "mov ret, arg".  */
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}
| 1531 | + | |
/* Generic TCG move-immediate hook; type is irrelevant on this 32-bit
 * backend, so delegate to the 32-bit immediate loader.  */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
| 1537 | + | |
/* Emit the prologue/epilogue used to enter and leave generated code.
 * Entry: save the callee-saved registers this backend uses plus lr,
 * then jump to the TB whose address the caller passed in r0.
 * tb_ret_addr marks the epilogue; exit_tb branches here to pop the
 * saved registers and return (pc restored directly from the stack).  */
void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmdb sp!, { r9 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4e00);

    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r9 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8e00);
}
tcg/arm/tcg-target.h
0 → 100644
| 1 | +/* | |
| 2 | + * Tiny Code Generator for QEMU | |
| 3 | + * | |
| 4 | + * Copyright (c) 2008 Fabrice Bellard | |
| 5 | + * Copyright (c) 2008 Andrzej Zaborowski | |
| 6 | + * | |
| 7 | + * Permission is hereby granted, free of charge, to any person obtaining a copy | |
| 8 | + * of this software and associated documentation files (the "Software"), to deal | |
| 9 | + * in the Software without restriction, including without limitation the rights | |
| 10 | + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
| 11 | + * copies of the Software, and to permit persons to whom the Software is | |
| 12 | + * furnished to do so, subject to the following conditions: | |
| 13 | + * | |
| 14 | + * The above copyright notice and this permission notice shall be included in | |
| 15 | + * all copies or substantial portions of the Software. | |
| 16 | + * | |
| 17 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
| 18 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
| 19 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
| 20 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
| 21 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
| 22 | + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
| 23 | + * THE SOFTWARE. | |
| 24 | + */ | |
/* Identify this backend as the ARM host target.  */
#define TCG_TARGET_ARM 1

/* ARM is a 32-bit host for TCG.  */
#define TCG_TARGET_REG_BITS 32
/* Host is treated as little-endian.  */
#undef TCG_TARGET_WORDS_BIGENDIAN
/* Optional TCG ops this backend does not provide.  */
#undef TCG_TARGET_HAS_div_i32
#undef TCG_TARGET_HAS_div_i64
#undef TCG_TARGET_HAS_bswap_i32
/* Sign-extension ops the backend implements natively.  */
#define TCG_TARGET_HAS_ext8s_i32
#define TCG_TARGET_HAS_ext16s_i32
/* Stack grows downward on ARM.  */
#undef TCG_TARGET_STACK_GROWSUP
| 35 | + | |
/* Allocatable host registers.  Values match the ARM encoding of
   r0-r14; r15 (pc) is not allocatable and is deliberately absent,
   so TCG_TARGET_NB_REGS ends up as 15.  */
enum {
    TCG_REG_R0 = 0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,   /* sp */
    TCG_REG_R14,   /* lr */
    TCG_TARGET_NB_REGS
};
| 54 | + | |
/* used for function call generation */
#define TCG_REG_CALL_STACK TCG_REG_R13   /* r13 is the ARM stack pointer */
#define TCG_TARGET_STACK_ALIGN 8         /* stack alignment in bytes */
| 58 | + | |
/* Fixed register assignments for the CPU state pointer and the
   legacy AREG temporaries.  */
enum {
    /* Note: must be synced with dyngen-exec.h */
    TCG_AREG0 = TCG_REG_R7,
    TCG_AREG1 = TCG_REG_R4,
    TCG_AREG2 = TCG_REG_R5,
    TCG_AREG3 = TCG_REG_R6,
};
| 66 | + | |
/* Flush the host instruction cache for [start, stop).  swi 0x9f0002
   is the ARM-Linux cacheflush system call: a1 = start address,
   a2 = end address, a3 = flags (0).  The register __asm bindings pin
   each value to the register the kernel expects.  */
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    register unsigned long _beg __asm ("a1") = start;
    register unsigned long _end __asm ("a2") = stop;
    register unsigned long _flg __asm ("a3") = 0;
    /* NOTE(review): the asm lists no "memory" clobber and no output for
       the syscall return value in a1 -- presumably harmless for current
       callers, but worth confirming the compiler cannot reorder or cache
       accesses across this flush.  */
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
}