Commit d2efb32fdc3367823765e97a017ef072f906a596
1 parent 752e628c
Replace thumb usage of cpu_T registers by proper register allocations.
The goal is eventually to get rid of all cpu_T register usage and to use just short-lived tmp/tmp2 registers. This patch converts all the places in the Thumb code where cpu_T was used and replaces them with explicit TCG register allocation.

Signed-off-by: Filip Navara <filip.navara@gmail.com>
Showing 1 changed file with 138 additions and 129 deletions.
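
The pattern applied throughout the hunks below, shown as a minimal before/after sketch (the snippet mirrors the Thumb add/subtract case; load_reg, store_reg, new_tmp and dead_tmp are the existing translate.c temporary-register helpers):

    /* Before: operands routed through the global cpu_T[0]/cpu_T[1]. */
    gen_movl_T0_reg(s, rn);             /* T0 = rn */
    gen_movl_T1_reg(s, rm);             /* T1 = rm */
    gen_op_addl_T0_T1_cc();             /* T0 += T1, sets NZCV */
    gen_movl_reg_T0(s, rd);             /* rd = T0 */

    /* After: short-lived TCG temporaries allocated per instruction. */
    tmp = load_reg(s, rn);              /* temp holding rn */
    tmp2 = load_reg(s, rm);             /* temp holding rm */
    gen_helper_add_cc(tmp, tmp, tmp2);  /* flag-setting add */
    dead_tmp(tmp2);                     /* release the second temp */
    store_reg(s, rd, tmp);              /* write back rd, consumes tmp */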
target-arm/translate.c
@@ -191,19 +191,11 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
 #define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
 
-#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
-
 #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
 #define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
-#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
-#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
 
 #define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
 #define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
@@ -339,17 +331,17 @@ static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
 }
 
 /* Unsigned 32x32->64 multiply. */
-static void gen_op_mull_T0_T1(void)
+static void gen_mull(TCGv a, TCGv b)
 {
     TCGv_i64 tmp1 = tcg_temp_new_i64();
     TCGv_i64 tmp2 = tcg_temp_new_i64();
 
-    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
-    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
+    tcg_gen_extu_i32_i64(tmp1, a);
+    tcg_gen_extu_i32_i64(tmp2, b);
     tcg_gen_mul_i64(tmp1, tmp1, tmp2);
-    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
+    tcg_gen_trunc_i64_i32(a, tmp1);
     tcg_gen_shri_i64(tmp1, tmp1, 32);
-    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
+    tcg_gen_trunc_i64_i32(b, tmp1);
 }
 
 /* Signed 32x32->64 multiply. */
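
The renamed helper now takes both operands explicitly: it leaves the low 32 bits of the product in its first argument and the high 32 bits in the second. A usage sketch, mirroring the Thumb mul case further down (tmp/tmp2 are the per-instruction temporaries, not part of this hunk):

    gen_mull(tmp, tmp2);        /* tmp = low half, tmp2 = high half */
    if (!s->condexec_mask)
        gen_logic_CC(tmp);      /* Thumb muls sets NZ from the low 32 bits */
    store_reg(s, rd, tmp);      /* result register gets the low half */
    dead_tmp(tmp2);             /* high half is discarded */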
@@ -415,12 +407,12 @@ static inline void gen_logic_CC(TCGv var)
 }
 
 /* T0 += T1 + CF. */
-static void gen_adc_T0_T1(void)
+static void gen_adc(TCGv t0, TCGv t1)
 {
     TCGv tmp;
-    gen_op_addl_T0_T1();
+    tcg_gen_add_i32(t0, t0, t1);
     tmp = load_cpu_field(CF);
-    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
+    tcg_gen_add_i32(t0, t0, tmp);
     dead_tmp(tmp);
 }
 
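
gen_adc likewise operates on explicit operands, adding t1 and the saved carry flag into t0 without updating the flags; flag-setting callers use gen_helper_adc_cc instead. The call pattern, as in the Thumb adc case below:

    if (s->condexec_mask)
        gen_adc(tmp, tmp2);                 /* inside an IT block: no flag update */
    else
        gen_helper_adc_cc(tmp, tmp, tmp2);  /* flag-setting variant */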
@@ -445,9 +437,6 @@ static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
     dead_tmp(tmp);
 }
 
-#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
-
 /* T0 &= ~T1. Clobbers T1. */
 /* FIXME: Implement bic natively. */
 static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
@@ -7091,70 +7080,70 @@ thumb2_logic_op(int op)
    Returns zero if the opcode is valid. */
 
 static int
-gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
+gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
 {
     int logic_cc;
 
     logic_cc = 0;
     switch (op) {
     case 0: /* and */
-        gen_op_andl_T0_T1();
+        tcg_gen_and_i32(t0, t0, t1);
         logic_cc = conds;
         break;
     case 1: /* bic */
-        gen_op_bicl_T0_T1();
+        tcg_gen_bic_i32(t0, t0, t1);
         logic_cc = conds;
         break;
     case 2: /* orr */
-        gen_op_orl_T0_T1();
+        tcg_gen_or_i32(t0, t0, t1);
         logic_cc = conds;
         break;
     case 3: /* orn */
-        gen_op_notl_T1();
-        gen_op_orl_T0_T1();
+        tcg_gen_not_i32(t1, t1);
+        tcg_gen_or_i32(t0, t0, t1);
         logic_cc = conds;
         break;
     case 4: /* eor */
-        gen_op_xorl_T0_T1();
+        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
     case 8: /* add */
         if (conds)
-            gen_op_addl_T0_T1_cc();
+            gen_helper_add_cc(t0, t0, t1);
         else
-            gen_op_addl_T0_T1();
+            tcg_gen_add_i32(t0, t0, t1);
         break;
     case 10: /* adc */
         if (conds)
-            gen_op_adcl_T0_T1_cc();
+            gen_helper_adc_cc(t0, t0, t1);
         else
-            gen_adc_T0_T1();
+            gen_adc(t0, t1);
         break;
     case 11: /* sbc */
         if (conds)
-            gen_op_sbcl_T0_T1_cc();
+            gen_helper_sbc_cc(t0, t0, t1);
         else
-            gen_sbc_T0_T1();
+            gen_sub_carry(t0, t0, t1);
         break;
     case 13: /* sub */
         if (conds)
-            gen_op_subl_T0_T1_cc();
+            gen_helper_sub_cc(t0, t0, t1);
         else
-            gen_op_subl_T0_T1();
+            tcg_gen_sub_i32(t0, t0, t1);
         break;
     case 14: /* rsb */
         if (conds)
-            gen_op_rsbl_T0_T1_cc();
+            gen_helper_sub_cc(t0, t1, t0);
         else
-            gen_op_rsbl_T0_T1();
+            tcg_gen_sub_i32(t0, t1, t0);
         break;
     default: /* 5, 6, 7, 9, 12, 15. */
         return 1;
     }
     if (logic_cc) {
-        gen_op_logic_T0_cc();
+        gen_logic_CC(t0);
         if (shifter_out)
-            gen_set_CF_bit31(cpu_T[1]);
+            gen_set_CF_bit31(t1);
     }
     return 0;
 }
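
With the two extra TCGv parameters the operands are now named explicitly at each call site; for the moment the Thumb-2 decoder still passes the cpu_T globals, so behaviour is unchanged until the callers are converted as well:

    /* Call-site shape after this patch (see the two call-site hunks below): */
    if (gen_thumb2_data_op(s, op, conds, 0, cpu_T[0], cpu_T[1]))
        goto illegal_op;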
@@ -7211,8 +7200,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
            16-bit instructions in case the second half causes an
            prefetch abort. */
         offset = ((int32_t)insn << 21) >> 9;
-        gen_op_movl_T0_im(s->pc + 2 + offset);
-        gen_movl_reg_T0(s, 14);
+        tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
         return 0;
     }
     /* Fall through to 32-bit decode. */
@@ -7492,7 +7480,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
         conds = (insn & (1 << 20)) != 0;
         logic_cc = (conds && thumb2_logic_op(op));
         gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
-        if (gen_thumb2_data_op(s, op, conds, 0))
+        if (gen_thumb2_data_op(s, op, conds, 0, cpu_T[0], cpu_T[1]))
             goto illegal_op;
         if (rd != 15)
             gen_movl_reg_T0(s, rd);
@@ -8055,7 +8043,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
             gen_movl_T0_reg(s, rn);
             op = (insn >> 21) & 0xf;
             if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
-                                   shifter_out))
+                                   shifter_out, cpu_T[0], cpu_T[1]))
                 goto illegal_op;
             rd = (insn >> 8) & 0xf;
             if (rd != 15) {
@@ -8204,32 +8192,35 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
 
     switch (insn >> 12) {
     case 0: case 1:
+
         rd = insn & 7;
         op = (insn >> 11) & 3;
         if (op == 3) {
             /* add/subtract */
             rn = (insn >> 3) & 7;
-            gen_movl_T0_reg(s, rn);
+            tmp = load_reg(s, rn);
             if (insn & (1 << 10)) {
                 /* immediate */
-                gen_op_movl_T1_im((insn >> 6) & 7);
+                tmp2 = new_tmp();
+                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
             } else {
                 /* reg */
                 rm = (insn >> 6) & 7;
-                gen_movl_T1_reg(s, rm);
+                tmp2 = load_reg(s, rm);
             }
             if (insn & (1 << 9)) {
                 if (s->condexec_mask)
-                    gen_op_subl_T0_T1();
+                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                 else
-                    gen_op_subl_T0_T1_cc();
+                    gen_helper_sub_cc(tmp, tmp, tmp2);
             } else {
                 if (s->condexec_mask)
-                    gen_op_addl_T0_T1();
+                    tcg_gen_add_i32(tmp, tmp, tmp2);
                 else
-                    gen_op_addl_T0_T1_cc();
+                    gen_helper_add_cc(tmp, tmp, tmp2);
             }
-            gen_movl_reg_T0(s, rd);
+            dead_tmp(tmp2);
+            store_reg(s, rd, tmp);
         } else {
             /* shift immediate */
             rm = (insn >> 3) & 7;
@@ -8245,35 +8236,40 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
         /* arithmetic large immediate */
         op = (insn >> 11) & 3;
         rd = (insn >> 8) & 0x7;
-        if (op == 0) {
-            gen_op_movl_T0_im(insn & 0xff);
-        } else {
-            gen_movl_T0_reg(s, rd);
-            gen_op_movl_T1_im(insn & 0xff);
-        }
-        switch (op) {
-        case 0: /* mov */
+        if (op == 0) { /* mov */
+            tmp = new_tmp();
+            tcg_gen_movi_i32(tmp, insn & 0xff);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
-            break;
-        case 1: /* cmp */
-            gen_op_subl_T0_T1_cc();
-            break;
-        case 2: /* add */
-            if (s->condexec_mask)
-                gen_op_addl_T0_T1();
-            else
-                gen_op_addl_T0_T1_cc();
-            break;
-        case 3: /* sub */
-            if (s->condexec_mask)
-                gen_op_subl_T0_T1();
-            else
-                gen_op_subl_T0_T1_cc();
-            break;
+                gen_logic_CC(tmp);
+            store_reg(s, rd, tmp);
+        } else {
+            tmp = load_reg(s, rd);
+            tmp2 = new_tmp();
+            tcg_gen_movi_i32(tmp2, insn & 0xff);
+            switch (op) {
+            case 1: /* cmp */
+                gen_helper_sub_cc(tmp, tmp, tmp2);
+                dead_tmp(tmp);
+                dead_tmp(tmp2);
+                break;
+            case 2: /* add */
+                if (s->condexec_mask)
+                    tcg_gen_add_i32(tmp, tmp, tmp2);
+                else
+                    gen_helper_add_cc(tmp, tmp, tmp2);
+                dead_tmp(tmp2);
+                store_reg(s, rd, tmp);
+                break;
+            case 3: /* sub */
+                if (s->condexec_mask)
+                    tcg_gen_sub_i32(tmp, tmp, tmp2);
+                else
+                    gen_helper_sub_cc(tmp, tmp, tmp2);
+                dead_tmp(tmp2);
+                store_reg(s, rd, tmp);
+                break;
+            }
         }
-        if (op != 1)
-            gen_movl_reg_T0(s, rd);
         break;
     case 4:
         if (insn & (1 << 11)) {
@@ -8295,19 +8291,22 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
             op = (insn >> 8) & 3;
             switch (op) {
             case 0: /* add */
-                gen_movl_T0_reg(s, rd);
-                gen_movl_T1_reg(s, rm);
-                gen_op_addl_T0_T1();
-                gen_movl_reg_T0(s, rd);
+                tmp = load_reg(s, rd);
+                tmp2 = load_reg(s, rm);
+                tcg_gen_add_i32(tmp, tmp, tmp2);
+                dead_tmp(tmp2);
+                store_reg(s, rd, tmp);
                 break;
             case 1: /* cmp */
-                gen_movl_T0_reg(s, rd);
-                gen_movl_T1_reg(s, rm);
-                gen_op_subl_T0_T1_cc();
+                tmp = load_reg(s, rd);
+                tmp2 = load_reg(s, rm);
+                gen_helper_sub_cc(tmp, tmp, tmp2);
+                dead_tmp(tmp2);
+                dead_tmp(tmp);
                 break;
             case 2: /* mov/cpy */
-                gen_movl_T0_reg(s, rm);
-                gen_movl_reg_T0(s, rd);
+                tmp = load_reg(s, rm);
+                store_reg(s, rd, tmp);
                 break;
             case 3:/* branch [and link] exchange thumb register */
                 tmp = load_reg(s, rm);
@@ -8338,114 +8337,124 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
             val = 0;
         }
 
-        if (op == 9) /* neg */
-            gen_op_movl_T0_im(0);
-        else if (op != 0xf) /* mvn doesn't read its first operand */
-            gen_movl_T0_reg(s, rd);
+        if (op == 9) { /* neg */
+            tmp = new_tmp();
+            tcg_gen_movi_i32(tmp, 0);
+        } else if (op != 0xf) { /* mvn doesn't read its first operand */
+            tmp = load_reg(s, rd);
+        } else {
+            TCGV_UNUSED(tmp);
+        }
 
-        gen_movl_T1_reg(s, rm);
+        tmp2 = load_reg(s, rm);
         switch (op) {
         case 0x0: /* and */
-            gen_op_andl_T0_T1();
+            tcg_gen_and_i32(tmp, tmp, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
+                gen_logic_CC(tmp);
             break;
         case 0x1: /* eor */
-            gen_op_xorl_T0_T1();
+            tcg_gen_xor_i32(tmp, tmp, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
+                gen_logic_CC(tmp);
             break;
         case 0x2: /* lsl */
             if (s->condexec_mask) {
-                gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
+                gen_helper_shl(tmp2, tmp2, tmp);
             } else {
-                gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
-                gen_op_logic_T1_cc();
+                gen_helper_shl_cc(tmp2, tmp2, tmp);
+                gen_logic_CC(tmp2);
             }
             break;
         case 0x3: /* lsr */
             if (s->condexec_mask) {
-                gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
+                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
-                gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
-                gen_op_logic_T1_cc();
+                gen_helper_shr_cc(tmp2, tmp2, tmp);
+                gen_logic_CC(tmp2);
             }
             break;
         case 0x4: /* asr */
             if (s->condexec_mask) {
-                gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
+                gen_helper_sar(tmp2, tmp2, tmp);
             } else {
-                gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
-                gen_op_logic_T1_cc();
+                gen_helper_sar_cc(tmp2, tmp2, tmp);
+                gen_logic_CC(tmp2);
             }
             break;
         case 0x5: /* adc */
             if (s->condexec_mask)
-                gen_adc_T0_T1();
+                gen_adc(tmp, tmp2);
             else
-                gen_op_adcl_T0_T1_cc();
+                gen_helper_adc_cc(tmp, tmp, tmp2);
             break;
         case 0x6: /* sbc */
             if (s->condexec_mask)
-                gen_sbc_T0_T1();
+                gen_sub_carry(tmp, tmp, tmp2);
            else
-                gen_op_sbcl_T0_T1_cc();
+                gen_helper_sbc_cc(tmp, tmp, tmp2);
             break;
         case 0x7: /* ror */
             if (s->condexec_mask) {
-                gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
+                gen_helper_ror(tmp2, tmp2, tmp);
             } else {
-                gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
-                gen_op_logic_T1_cc();
+                gen_helper_ror_cc(tmp2, tmp2, tmp);
+                gen_logic_CC(tmp2);
             }
             break;
         case 0x8: /* tst */
-            gen_op_andl_T0_T1();
-            gen_op_logic_T0_cc();
+            tcg_gen_and_i32(tmp, tmp, tmp2);
+            gen_logic_CC(tmp);
             rd = 16;
             break;
         case 0x9: /* neg */
             if (s->condexec_mask)
-                tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
+                tcg_gen_neg_i32(tmp, tmp2);
             else
-                gen_op_subl_T0_T1_cc();
+                gen_helper_sub_cc(tmp, tmp, tmp2);
             break;
         case 0xa: /* cmp */
-            gen_op_subl_T0_T1_cc();
+            gen_helper_sub_cc(tmp, tmp, tmp2);
             rd = 16;
             break;
         case 0xb: /* cmn */
-            gen_op_addl_T0_T1_cc();
+            gen_helper_add_cc(tmp, tmp, tmp2);
             rd = 16;
             break;
         case 0xc: /* orr */
-            gen_op_orl_T0_T1();
+            tcg_gen_or_i32(tmp, tmp, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
+                gen_logic_CC(tmp);
             break;
         case 0xd: /* mul */
-            gen_op_mull_T0_T1();
+            gen_mull(tmp, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
+                gen_logic_CC(tmp);
             break;
         case 0xe: /* bic */
-            gen_op_bicl_T0_T1();
+            tcg_gen_bic_i32(tmp, tmp, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
+                gen_logic_CC(tmp);
             break;
         case 0xf: /* mvn */
-            gen_op_notl_T1();
+            tcg_gen_not_i32(tmp2, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T1_cc();
+                gen_logic_CC(tmp2);
             val = 1;
             rm = rd;
             break;
         }
         if (rd != 16) {
-            if (val)
-                gen_movl_reg_T1(s, rm);
-            else
-                gen_movl_reg_T0(s, rd);
+            if (val) {
+                store_reg(s, rm, tmp2);
+                dead_tmp(tmp);
+            } else {
+                store_reg(s, rd, tmp);
+                dead_tmp(tmp2);
+            }
+        } else {
+            dead_tmp(tmp);
+            dead_tmp(tmp2);
         }
         break;
 