Commit 8f01245ee768775d5fa4f1dd0930e7062eb5dc27

Authored by pbrook
1 parent 1497c961

ARM TCG conversion 5/16.

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4142 c046a42c-6fe2-441c-8c8c-71466251a162
target-arm/op.c
@@ -222,29 +222,6 @@ void OPPROTO op_movl_cpsr_T0(void)
222 FORCE_RET(); 222 FORCE_RET();
223 } 223 }
224 224
225 -void OPPROTO op_mul_T0_T1(void)  
226 -{  
227 - T0 = T0 * T1;  
228 -}  
229 -  
230 -/* 64 bit unsigned mul */  
231 -void OPPROTO op_mull_T0_T1(void)  
232 -{  
233 - uint64_t res;  
234 - res = (uint64_t)T0 * (uint64_t)T1;  
235 - T1 = res >> 32;  
236 - T0 = res;  
237 -}  
238 -  
239 -/* 64 bit signed mul */  
240 -void OPPROTO op_imull_T0_T1(void)  
241 -{  
242 - uint64_t res;  
243 - res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1);  
244 - T1 = res >> 32;  
245 - T0 = res;  
246 -}  
247 -  
248 /* 48 bit signed mul, top 32 bits */ 225 /* 48 bit signed mul, top 32 bits */
249 void OPPROTO op_imulw_T0_T1(void) 226 void OPPROTO op_imulw_T0_T1(void)
250 { 227 {
@@ -1058,18 +1035,6 @@ void OPPROTO op_pkhbt_T0_T1(void)
1058 { 1035 {
1059 T0 = (T0 & 0xffff) | (T1 & 0xffff0000); 1036 T0 = (T0 & 0xffff) | (T1 & 0xffff0000);
1060 } 1037 }
1061 -void OPPROTO op_rev_T0(void)  
1062 -{  
1063 - T0 = ((T0 & 0xff000000) >> 24)  
1064 - | ((T0 & 0x00ff0000) >> 8)  
1065 - | ((T0 & 0x0000ff00) << 8)  
1066 - | ((T0 & 0x000000ff) << 24);  
1067 -}  
1068 -  
1069 -void OPPROTO op_revh_T0(void)  
1070 -{  
1071 - T0 = (T0 >> 16) | (T0 << 16);  
1072 -}  
1073 1038
1074 void OPPROTO op_rev16_T0(void) 1039 void OPPROTO op_rev16_T0(void)
1075 { 1040 {
@@ -1099,13 +1064,6 @@ void OPPROTO op_rbit_T0(void)
1099 | ((T0 & 0x11111111) << 3); 1064 | ((T0 & 0x11111111) << 3);
1100 } 1065 }
1101 1066
1102 -/* Swap low and high halfwords. */  
1103 -void OPPROTO op_swap_half_T1(void)  
1104 -{  
1105 - T1 = (T1 >> 16) | (T1 << 16);  
1106 - FORCE_RET();  
1107 -}  
1108 -  
1109 /* Dual 16-bit signed multiply. */ 1067 /* Dual 16-bit signed multiply. */
1110 void OPPROTO op_mul_dual_T0_T1(void) 1068 void OPPROTO op_mul_dual_T0_T1(void)
1111 { 1069 {
@@ -1267,22 +1225,6 @@ void OPPROTO op_sbfx_T1(void)
1267 T1 = val >> (32 - width); 1225 T1 = val >> (32 - width);
1268 } 1226 }
1269 1227
1270 -void OPPROTO op_movtop_T0_im(void)  
1271 -{  
1272 - T0 = (T0 & 0xffff) | PARAM1;  
1273 -}  
1274 -  
1275 -/* Used by table branch instructions. */  
1276 -void OPPROTO op_jmp_T0_im(void)  
1277 -{  
1278 - env->regs[15] = PARAM1 + (T0 << 1);  
1279 -}  
1280 -  
1281 -void OPPROTO op_set_condexec(void)  
1282 -{  
1283 - env->condexec_bits = PARAM1;  
1284 -}  
1285 -  
1286 void OPPROTO op_sdivl_T0_T1(void) 1228 void OPPROTO op_sdivl_T0_T1(void)
1287 { 1229 {
1288 int32_t num; 1230 int32_t num;
target-arm/translate.c
@@ -204,6 +204,9 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
204 204
205 #define gen_sxtb16(var) gen_helper_sxtb16(var, var) 205 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
206 #define gen_uxtb16(var) gen_helper_uxtb16(var, var) 206 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
  207 +#define gen_op_rev_T0() tcg_gen_bswap_i32(cpu_T[0], cpu_T[0])
  208 +
  209 +#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
207 210
208 #define gen_op_addl_T0_T1_setq() \ 211 #define gen_op_addl_T0_T1_setq() \
209 gen_helper_add_setq(cpu_T[0], cpu_T[0], cpu_T[1]) 212 gen_helper_add_setq(cpu_T[0], cpu_T[0], cpu_T[1])
@@ -216,6 +219,45 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
216 #define gen_op_subl_T0_T1_usaturate() \ 219 #define gen_op_subl_T0_T1_usaturate() \
217 gen_helper_sub_usaturate(cpu_T[0], cpu_T[0], cpu_T[1]) 220 gen_helper_sub_usaturate(cpu_T[0], cpu_T[0], cpu_T[1])
218 221
  222 +/* FIXME: Most targets have native widening multiplication.
  223 + It would be good to use that instead of a full wide multiply. */
  224 +/* Unsigned 32x32->64 multiply. */
  225 +static void gen_op_mull_T0_T1(void)
  226 +{
  227 + TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
  228 + TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
  229 +
  230 + tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
  231 + tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
  232 + tcg_gen_mul_i64(tmp1, tmp1, tmp2);
  233 + tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
  234 + tcg_gen_shri_i64(tmp1, tmp1, 32);
  235 + tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
  236 +}
  237 +
  238 +/* Signed 32x32->64 multiply. */
  239 +static void gen_op_imull_T0_T1(void)
  240 +{
  241 + TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
  242 + TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
  243 +
  244 + tcg_gen_ext_i32_i64(tmp1, cpu_T[0]);
  245 + tcg_gen_ext_i32_i64(tmp2, cpu_T[1]);
  246 + tcg_gen_mul_i64(tmp1, tmp1, tmp2);
  247 + tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
  248 + tcg_gen_shri_i64(tmp1, tmp1, 32);
  249 + tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
  250 +}
  251 +
  252 +/* Swap low and high halfwords. */
  253 +static void gen_swap_half(TCGv var)
  254 +{
  255 + TCGv tmp = new_tmp();
  256 + tcg_gen_shri_i32(tmp, var, 16);
  257 + tcg_gen_shli_i32(var, var, 16);
  258 + tcg_gen_or_i32(var, var, tmp);
  259 +}
  260 +
219 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead. 261 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
220 tmp = (t0 ^ t1) & 0x8000; 262 tmp = (t0 ^ t1) & 0x8000;
221 t0 &= ~0x8000; 263 t0 &= ~0x8000;
@@ -2652,7 +2694,11 @@ static inline void
2652 gen_set_condexec (DisasContext *s) 2694 gen_set_condexec (DisasContext *s)
2653 { 2695 {
2654 if (s->condexec_mask) { 2696 if (s->condexec_mask) {
2655 - gen_op_set_condexec((s->condexec_cond << 4) | (s->condexec_mask >> 1)); 2697 + uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
  2698 + TCGv tmp = new_tmp();
  2699 + tcg_gen_movi_i32(tmp, val);
  2700 + tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, condexec_bits));
  2701 + dead_tmp(tmp);
2656 } 2702 }
2657 } 2703 }
2658 2704
@@ -4314,7 +4360,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4314 NEON_GET_REG(T1, rm, pass * 2 + 1); 4360 NEON_GET_REG(T1, rm, pass * 2 + 1);
4315 switch (size) { 4361 switch (size) {
4316 case 0: gen_op_rev_T0(); break; 4362 case 0: gen_op_rev_T0(); break;
4317 - case 1: gen_op_revh_T0(); break; 4363 + case 1: gen_swap_half(cpu_T[0]); break;
4318 case 2: /* no-op */ break; 4364 case 2: /* no-op */ break;
4319 default: abort(); 4365 default: abort();
4320 } 4366 }
@@ -4325,7 +4371,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4325 gen_op_movl_T0_T1(); 4371 gen_op_movl_T0_T1();
4326 switch (size) { 4372 switch (size) {
4327 case 0: gen_op_rev_T0(); break; 4373 case 0: gen_op_rev_T0(); break;
4328 - case 1: gen_op_revh_T0(); break; 4374 + case 1: gen_swap_half(cpu_T[0]); break;
4329 default: abort(); 4375 default: abort();
4330 } 4376 }
4331 NEON_SET_REG(T0, rd, pass * 2); 4377 NEON_SET_REG(T0, rd, pass * 2);
@@ -4494,7 +4540,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4494 case 1: /* VREV32 */ 4540 case 1: /* VREV32 */
4495 switch (size) { 4541 switch (size) {
4496 case 0: gen_op_rev_T0(); break; 4542 case 0: gen_op_rev_T0(); break;
4497 - case 1: gen_op_revh_T0(); break; 4543 + case 1: gen_swap_half(cpu_T[0]); break;
4498 default: return 1; 4544 default: return 1;
4499 } 4545 }
4500 break; 4546 break;
@@ -5574,7 +5620,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5574 gen_movl_reg_T0(s, rd); 5620 gen_movl_reg_T0(s, rd);
5575 } else { 5621 } else {
5576 if (insn & (1 << 5)) 5622 if (insn & (1 << 5))
5577 - gen_op_swap_half_T1(); 5623 + gen_swap_half(cpu_T[1]);
5578 gen_op_mul_dual_T0_T1(); 5624 gen_op_mul_dual_T0_T1();
5579 if (insn & (1 << 22)) { 5625 if (insn & (1 << 22)) {
5580 if (insn & (1 << 6)) { 5626 if (insn & (1 << 6)) {
@@ -6104,8 +6150,9 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6104 dead_tmp(tmp); 6150 dead_tmp(tmp);
6105 gen_ldst(ldub, s); 6151 gen_ldst(ldub, s);
6106 } 6152 }
6107 - gen_op_jmp_T0_im(s->pc);  
6108 - s->is_jmp = DISAS_JUMP; 6153 + tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 1);
  6154 + tcg_gen_addi_i32(cpu_T[0], cpu_T[0], s->pc);
  6155 + gen_movl_reg_T0(s, 15);
6109 } else { 6156 } else {
6110 /* Load/store exclusive byte/halfword/doubleword. */ 6157 /* Load/store exclusive byte/halfword/doubleword. */
6111 op = (insn >> 4) & 0x3; 6158 op = (insn >> 4) & 0x3;
@@ -6385,7 +6432,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6385 case 2: /* Dual multiply add. */ 6432 case 2: /* Dual multiply add. */
6386 case 4: /* Dual multiply subtract. */ 6433 case 4: /* Dual multiply subtract. */
6387 if (op) 6434 if (op)
6388 - gen_op_swap_half_T1(); 6435 + gen_swap_half(cpu_T[1]);
6389 gen_op_mul_dual_T0_T1(); 6436 gen_op_mul_dual_T0_T1();
6390 /* This addition cannot overflow. */ 6437 /* This addition cannot overflow. */
6391 if (insn & (1 << 22)) { 6438 if (insn & (1 << 22)) {
@@ -6455,7 +6502,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6455 } else if ((op & 0xe) == 0xc) { 6502 } else if ((op & 0xe) == 0xc) {
6456 /* Dual multiply accumulate long. */ 6503 /* Dual multiply accumulate long. */
6457 if (op & 1) 6504 if (op & 1)
6458 - gen_op_swap_half_T1(); 6505 + gen_swap_half(cpu_T[1]);
6459 gen_op_mul_dual_T0_T1(); 6506 gen_op_mul_dual_T0_T1();
6460 if (op & 0x10) { 6507 if (op & 0x10) {
6461 gen_op_subl_T0_T1(); 6508 gen_op_subl_T0_T1();
@@ -6735,7 +6782,8 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6735 if (insn & (1 << 23)) { 6782 if (insn & (1 << 23)) {
6736 /* movt */ 6783 /* movt */
6737 gen_movl_T0_reg(s, rd); 6784 gen_movl_T0_reg(s, rd);
6738 - gen_op_movtop_T0_im(imm << 16); 6785 + tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
  6786 + tcg_gen_ori_i32(cpu_T[0], cpu_T[0], imm << 16);
6739 } else { 6787 } else {
6740 /* movw */ 6788 /* movw */
6741 gen_op_movl_T0_im(imm); 6789 gen_op_movl_T0_im(imm);
@@ -7600,7 +7648,12 @@ static inline int gen_intermediate_code_internal(CPUState *env,
7600 /* Reset the conditional execution bits immediately. This avoids 7648 /* Reset the conditional execution bits immediately. This avoids
7601 complications trying to do it at the end of the block. */ 7649 complications trying to do it at the end of the block. */
7602 if (env->condexec_bits) 7650 if (env->condexec_bits)
7603 - gen_op_set_condexec(0); 7651 + {
  7652 + TCGv tmp = new_tmp();
  7653 + tcg_gen_movi_i32(tmp, 0);
  7654 + tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, condexec_bits));
  7655 + dead_tmp(tmp);
  7656 + }
7604 do { 7657 do {
7605 #ifndef CONFIG_USER_ONLY 7658 #ifndef CONFIG_USER_ONLY
7606 if (dc->pc >= 0xfffffff0 && IS_M(env)) { 7659 if (dc->pc >= 0xfffffff0 && IS_M(env)) {