Commit 3670669ce25ef337a6e5e99b7a97b83997c06721

Authored by pbrook
1 parent 8f01245e

ARM TCG conversion 6/16.

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4143 c046a42c-6fe2-441c-8c8c-71466251a162
target-arm/helper.c
... ... @@ -347,6 +347,35 @@ uint32_t HELPER(clz)(uint32_t x)
347 347 return count;
348 348 }
349 349  
  350 +int32_t HELPER(sdiv)(int32_t num, int32_t den)
  351 +{
  352 + if (den == 0)
  353 + return 0;
  354 + return num / den;
  355 +}
  356 +
  357 +uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
  358 +{
  359 + if (den == 0)
  360 + return 0;
  361 + return num / den;
  362 +}
  363 +
  364 +uint32_t HELPER(rbit)(uint32_t x)
  365 +{
  366 + x = ((x & 0xff000000) >> 24)
  367 + | ((x & 0x00ff0000) >> 8)
  368 + | ((x & 0x0000ff00) << 8)
  369 + | ((x & 0x000000ff) << 24);
  370 + x = ((x & 0xf0f0f0f0) >> 4)
  371 + | ((x & 0x0f0f0f0f) << 4);
  372 + x = ((x & 0x88888888) >> 3)
  373 + | ((x & 0x44444444) >> 1)
  374 + | ((x & 0x22222222) << 1)
  375 + | ((x & 0x11111111) << 3);
  376 + return x;
  377 +}
  378 +
350 379 #if defined(CONFIG_USER_ONLY)
351 380  
352 381 void do_interrupt (CPUState *env)
... ...
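A note on the new division helpers above: returning 0 for a zero divisor matches the architected result when divide-by-zero trapping is not enabled, but the plain "num / den" in HELPER(sdiv) is still undefined behaviour in C for INT32_MIN / -1, where the architected SDIV result is INT32_MIN. A minimal sketch of a guarded variant (hypothetical, not part of this patch) could look like this:

    #include <stdint.h>

    /* Hypothetical guarded variant of HELPER(sdiv): identical for normal
       inputs, but avoids C undefined behaviour on INT32_MIN / -1, for
       which the architected SDIV result is INT32_MIN. */
    static int32_t sdiv_checked(int32_t num, int32_t den)
    {
        if (den == 0)
            return 0;                 /* architected result for divide by zero */
        if (num == INT32_MIN && den == -1)
            return INT32_MIN;         /* overflow case; C leaves num / den undefined */
        return num / den;
    }

HELPER(udiv) has no comparable corner case; unsigned division only needs the zero check.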
target-arm/helpers.h
... ... @@ -29,6 +29,9 @@ DEF_HELPER_1_2(sub_saturate, uint32_t, (uint32_t, uint32_t))
29 29 DEF_HELPER_1_2(add_usaturate, uint32_t, (uint32_t, uint32_t))
30 30 DEF_HELPER_1_2(sub_usaturate, uint32_t, (uint32_t, uint32_t))
31 31 DEF_HELPER_1_1(double_saturate, uint32_t, (int32_t))
  32 +DEF_HELPER_1_2(sdiv, int32_t, (int32_t, int32_t))
  33 +DEF_HELPER_1_2(udiv, uint32_t, (uint32_t, uint32_t))
  34 +DEF_HELPER_1_1(rbit, uint32_t, (uint32_t))
32 35  
33 36 #undef DEF_HELPER
34 37 #undef DEF_HELPER_1_1
... ...
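For readers new to the helper machinery: each DEF_HELPER_* line above is used twice. It can expand to an ordinary C prototype for the function defined in helper.c, or, when GEN_HELPER is defined, to a small gen_helper_* wrapper that emits the TCG call, which is what translate.c invokes below (gen_helper_udiv, gen_helper_sdiv, gen_helper_rbit). The following standalone sketch shows the same double-expansion idea with an X-macro list; it is illustrative only and does not reproduce the actual qemu macros:

    #include <stdint.h>
    #include <stdio.h>

    /* Shared list of helpers, parameterised by DEF_HELPER_1_2 (illustrative). */
    #define HELPER_LIST \
        DEF_HELPER_1_2(udiv, uint32_t) \
        DEF_HELPER_1_2(sdiv, int32_t)

    /* First expansion: prototypes, as the helper definitions see them. */
    #define DEF_HELPER_1_2(name, type) type helper_##name(type a, type b);
    HELPER_LIST
    #undef DEF_HELPER_1_2

    /* Second expansion: wrappers named gen_helper_* (qemu's version emits a
       TCG call instead of calling the helper directly). */
    #define DEF_HELPER_1_2(name, type) \
        static type gen_helper_##name(type a, type b) { return helper_##name(a, b); }
    HELPER_LIST
    #undef DEF_HELPER_1_2

    uint32_t helper_udiv(uint32_t a, uint32_t b) { return b ? a / b : 0; }
    int32_t  helper_sdiv(int32_t a, int32_t b)   { return b ? a / b : 0; }

    int main(void)
    {
        printf("%u %d\n", gen_helper_udiv(7, 2), gen_helper_sdiv(-7, 2));
        return 0;
    }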
target-arm/op.c
... ... @@ -59,11 +59,6 @@ void OPPROTO op_ ## sub ## l_T0_T1_cc(void) \
59 59 res = T0; \
60 60 } \
61 61 \
62   -void OPPROTO op_ ## sbc ## l_T0_T1(void) \
63   -{ \
64   - res = T0 - T1 + env->CF - 1; \
65   -} \
66   - \
67 62 void OPPROTO op_ ## sbc ## l_T0_T1_cc(void) \
68 63 { \
69 64 unsigned int src1; \
... ... @@ -754,12 +749,6 @@ void OPPROTO op_vfp_fconsts(void)
754 749 FT0s = vfp_itos(PARAM1);
755 750 }
756 751  
757   -/* Copy the most significant bit of T0 to all bits of T1. */
758   -void OPPROTO op_signbit_T1_T0(void)
759   -{
760   - T1 = (int32_t)T0 >> 31;
761   -}
762   -
763 752 void OPPROTO op_movl_cp_T0(void)
764 753 {
765 754 helper_set_cp(env, PARAM1, T0);
... ... @@ -1026,55 +1015,6 @@ static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
1026 1015  
1027 1016 #include "op_addsub.h"
1028 1017  
1029   -void OPPROTO op_pkhtb_T0_T1(void)
1030   -{
1031   - T0 = (T0 & 0xffff0000) | (T1 & 0xffff);
1032   -}
1033   -
1034   -void OPPROTO op_pkhbt_T0_T1(void)
1035   -{
1036   - T0 = (T0 & 0xffff) | (T1 & 0xffff0000);
1037   -}
1038   -
1039   -void OPPROTO op_rev16_T0(void)
1040   -{
1041   - T0 = ((T0 & 0xff000000) >> 8)
1042   - | ((T0 & 0x00ff0000) << 8)
1043   - | ((T0 & 0x0000ff00) >> 8)
1044   - | ((T0 & 0x000000ff) << 8);
1045   -}
1046   -
1047   -void OPPROTO op_revsh_T0(void)
1048   -{
1049   - T0 = (int16_t)( ((T0 & 0x0000ff00) >> 8)
1050   - | ((T0 & 0x000000ff) << 8));
1051   -}
1052   -
1053   -void OPPROTO op_rbit_T0(void)
1054   -{
1055   - T0 = ((T0 & 0xff000000) >> 24)
1056   - | ((T0 & 0x00ff0000) >> 8)
1057   - | ((T0 & 0x0000ff00) << 8)
1058   - | ((T0 & 0x000000ff) << 24);
1059   - T0 = ((T0 & 0xf0f0f0f0) >> 4)
1060   - | ((T0 & 0x0f0f0f0f) << 4);
1061   - T0 = ((T0 & 0x88888888) >> 3)
1062   - | ((T0 & 0x44444444) >> 1)
1063   - | ((T0 & 0x22222222) << 1)
1064   - | ((T0 & 0x11111111) << 3);
1065   -}
1066   -
1067   -/* Dual 16-bit signed multiply. */
1068   -void OPPROTO op_mul_dual_T0_T1(void)
1069   -{
1070   - int32_t low;
1071   - int32_t high;
1072   - low = (int32_t)(int16_t)T0 * (int32_t)(int16_t)T1;
1073   - high = (((int32_t)T0) >> 16) * (((int32_t)T1) >> 16);
1074   - T0 = low;
1075   - T1 = high;
1076   -}
1077   -
1078 1018 void OPPROTO op_sel_T0_T1(void)
1079 1019 {
1080 1020 uint32_t mask;
... ... @@ -1094,11 +1034,6 @@ void OPPROTO op_sel_T0_T1(void)
1094 1034 FORCE_RET();
1095 1035 }
1096 1036  
1097   -void OPPROTO op_roundqd_T0_T1(void)
1098   -{
1099   - T0 = T1 + ((uint32_t)T0 >> 31);
1100   -}
1101   -
1102 1037 /* Signed saturation. */
1103 1038 static inline uint32_t do_ssat(int32_t val, int shift)
1104 1039 {
... ... @@ -1191,66 +1126,6 @@ void OPPROTO op_usad8_T0_T1(void)
1191 1126 T0 = sum;
1192 1127 }
1193 1128  
1194   -/* Thumb-2 instructions. */
1195   -
1196   -/* Insert T1 into T0. Result goes in T1. */
1197   -void OPPROTO op_bfi_T1_T0(void)
1198   -{
1199   - int shift = PARAM1;
1200   - uint32_t mask = PARAM2;
1201   - uint32_t bits;
1202   -
1203   - bits = (T1 << shift) & mask;
1204   - T1 = (T0 & ~mask) | bits;
1205   -}
1206   -
1207   -/* Unsigned bitfield extract. */
1208   -void OPPROTO op_ubfx_T1(void)
1209   -{
1210   - uint32_t shift = PARAM1;
1211   - uint32_t mask = PARAM2;
1212   -
1213   - T1 >>= shift;
1214   - T1 &= mask;
1215   -}
1216   -
1217   -/* Signed bitfield extract. */
1218   -void OPPROTO op_sbfx_T1(void)
1219   -{
1220   - uint32_t shift = PARAM1;
1221   - uint32_t width = PARAM2;
1222   - int32_t val;
1223   -
1224   - val = T1 << (32 - (shift + width));
1225   - T1 = val >> (32 - width);
1226   -}
1227   -
1228   -void OPPROTO op_sdivl_T0_T1(void)
1229   -{
1230   - int32_t num;
1231   - int32_t den;
1232   - num = T0;
1233   - den = T1;
1234   - if (den == 0)
1235   - T0 = 0;
1236   - else
1237   - T0 = num / den;
1238   - FORCE_RET();
1239   -}
1240   -
1241   -void OPPROTO op_udivl_T0_T1(void)
1242   -{
1243   - uint32_t num;
1244   - uint32_t den;
1245   - num = T0;
1246   - den = T1;
1247   - if (den == 0)
1248   - T0 = 0;
1249   - else
1250   - T0 = num / den;
1251   - FORCE_RET();
1252   -}
1253   -
1254 1129 void OPPROTO op_movl_T1_r13_banked(void)
1255 1130 {
1256 1131 T1 = helper_get_r13_banked(env, PARAM1);
... ...
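The ops deleted above are not lost: they reappear as inline TCG sequences or C helpers in the translate.c and helper.c hunks of this patch. For the sbc/rsc case, the identity relied on by the new gen_sub_carry is that ARM subtract-with-carry, t0 - t1 - NOT(C), equals t0 - t1 + CF - 1 in 32-bit wraparound arithmetic. A standalone check of that identity (illustrative only; the names are not qemu's):

    #include <assert.h>
    #include <stdint.h>

    /* ARM SBC reference: subtract the inverted carry flag (the borrow). */
    static uint32_t sbc_ref(uint32_t t0, uint32_t t1, uint32_t cf)
    {
        return t0 - t1 - (1u - cf);
    }

    /* Form emitted by gen_sub_carry: t0 - t1 + CF - 1. */
    static uint32_t sbc_gen(uint32_t t0, uint32_t t1, uint32_t cf)
    {
        return t0 - t1 + cf - 1u;
    }

    int main(void)
    {
        uint32_t samples[] = { 0u, 1u, 0x7fffffffu, 0x80000000u, 0xffffffffu };
        for (unsigned i = 0; i < 5; i++)
            for (unsigned j = 0; j < 5; j++)
                for (uint32_t cf = 0; cf <= 1; cf++)
                    assert(sbc_ref(samples[i], samples[j], cf) ==
                           sbc_gen(samples[i], samples[j], cf));
        return 0;
    }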
target-arm/translate.c
... ... @@ -219,6 +219,87 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
219 219 #define gen_op_subl_T0_T1_usaturate() \
220 220 gen_helper_sub_usaturate(cpu_T[0], cpu_T[0], cpu_T[1])
221 221  
  222 +/* Copy the most significant bit of T0 to all bits of T1. */
  223 +#define gen_op_signbit_T1_T0() tcg_gen_sari_i32(cpu_T[1], cpu_T[0], 31)
  224 +
  225 +static void gen_smul_dual(TCGv a, TCGv b)
  226 +{
  227 + TCGv tmp1 = new_tmp();
  228 + TCGv tmp2 = new_tmp();
  229 + TCGv res;
  230 + tcg_gen_ext16s_i32(tmp1, a);
  231 + tcg_gen_ext16s_i32(tmp2, b);
  232 + tcg_gen_mul_i32(tmp1, tmp1, tmp2);
  233 + dead_tmp(tmp2);
  234 + tcg_gen_sari_i32(a, a, 16);
  235 + tcg_gen_sari_i32(b, b, 16);
  236 + tcg_gen_mul_i32(b, b, a);
  237 + tcg_gen_mov_i32(a, tmp1);
  238 + dead_tmp(tmp1);
  239 +}
  240 +
  241 +/* Byteswap each halfword. */
  242 +static void gen_rev16(TCGv var)
  243 +{
  244 + TCGv tmp = new_tmp();
  245 + tcg_gen_shri_i32(tmp, var, 8);
  246 + tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
  247 + tcg_gen_shli_i32(var, var, 8);
  248 + tcg_gen_andi_i32(var, var, 0xff00ff00);
  249 + tcg_gen_or_i32(var, var, tmp);
  250 + dead_tmp(tmp);
  251 +}
  252 +
  253 +/* Byteswap low halfword and sign extend. */
  254 +static void gen_revsh(TCGv var)
  255 +{
  256 + TCGv tmp = new_tmp();
  257 + tcg_gen_shri_i32(tmp, var, 8);
  258 + tcg_gen_andi_i32(tmp, tmp, 0x00ff);
  259 + tcg_gen_shli_i32(var, var, 8);
  260 + tcg_gen_ext16s_i32(var, var);
  261 + tcg_gen_or_i32(var, var, tmp);
  262 + dead_tmp(tmp);
  263 +}
  264 +
  265 +/* Unsigned bitfield extract. */
  266 +static void gen_ubfx(TCGv var, int shift, uint32_t mask)
  267 +{
  268 + if (shift)
  269 + tcg_gen_shri_i32(var, var, shift);
  270 + tcg_gen_andi_i32(var, var, mask);
  271 +}
  272 +
  273 +/* Signed bitfield extract. */
  274 +static void gen_sbfx(TCGv var, int shift, int width)
  275 +{
  276 + uint32_t signbit;
  277 +
  278 + if (shift)
  279 + tcg_gen_sari_i32(var, var, shift);
  280 + if (shift + width < 32) {
  281 + signbit = 1u << (width - 1);
  282 + tcg_gen_andi_i32(var, var, (1u << width) - 1);
  283 + tcg_gen_xori_i32(var, var, signbit);
  284 + tcg_gen_subi_i32(var, var, signbit);
  285 + }
  286 +}
  287 +
  288 +/* Bitfield insertion. Insert val into base. Clobbers base and val. */
  289 +static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
  290 +{
  291 + tcg_gen_shli_i32(val, val, shift);
  292 + tcg_gen_andi_i32(val, val, mask);
  293 + tcg_gen_andi_i32(base, base, ~mask);
  294 + tcg_gen_or_i32(dest, base, val);
  295 +}
  296 +
  297 +static void gen_op_roundqd_T0_T1(void)
  298 +{
  299 + tcg_gen_shri_i32(cpu_T[0], cpu_T[0], 31);
  300 + tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1]);
  301 +}
  302 +
222 303 /* FIXME: Most targets have native widening multiplication.
223 304 It would be good to use that instead of a full wide multiply. */
224 305 /* Unsigned 32x32->64 multiply. */
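The andi/xori/subi sequence in gen_sbfx above is the usual branch-free sign-extension trick: after masking a width-bit field, XORing with the field's sign bit and then subtracting that bit propagates the sign into the upper bits. A standalone check of the trick (illustrative, not qemu code; a logical shift stands in for the initial sari, which makes no difference once the field is masked):

    #include <assert.h>
    #include <stdint.h>

    /* Reference: extract the field and make it negative when its top bit is set. */
    static int32_t sbfx_ref(uint32_t x, int shift, int width)
    {
        int64_t field = (x >> shift) & ((1u << width) - 1);
        if (field & (1ll << (width - 1)))
            field -= 1ll << width;
        return (int32_t)field;
    }

    /* The mask / xor / subtract sequence used by gen_sbfx. */
    static int32_t sbfx_trick(uint32_t x, int shift, int width)
    {
        uint32_t signbit = 1u << (width - 1);
        uint32_t field = (x >> shift) & ((1u << width) - 1);
        /* Two's complement reinterpretation, as on the TCG target. */
        return (int32_t)((field ^ signbit) - signbit);
    }

    int main(void)
    {
        uint32_t vals[] = { 0x00000000u, 0x12345678u, 0xdeadbeefu, 0xffffffffu };
        for (unsigned i = 0; i < 4; i++)
            for (int shift = 0; shift < 32; shift++)
                for (int width = 1; shift + width < 32; width++)
                    assert(sbfx_ref(vals[i], shift, width) ==
                           sbfx_trick(vals[i], shift, width));
        return 0;
    }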
... ... @@ -256,6 +337,7 @@ static void gen_swap_half(TCGv var)
256 337 tcg_gen_shri_i32(tmp, var, 16);
257 338 tcg_gen_shli_i32(var, var, 16);
258 339 tcg_gen_or_i32(var, var, tmp);
  340 + dead_tmp(tmp);
259 341 }
260 342  
261 343 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
... ... @@ -305,6 +387,20 @@ static void gen_adc_T0_T1(void)
305 387 dead_tmp(tmp);
306 388 }
307 389  
  390 +/* dest = t0 - t1 + CF - 1. */
  391 +static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
  392 +{
  393 + TCGv tmp = new_tmp();
  394 + tcg_gen_sub_i32(dest, t0, t1);
  395 + tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, CF));
  396 + tcg_gen_add_i32(dest, dest, tmp);
  397 + tcg_gen_subi_i32(dest, dest, 1);
  398 + dead_tmp(tmp);
  399 +}
  400 +
  401 +#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
  402 +#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
  403 +
308 404 /* FIXME: Implement this natively. */
309 405 static inline void tcg_gen_not_i32(TCGv t0, TCGv t1)
310 406 {
... ... @@ -4547,7 +4643,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4547 4643 case 2: /* VREV16 */
4548 4644 if (size != 0)
4549 4645 return 1;
4550   - gen_op_rev16_T0();
  4646 + gen_rev16(cpu_T[0]);
4551 4647 break;
4552 4648 case 4: case 5: /* VPADDL */
4553 4649 case 12: case 13: /* VPADAL */
... ... @@ -4809,6 +4905,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
4809 4905 {
4810 4906 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
4811 4907 TCGv tmp;
  4908 + TCGv tmp2;
4812 4909  
4813 4910 insn = ldl_code(s->pc);
4814 4911 s->pc += 4;
... ... @@ -5261,14 +5358,14 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5261 5358 if (set_cc)
5262 5359 gen_op_sbcl_T0_T1_cc();
5263 5360 else
5264   - gen_op_sbcl_T0_T1();
  5361 + gen_sbc_T0_T1();
5265 5362 gen_movl_reg_T0(s, rd);
5266 5363 break;
5267 5364 case 0x07:
5268 5365 if (set_cc)
5269 5366 gen_op_rscl_T0_T1_cc();
5270 5367 else
5271   - gen_op_rscl_T0_T1();
  5368 + gen_rsc_T0_T1();
5272 5369 gen_movl_reg_T0(s, rd);
5273 5370 break;
5274 5371 case 0x08:
... ... @@ -5505,16 +5602,22 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5505 5602 case 1:
5506 5603 if ((insn & 0x00700020) == 0) {
5507 5604 /* Halfword pack. */
5508   - gen_movl_T0_reg(s, rn);
5509   - gen_movl_T1_reg(s, rm);
  5605 + tmp = load_reg(s, rn);
  5606 + tmp2 = load_reg(s, rm);
5510 5607 shift = (insn >> 7) & 0x1f;
5511 5608 if (shift)
5512   - gen_op_shll_T1_im(shift);
5513   - if (insn & (1 << 6))
5514   - gen_op_pkhtb_T0_T1();
5515   - else
5516   - gen_op_pkhbt_T0_T1();
5517   - gen_movl_reg_T0(s, rd);
  5609 + tcg_gen_shli_i32(tmp2, tmp2, shift);
  5610 + if (insn & (1 << 6)) {
  5611 + /* pkhtb */
  5612 + tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
  5613 + tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
  5614 + } else {
  5615 + /* pkhbt */
  5616 + tcg_gen_andi_i32(tmp, tmp, 0xffff);
  5617 + tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
  5618 + }
  5619 + tcg_gen_or_i32(tmp, tmp, tmp2);
  5620 + store_reg(s, rd, tmp);
5518 5621 } else if ((insn & 0x00200020) == 0x00200000) {
5519 5622 /* [us]sat */
5520 5623 gen_movl_T1_reg(s, rm);
... ... @@ -5583,14 +5686,14 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5583 5686 gen_movl_T0_reg(s, rm);
5584 5687 if (insn & (1 << 22)) {
5585 5688 if (insn & (1 << 7)) {
5586   - gen_op_revsh_T0();
  5689 + gen_revsh(cpu_T[0]);
5587 5690 } else {
5588 5691 ARCH(6T2);
5589   - gen_op_rbit_T0();
  5692 + gen_helper_rbit(cpu_T[0], cpu_T[0]);
5590 5693 }
5591 5694 } else {
5592 5695 if (insn & (1 << 7))
5593   - gen_op_rev16_T0();
  5696 + gen_rev16(cpu_T[0]);
5594 5697 else
5595 5698 gen_op_rev_T0();
5596 5699 }
... ... @@ -5621,7 +5724,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5621 5724 } else {
5622 5725 if (insn & (1 << 5))
5623 5726 gen_swap_half(cpu_T[1]);
5624   - gen_op_mul_dual_T0_T1();
  5727 + gen_smul_dual(cpu_T[0], cpu_T[1]);
5625 5728 if (insn & (1 << 22)) {
5626 5729 if (insn & (1 << 6)) {
5627 5730 /* smlald */
... ... @@ -5675,7 +5778,8 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5675 5778 }
5676 5779 if (i != 32) {
5677 5780 gen_movl_T0_reg(s, rd);
5678   - gen_op_bfi_T1_T0(shift, ((1u << i) - 1) << shift);
  5781 + gen_bfi(cpu_T[1], cpu_T[0], cpu_T[1],
  5782 + shift, ((1u << i) - 1) << shift);
5679 5783 }
5680 5784 gen_movl_reg_T1(s, rd);
5681 5785 break;
... ... @@ -5688,9 +5792,9 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5688 5792 goto illegal_op;
5689 5793 if (i < 32) {
5690 5794 if (op1 & 0x20) {
5691   - gen_op_ubfx_T1(shift, (1u << i) - 1);
  5795 + gen_ubfx(cpu_T[1], shift, (1u << i) - 1);
5692 5796 } else {
5693   - gen_op_sbfx_T1(shift, i);
  5797 + gen_sbfx(cpu_T[1], shift, i);
5694 5798 }
5695 5799 }
5696 5800 gen_movl_reg_T1(s, rd);
... ... @@ -5984,7 +6088,7 @@ gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
5984 6088 if (conds)
5985 6089 gen_op_sbcl_T0_T1_cc();
5986 6090 else
5987   - gen_op_sbcl_T0_T1();
  6091 + gen_sbc_T0_T1();
5988 6092 break;
5989 6093 case 13: /* sub */
5990 6094 if (conds)
... ... @@ -6381,16 +6485,16 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6381 6485 gen_movl_T0_reg(s, rn);
6382 6486 switch (op) {
6383 6487 case 0x0a: /* rbit */
6384   - gen_op_rbit_T0();
  6488 + gen_helper_rbit(cpu_T[0], cpu_T[0]);
6385 6489 break;
6386 6490 case 0x08: /* rev */
6387 6491 gen_op_rev_T0();
6388 6492 break;
6389 6493 case 0x09: /* rev16 */
6390   - gen_op_rev16_T0();
  6494 + gen_rev16(cpu_T[0]);
6391 6495 break;
6392 6496 case 0x0b: /* revsh */
6393   - gen_op_revsh_T0();
  6497 + gen_revsh(cpu_T[0]);
6394 6498 break;
6395 6499 case 0x10: /* sel */
6396 6500 gen_movl_T1_reg(s, rm);
... ... @@ -6433,7 +6537,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6433 6537 case 4: /* Dual multiply subtract. */
6434 6538 if (op)
6435 6539 gen_swap_half(cpu_T[1]);
6436   - gen_op_mul_dual_T0_T1();
  6540 + gen_smul_dual(cpu_T[0], cpu_T[1]);
6437 6541 /* This addition cannot overflow. */
6438 6542 if (insn & (1 << 22)) {
6439 6543 gen_op_subl_T0_T1();
... ... @@ -6495,15 +6599,15 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6495 6599 if (!arm_feature(env, ARM_FEATURE_DIV))
6496 6600 goto illegal_op;
6497 6601 if (op & 0x20)
6498   - gen_op_udivl_T0_T1();
  6602 + gen_helper_udiv(cpu_T[0], cpu_T[0], cpu_T[1]);
6499 6603 else
6500   - gen_op_sdivl_T0_T1();
  6604 + gen_helper_sdiv(cpu_T[0], cpu_T[0], cpu_T[1]);
6501 6605 gen_movl_reg_T0(s, rd);
6502 6606 } else if ((op & 0xe) == 0xc) {
6503 6607 /* Dual multiply accumulate long. */
6504 6608 if (op & 1)
6505 6609 gen_swap_half(cpu_T[1]);
6506   - gen_op_mul_dual_T0_T1();
  6610 + gen_smul_dual(cpu_T[0], cpu_T[1]);
6507 6611 if (op & 0x10) {
6508 6612 gen_op_subl_T0_T1();
6509 6613 } else {
... ... @@ -6727,14 +6831,14 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6727 6831 if (shift + imm > 32)
6728 6832 goto illegal_op;
6729 6833 if (imm < 32)
6730   - gen_op_sbfx_T1(shift, imm);
  6834 + gen_sbfx(cpu_T[1], shift, imm);
6731 6835 break;
6732 6836 case 6: /* Unsigned bitfield extract. */
6733 6837 imm++;
6734 6838 if (shift + imm > 32)
6735 6839 goto illegal_op;
6736 6840 if (imm < 32)
6737   - gen_op_ubfx_T1(shift, (1u << imm) - 1);
  6841 + gen_ubfx(cpu_T[1], shift, (1u << imm) - 1);
6738 6842 break;
6739 6843 case 3: /* Bitfield insert/clear. */
6740 6844 if (imm < shift)
... ... @@ -6742,7 +6846,8 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6742 6846 imm = imm + 1 - shift;
6743 6847 if (imm != 32) {
6744 6848 gen_movl_T0_reg(s, rd);
6745   - gen_op_bfi_T1_T0(shift, ((1u << imm) - 1) << shift);
  6849 + gen_bfi(cpu_T[1], cpu_T[0], cpu_T[1],
  6850 + shift, ((1u << imm) - 1) << shift);
6746 6851 }
6747 6852 break;
6748 6853 case 7:
... ... @@ -7161,7 +7266,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
7161 7266 break;
7162 7267 case 0x6: /* sbc */
7163 7268 if (s->condexec_mask)
7164   - gen_op_sbcl_T0_T1();
  7269 + gen_sbc_T0_T1();
7165 7270 else
7166 7271 gen_op_sbcl_T0_T1_cc();
7167 7272 break;
... ... @@ -7479,8 +7584,8 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
7479 7584 gen_movl_T0_reg(s, rn);
7480 7585 switch ((insn >> 6) & 3) {
7481 7586 case 0: gen_op_rev_T0(); break;
7482   - case 1: gen_op_rev16_T0(); break;
7483   - case 3: gen_op_revsh_T0(); break;
  7587 + case 1: gen_rev16(cpu_T[0]); break;
  7588 + case 3: gen_revsh(cpu_T[0]); break;
7484 7589 default: goto illegal_op;
7485 7590 }
7486 7591 gen_movl_reg_T0(s, rd);
... ...
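Finally, a sanity check on one of the inline conversions introduced earlier in this patch: gen_rev16 replaces the per-byte masks of the removed op_rev16_T0 with one shift-and-mask per direction. The two formulations agree, as this standalone comparison (illustrative only) shows:

    #include <assert.h>
    #include <stdint.h>

    /* Former op_rev16_T0: move each byte explicitly. */
    static uint32_t rev16_old(uint32_t x)
    {
        return ((x & 0xff000000) >> 8)
             | ((x & 0x00ff0000) << 8)
             | ((x & 0x0000ff00) >> 8)
             | ((x & 0x000000ff) << 8);
    }

    /* Operation sequence emitted by gen_rev16: shift, mask, shift, mask, or. */
    static uint32_t rev16_new(uint32_t x)
    {
        uint32_t tmp = (x >> 8) & 0x00ff00ff;
        x = (x << 8) & 0xff00ff00;
        return x | tmp;
    }

    int main(void)
    {
        uint32_t vals[] = { 0x00000000u, 0x01020304u, 0xa1b2c3d4u, 0xffffffffu };
        for (unsigned i = 0; i < 4; i++)
            assert(rev16_old(vals[i]) == rev16_new(vals[i]));
        return 0;
    }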