Commit b010980544b543d421b3f514bba3cafc59b3a3db

Authored by pbrook
1 parent d9ba4830

ARM TCG conversion 9/16.
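
Convert the ARM load/store ops, user-register access and banked-r13 access
from dyngen ops to TCG.  op_mem.h loses its MEM_LD_OP/MEM_ST_OP ops and op.c
loses op_movl_T0_user/op_movl_user_T0 and the banked-r13 ops; translate.c
gains small gen_ld*/gen_st* wrappers around the tcg_gen_qemu_ld*/st* ops,
gen_add_data_offset()/gen_add_datah_offset() now operate on a caller-supplied
TCGv instead of T1, and get_user_reg/set_user_reg plus
get_r13_banked/set_r13_banked become proper helpers, with DEF_HELPER_0_3 and
tcg_gen_helper_0_3 added for the three-argument case.

As a rough sketch of the conversion pattern used throughout translate.c (not
new code, just the before/after shape of a single 32-bit load):

    /* old dyngen style: address in T1, result in T0 */
    gen_movl_T1_reg(s, rn);
    gen_ldst(ldl, s);
    gen_movl_reg_T0(s, rd);

    /* new TCG style: explicit temporaries */
    addr = load_reg(s, rn);            /* new temp holding regs[rn] */
    tmp = gen_ld32(addr, IS_USER(s));  /* new temp with the loaded word */
    store_reg(s, rd, tmp);             /* store_reg() marks tmp dead */
    dead_tmp(addr);                    /* address temp freed explicitly */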

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4146 c046a42c-6fe2-441c-8c8c-71466251a162
target-arm/exec.h
... ... @@ -72,8 +72,6 @@ void helper_set_cp(CPUState *, uint32_t, uint32_t);
72 72 uint32_t helper_get_cp(CPUState *, uint32_t);
73 73 void helper_set_cp15(CPUState *, uint32_t, uint32_t);
74 74 uint32_t helper_get_cp15(CPUState *, uint32_t);
75   -void helper_set_r13_banked(CPUState *env, int mode, uint32_t val);
76   -uint32_t helper_get_r13_banked(CPUState *env, int mode);
77 75 uint32_t helper_v7m_mrs(CPUState *env, int reg);
78 76 void helper_v7m_msr(CPUState *env, int reg, uint32_t val);
79 77  
... ...
target-arm/helper.c
... ... @@ -513,12 +513,12 @@ void switch_mode(CPUState *env, int mode)
513 513 cpu_abort(env, "Tried to switch out of user mode\n");
514 514 }
515 515  
516   -void helper_set_r13_banked(CPUState *env, int mode, uint32_t val)
  516 +void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
517 517 {
518 518 cpu_abort(env, "banked r13 write\n");
519 519 }
520 520  
521   -uint32_t helper_get_r13_banked(CPUState *env, int mode)
  521 +uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
522 522 {
523 523 cpu_abort(env, "banked r13 read\n");
524 524 return 0;
... ... @@ -1793,12 +1793,12 @@ bad_reg:
1793 1793 return 0;
1794 1794 }
1795 1795  
1796   -void helper_set_r13_banked(CPUState *env, int mode, uint32_t val)
  1796 +void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
1797 1797 {
1798 1798 env->banked_r13[bank_number(mode)] = val;
1799 1799 }
1800 1800  
1801   -uint32_t helper_get_r13_banked(CPUState *env, int mode)
  1801 +uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
1802 1802 {
1803 1803 return env->banked_r13[bank_number(mode)];
1804 1804 }
... ...
target-arm/helpers.h
... ... @@ -19,6 +19,13 @@ static inline void gen_helper_##name(TCGv arg1, TCGv arg2) \
19 19 { \
20 20 tcg_gen_helper_0_2(helper_##name, arg1, arg2); \
21 21 }
  22 +#define DEF_HELPER_0_3(name, ret, args) \
  23 +DEF_HELPER(name, ret, args) \
  24 +static inline void gen_helper_##name( \
  25 + TCGv arg1, TCGv arg2, TCGv arg3) \
  26 +{ \
  27 + tcg_gen_helper_0_3(helper_##name, arg1, arg2, arg3); \
  28 +}
22 29 #define DEF_HELPER_1_0(name, ret, args) \
23 30 DEF_HELPER(name, ret, args) \
24 31 static inline void gen_helper_##name(TCGv ret) \
... ... @@ -48,6 +55,7 @@ static inline void gen_helper_##name(TCGv ret, \
48 55 #define DEF_HELPER_0_0 DEF_HELPER
49 56 #define DEF_HELPER_0_1 DEF_HELPER
50 57 #define DEF_HELPER_0_2 DEF_HELPER
  58 +#define DEF_HELPER_0_3 DEF_HELPER
51 59 #define DEF_HELPER_1_0 DEF_HELPER
52 60 #define DEF_HELPER_1_1 DEF_HELPER
53 61 #define DEF_HELPER_1_2 DEF_HELPER
... ... @@ -108,6 +116,12 @@ DEF_HELPER_0_0(wfi, void, (void))
108 116 DEF_HELPER_0_2(cpsr_write, void, (uint32_t, uint32_t))
109 117 DEF_HELPER_1_0(cpsr_read, uint32_t, (void))
110 118  
  119 +DEF_HELPER_1_2(get_r13_banked, uint32_t, (CPUState *, uint32_t))
  120 +DEF_HELPER_0_3(set_r13_banked, void, (CPUState *, uint32_t, uint32_t))
  121 +
  122 +DEF_HELPER_1_1(get_user_reg, uint32_t, (uint32_t))
  123 +DEF_HELPER_0_2(set_user_reg, void, (uint32_t, uint32_t))
  124 +
111 125 #undef DEF_HELPER
112 126 #undef DEF_HELPER_0_0
113 127 #undef DEF_HELPER_0_1
... ...
target-arm/op.c
... ... @@ -589,48 +589,6 @@ void OPPROTO op_movl_T0_cp15(void)
589 589 FORCE_RET();
590 590 }
591 591  
592   -/* Access to user mode registers from privileged modes. */
593   -void OPPROTO op_movl_T0_user(void)
594   -{
595   - int regno = PARAM1;
596   - if (regno == 13) {
597   - T0 = env->banked_r13[0];
598   - } else if (regno == 14) {
599   - T0 = env->banked_r14[0];
600   - } else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
601   - T0 = env->usr_regs[regno - 8];
602   - } else {
603   - T0 = env->regs[regno];
604   - }
605   - FORCE_RET();
606   -}
607   -
608   -
609   -void OPPROTO op_movl_user_T0(void)
610   -{
611   - int regno = PARAM1;
612   - if (regno == 13) {
613   - env->banked_r13[0] = T0;
614   - } else if (regno == 14) {
615   - env->banked_r14[0] = T0;
616   - } else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
617   - env->usr_regs[regno - 8] = T0;
618   - } else {
619   - env->regs[regno] = T0;
620   - }
621   - FORCE_RET();
622   -}
623   -
624   -void OPPROTO op_movl_T1_r13_banked(void)
625   -{
626   - T1 = helper_get_r13_banked(env, PARAM1);
627   -}
628   -
629   -void OPPROTO op_movl_r13_T1_banked(void)
630   -{
631   - helper_set_r13_banked(env, PARAM1, T1);
632   -}
633   -
634 592 void OPPROTO op_v7m_mrs_T0(void)
635 593 {
636 594 T0 = helper_v7m_mrs(env, PARAM1);
... ...
target-arm/op_helper.c
... ... @@ -459,3 +459,36 @@ void HELPER(cpsr_write)(uint32_t val, uint32_t mask)
459 459 {
460 460 cpsr_write(env, val, mask);
461 461 }
  462 +
  463 +/* Access to user mode registers from privileged modes. */
  464 +uint32_t HELPER(get_user_reg)(uint32_t regno)
  465 +{
  466 + uint32_t val;
  467 +
  468 + if (regno == 13) {
  469 + val = env->banked_r13[0];
  470 + } else if (regno == 14) {
  471 + val = env->banked_r14[0];
  472 + } else if (regno >= 8
  473 + && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
  474 + val = env->usr_regs[regno - 8];
  475 + } else {
  476 + val = env->regs[regno];
  477 + }
  478 + return val;
  479 +}
  480 +
  481 +void HELPER(set_user_reg)(uint32_t regno, uint32_t val)
  482 +{
  483 + if (regno == 13) {
  484 + env->banked_r13[0] = val;
  485 + } else if (regno == 14) {
  486 + env->banked_r14[0] = val;
  487 + } else if (regno >= 8
  488 + && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
  489 + env->usr_regs[regno - 8] = val;
  490 + } else {
  491 + env->regs[regno] = val;
  492 + }
  493 +}
  494 +
... ...
target-arm/op_mem.h
1 1 /* ARM memory operations. */
2 2  
3   -void helper_ld(uint32_t);
4   -/* Load from address T1 into T0. */
5   -#define MEM_LD_OP(name) \
6   -void OPPROTO glue(op_ld##name,MEMSUFFIX)(void) \
7   -{ \
8   - T0 = glue(ld##name,MEMSUFFIX)(T1); \
9   - FORCE_RET(); \
10   -}
11   -
12   -MEM_LD_OP(ub)
13   -MEM_LD_OP(sb)
14   -MEM_LD_OP(uw)
15   -MEM_LD_OP(sw)
16   -MEM_LD_OP(l)
17   -
18   -#undef MEM_LD_OP
19   -
20   -/* Store T0 to address T1. */
21   -#define MEM_ST_OP(name) \
22   -void OPPROTO glue(op_st##name,MEMSUFFIX)(void) \
23   -{ \
24   - glue(st##name,MEMSUFFIX)(T1, T0); \
25   - FORCE_RET(); \
26   -}
27   -
28   -MEM_ST_OP(b)
29   -MEM_ST_OP(w)
30   -MEM_ST_OP(l)
31   -
32   -#undef MEM_ST_OP
33   -
34 3 /* Swap T0 with memory at address T1. */
35 4 /* ??? Is this exception safe? */
36 5 #define MEM_SWP_OP(name, lname) \
... ...
target-arm/translate.c
... ... @@ -222,7 +222,6 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
222 222  
223 223 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
224 224 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
225   -#define gen_op_rev_T0() tcg_gen_bswap_i32(cpu_T[0], cpu_T[0])
226 225  
227 226 #define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
228 227  
... ... @@ -804,6 +803,51 @@ static inline void gen_bx_T0(DisasContext *s)
804 803 gen_op_##name##_kernel(); \
805 804 } while (0)
806 805 #endif
  806 +static inline TCGv gen_ld8s(TCGv addr, int index)
  807 +{
  808 + TCGv tmp = new_tmp();
  809 + tcg_gen_qemu_ld8s(tmp, addr, index);
  810 + return tmp;
  811 +}
  812 +static inline TCGv gen_ld8u(TCGv addr, int index)
  813 +{
  814 + TCGv tmp = new_tmp();
  815 + tcg_gen_qemu_ld8u(tmp, addr, index);
  816 + return tmp;
  817 +}
  818 +static inline TCGv gen_ld16s(TCGv addr, int index)
  819 +{
  820 + TCGv tmp = new_tmp();
  821 + tcg_gen_qemu_ld16s(tmp, addr, index);
  822 + return tmp;
  823 +}
  824 +static inline TCGv gen_ld16u(TCGv addr, int index)
  825 +{
  826 + TCGv tmp = new_tmp();
  827 + tcg_gen_qemu_ld16u(tmp, addr, index);
  828 + return tmp;
  829 +}
  830 +static inline TCGv gen_ld32(TCGv addr, int index)
  831 +{
  832 + TCGv tmp = new_tmp();
  833 + tcg_gen_qemu_ld32u(tmp, addr, index);
  834 + return tmp;
  835 +}
  836 +static inline void gen_st8(TCGv val, TCGv addr, int index)
  837 +{
  838 + tcg_gen_qemu_st8(val, addr, index);
  839 + dead_tmp(val);
  840 +}
  841 +static inline void gen_st16(TCGv val, TCGv addr, int index)
  842 +{
  843 + tcg_gen_qemu_st16(val, addr, index);
  844 + dead_tmp(val);
  845 +}
  846 +static inline void gen_st32(TCGv val, TCGv addr, int index)
  847 +{
  848 + tcg_gen_qemu_st32(val, addr, index);
  849 + dead_tmp(val);
  850 +}
807 851  
808 852 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
809 853 {
... ... @@ -859,7 +903,8 @@ static inline void gen_lookup_tb(DisasContext *s)
859 903 s->is_jmp = DISAS_UPDATE;
860 904 }
861 905  
862   -static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
  906 +static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
  907 + TCGv var)
863 908 {
864 909 int val, rm, shift, shiftop;
865 910 TCGv offset;
... ... @@ -870,7 +915,7 @@ static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
870 915 if (!(insn & (1 << 23)))
871 916 val = -val;
872 917 if (val != 0)
873   - gen_op_addl_T1_im(val);
  918 + tcg_gen_addi_i32(var, var, val);
874 919 } else {
875 920 /* shift/register */
876 921 rm = (insn) & 0xf;
... ... @@ -879,15 +924,15 @@ static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
879 924 offset = load_reg(s, rm);
880 925 gen_arm_shift_im(offset, shiftop, shift, 0);
881 926 if (!(insn & (1 << 23)))
882   - tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
  927 + tcg_gen_sub_i32(var, var, offset);
883 928 else
884   - tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset);
  929 + tcg_gen_add_i32(var, var, offset);
885 930 dead_tmp(offset);
886 931 }
887 932 }
888 933  
889 934 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
890   - int extra)
  935 + int extra, TCGv var)
891 936 {
892 937 int val, rm;
893 938 TCGv offset;
... ... @@ -899,17 +944,17 @@ static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
899 944 val = -val;
900 945 val += extra;
901 946 if (val != 0)
902   - gen_op_addl_T1_im(val);
  947 + tcg_gen_addi_i32(var, var, val);
903 948 } else {
904 949 /* register */
905 950 if (extra)
906   - gen_op_addl_T1_im(extra);
  951 + tcg_gen_addi_i32(var, var, extra);
907 952 rm = (insn) & 0xf;
908 953 offset = load_reg(s, rm);
909 954 if (!(insn & (1 << 23)))
910   - tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
  955 + tcg_gen_sub_i32(var, var, offset);
911 956 else
912   - tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset);
  957 + tcg_gen_add_i32(var, var, offset);
913 958 dead_tmp(offset);
914 959 }
915 960 }
... ... @@ -1090,6 +1135,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1090 1135 {
1091 1136 int rd, wrd;
1092 1137 int rdhi, rdlo, rd0, rd1, i;
  1138 + TCGv tmp;
1093 1139  
1094 1140 if ((insn & 0x0e000e00) == 0x0c000000) {
1095 1141 if ((insn & 0x0fe00ff0) == 0x0c400000) {
... ... @@ -1114,7 +1160,9 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1114 1160 return 1;
1115 1161 if (insn & ARM_CP_RW_BIT) {
1116 1162 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1117   - gen_ldst(ldl, s);
  1163 + tmp = gen_ld32(cpu_T[1], IS_USER(s));
  1164 + tcg_gen_mov_i32(cpu_T[0], tmp);
  1165 + dead_tmp(tmp);
1118 1166 gen_op_iwmmxt_movl_wCx_T0(wrd);
1119 1167 } else {
1120 1168 if (insn & (1 << 8))
... ... @@ -1132,7 +1180,9 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1132 1180 } else {
1133 1181 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1134 1182 gen_op_iwmmxt_movl_T0_wCx(wrd);
1135   - gen_ldst(stl, s);
  1183 + tmp = new_tmp();
  1184 + tcg_gen_mov_i32(tmp, cpu_T[0]);
  1185 + gen_st32(tmp, cpu_T[1], IS_USER(s));
1136 1186 } else {
1137 1187 gen_op_iwmmxt_movq_M0_wRn(wrd);
1138 1188 if (insn & (1 << 8))
... ... @@ -2970,12 +3020,12 @@ static void gen_exception_return(DisasContext *s)
2970 3020 s->is_jmp = DISAS_UPDATE;
2971 3021 }
2972 3022  
2973   -/* Generate a v6 exception return. */
2974   -static void gen_rfe(DisasContext *s)
  3023 +/* Generate a v6 exception return. Marks both values as dead. */
  3024 +static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2975 3025 {
2976   - gen_set_cpsr(cpu_T[0], 0xffffffff);
2977   - gen_op_movl_T0_T2();
2978   - gen_set_pc_T0();
  3026 + gen_set_cpsr(cpsr, 0xffffffff);
  3027 + dead_tmp(cpsr);
  3028 + store_reg(s, 15, pc);
2979 3029 s->is_jmp = DISAS_UPDATE;
2980 3030 }
2981 3031  
... ... @@ -3288,6 +3338,7 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3288 3338 int shift;
3289 3339 uint32_t mask;
3290 3340 int n;
  3341 + TCGv tmp;
3291 3342  
3292 3343 if (!vfp_enabled(env))
3293 3344 return 1;
... ... @@ -3316,36 +3367,50 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3316 3367 for (pass = 0; pass < 2; pass++) {
3317 3368 if (size == 2) {
3318 3369 if (load) {
3319   - gen_ldst(ldl, s);
  3370 + tmp = gen_ld32(cpu_T[1], IS_USER(s));
  3371 + tcg_gen_mov_i32(cpu_T[0], tmp);
  3372 + dead_tmp(tmp);
3320 3373 NEON_SET_REG(T0, rd, pass);
3321 3374 } else {
3322 3375 NEON_GET_REG(T0, rd, pass);
3323   - gen_ldst(stl, s);
  3376 + tmp = new_tmp();
  3377 + tcg_gen_mov_i32(tmp, cpu_T[0]);
  3378 + gen_st32(tmp, cpu_T[1], IS_USER(s));
3324 3379 }
3325 3380 gen_op_addl_T1_im(stride);
3326 3381 } else if (size == 1) {
3327 3382 if (load) {
3328   - gen_ldst(lduw, s);
  3383 + tmp = gen_ld16u(cpu_T[1], IS_USER(s));
  3384 + tcg_gen_mov_i32(cpu_T[0], tmp);
  3385 + dead_tmp(tmp);
3329 3386 gen_op_addl_T1_im(stride);
3330 3387 gen_op_movl_T2_T0();
3331   - gen_ldst(lduw, s);
  3388 + tmp = gen_ld16u(cpu_T[1], IS_USER(s));
  3389 + tcg_gen_mov_i32(cpu_T[0], tmp);
  3390 + dead_tmp(tmp);
3332 3391 gen_op_addl_T1_im(stride);
3333 3392 gen_op_neon_insert_elt(16, 0xffff);
3334 3393 NEON_SET_REG(T2, rd, pass);
3335 3394 } else {
3336 3395 NEON_GET_REG(T2, rd, pass);
3337 3396 gen_op_movl_T0_T2();
3338   - gen_ldst(stw, s);
  3397 + tmp = new_tmp();
  3398 + tcg_gen_mov_i32(tmp, cpu_T[0]);
  3399 + gen_st16(tmp, cpu_T[1], IS_USER(s));
3339 3400 gen_op_addl_T1_im(stride);
3340 3401 gen_op_neon_extract_elt(16, 0xffff0000);
3341   - gen_ldst(stw, s);
  3402 + tmp = new_tmp();
  3403 + tcg_gen_mov_i32(tmp, cpu_T[0]);
  3404 + gen_st16(tmp, cpu_T[1], IS_USER(s));
3342 3405 gen_op_addl_T1_im(stride);
3343 3406 }
3344 3407 } else /* size == 0 */ {
3345 3408 if (load) {
3346 3409 mask = 0xff;
3347 3410 for (n = 0; n < 4; n++) {
3348   - gen_ldst(ldub, s);
  3411 + tmp = gen_ld8u(cpu_T[1], IS_USER(s));
  3412 + tcg_gen_mov_i32(cpu_T[0], tmp);
  3413 + dead_tmp(tmp);
3349 3414 gen_op_addl_T1_im(stride);
3350 3415 if (n == 0) {
3351 3416 gen_op_movl_T2_T0();
... ... @@ -3364,7 +3429,9 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3364 3429 } else {
3365 3430 gen_op_neon_extract_elt(n * 8, mask);
3366 3431 }
3367   - gen_ldst(stb, s);
  3432 + tmp = new_tmp();
  3433 + tcg_gen_mov_i32(tmp, cpu_T[0]);
  3434 + gen_st8(tmp, cpu_T[1], IS_USER(s));
3368 3435 gen_op_addl_T1_im(stride);
3369 3436 mask <<= 8;
3370 3437 }
... ... @@ -3387,15 +3454,21 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3387 3454 for (reg = 0; reg < nregs; reg++) {
3388 3455 switch (size) {
3389 3456 case 0:
3390   - gen_ldst(ldub, s);
  3457 + tmp = gen_ld8u(cpu_T[1], IS_USER(s));
  3458 + tcg_gen_mov_i32(cpu_T[0], tmp);
  3459 + dead_tmp(tmp);
3391 3460 gen_op_neon_dup_u8(0);
3392 3461 break;
3393 3462 case 1:
3394   - gen_ldst(lduw, s);
  3463 + tmp = gen_ld16u(cpu_T[1], IS_USER(s));
  3464 + tcg_gen_mov_i32(cpu_T[0], tmp);
  3465 + dead_tmp(tmp);
3395 3466 gen_op_neon_dup_low16();
3396 3467 break;
3397 3468 case 2:
3398   - gen_ldst(ldl, s);
  3469 + tmp = gen_ld32(cpu_T[0], IS_USER(s));
  3470 + tcg_gen_mov_i32(cpu_T[0], tmp);
  3471 + dead_tmp(tmp);
3399 3472 break;
3400 3473 case 3:
3401 3474 return 1;
... ... @@ -3437,19 +3510,22 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3437 3510 }
3438 3511 switch (size) {
3439 3512 case 0:
3440   - gen_ldst(ldub, s);
  3513 + tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3441 3514 break;
3442 3515 case 1:
3443   - gen_ldst(lduw, s);
  3516 + tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3444 3517 break;
3445 3518 case 2:
3446   - gen_ldst(ldl, s);
3447   - NEON_SET_REG(T0, rd, pass);
  3519 + tmp = gen_ld32(cpu_T[1], IS_USER(s));
3448 3520 break;
3449 3521 }
  3522 + tcg_gen_mov_i32(cpu_T[0], tmp);
  3523 + dead_tmp(tmp);
3450 3524 if (size != 2) {
3451 3525 gen_op_neon_insert_elt(shift, ~mask);
3452 3526 NEON_SET_REG(T0, rd, pass);
  3527 + } else {
  3528 + NEON_SET_REG(T0, rd, pass);
3453 3529 }
3454 3530 } else { /* Store */
3455 3531 if (size == 2) {
... ... @@ -3458,15 +3534,17 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3458 3534 NEON_GET_REG(T2, rd, pass);
3459 3535 gen_op_neon_extract_elt(shift, mask);
3460 3536 }
  3537 + tmp = new_tmp();
  3538 + tcg_gen_mov_i32(tmp, cpu_T[0]);
3461 3539 switch (size) {
3462 3540 case 0:
3463   - gen_ldst(stb, s);
  3541 + gen_st8(tmp, cpu_T[1], IS_USER(s));
3464 3542 break;
3465 3543 case 1:
3466   - gen_ldst(stw, s);
  3544 + gen_st16(tmp, cpu_T[1], IS_USER(s));
3467 3545 break;
3468 3546 case 2:
3469   - gen_ldst(stl, s);
  3547 + gen_st32(tmp, cpu_T[1], IS_USER(s));
3470 3548 break;
3471 3549 }
3472 3550 }
... ... @@ -4647,7 +4725,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4647 4725 NEON_GET_REG(T0, rm, pass * 2);
4648 4726 NEON_GET_REG(T1, rm, pass * 2 + 1);
4649 4727 switch (size) {
4650   - case 0: gen_op_rev_T0(); break;
  4728 + case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
4651 4729 case 1: gen_swap_half(cpu_T[0]); break;
4652 4730 case 2: /* no-op */ break;
4653 4731 default: abort();
... ... @@ -4658,7 +4736,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4658 4736 } else {
4659 4737 gen_op_movl_T0_T1();
4660 4738 switch (size) {
4661   - case 0: gen_op_rev_T0(); break;
  4739 + case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
4662 4740 case 1: gen_swap_half(cpu_T[0]); break;
4663 4741 default: abort();
4664 4742 }
... ... @@ -4827,7 +4905,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4827 4905 switch (op) {
4828 4906 case 1: /* VREV32 */
4829 4907 switch (size) {
4830   - case 0: gen_op_rev_T0(); break;
  4908 + case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
4831 4909 case 1: gen_swap_half(cpu_T[0]); break;
4832 4910 default: return 1;
4833 4911 }
... ... @@ -5099,6 +5177,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5099 5177 TCGv tmp;
5100 5178 TCGv tmp2;
5101 5179 TCGv tmp3;
  5180 + TCGv addr;
5102 5181  
5103 5182 insn = ldl_code(s->pc);
5104 5183 s->pc += 4;
... ... @@ -5160,9 +5239,10 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5160 5239 ARCH(6);
5161 5240 op1 = (insn & 0x1f);
5162 5241 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5163   - gen_movl_T1_reg(s, 13);
  5242 + addr = load_reg(s, 13);
5164 5243 } else {
5165   - gen_op_movl_T1_r13_banked(op1);
  5244 + addr = new_tmp();
  5245 + gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
5166 5246 }
5167 5247 i = (insn >> 23) & 3;
5168 5248 switch (i) {
... ... @@ -5173,12 +5253,13 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5173 5253 default: abort();
5174 5254 }
5175 5255 if (offset)
5176   - gen_op_addl_T1_im(offset);
5177   - gen_movl_T0_reg(s, 14);
5178   - gen_ldst(stl, s);
5179   - gen_helper_cpsr_read(cpu_T[0]);
5180   - gen_op_addl_T1_im(4);
5181   - gen_ldst(stl, s);
  5256 + tcg_gen_addi_i32(addr, addr, offset);
  5257 + tmp = load_reg(s, 14);
  5258 + gen_st32(tmp, addr, 0);
  5259 + tmp = new_tmp();
  5260 + gen_helper_cpsr_read(tmp);
  5261 + tcg_gen_addi_i32(addr, addr, 4);
  5262 + gen_st32(tmp, addr, 0);
5182 5263 if (insn & (1 << 21)) {
5183 5264 /* Base writeback. */
5184 5265 switch (i) {
... ... @@ -5189,12 +5270,14 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5189 5270 default: abort();
5190 5271 }
5191 5272 if (offset)
5192   - gen_op_addl_T1_im(offset);
  5273 + tcg_gen_addi_i32(addr, tmp, offset);
5193 5274 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5194 5275 gen_movl_reg_T1(s, 13);
5195 5276 } else {
5196   - gen_op_movl_r13_T1_banked(op1);
  5277 + gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
5197 5278 }
  5279 + } else {
  5280 + dead_tmp(addr);
5198 5281 }
5199 5282 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5200 5283 /* rfe */
... ... @@ -5203,36 +5286,37 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5203 5286 goto illegal_op;
5204 5287 ARCH(6);
5205 5288 rn = (insn >> 16) & 0xf;
5206   - gen_movl_T1_reg(s, rn);
  5289 + addr = load_reg(s, rn);
5207 5290 i = (insn >> 23) & 3;
5208 5291 switch (i) {
5209   - case 0: offset = 0; break; /* DA */
5210   - case 1: offset = -4; break; /* DB */
5211   - case 2: offset = 4; break; /* IA */
5212   - case 3: offset = 8; break; /* IB */
  5292 + case 0: offset = -4; break; /* DA */
  5293 + case 1: offset = -8; break; /* DB */
  5294 + case 2: offset = 0; break; /* IA */
  5295 + case 3: offset = 4; break; /* IB */
5213 5296 default: abort();
5214 5297 }
5215 5298 if (offset)
5216   - gen_op_addl_T1_im(offset);
5217   - /* Load CPSR into T2 and PC into T0. */
5218   - gen_ldst(ldl, s);
5219   - gen_op_movl_T2_T0();
5220   - gen_op_addl_T1_im(-4);
5221   - gen_ldst(ldl, s);
  5299 + tcg_gen_addi_i32(addr, addr, offset);
  5300 + /* Load PC into tmp and CPSR into tmp2. */
  5301 + tmp = gen_ld32(addr, 0);
  5302 + tcg_gen_addi_i32(addr, addr, 4);
  5303 + tmp2 = gen_ld32(addr, 0);
5222 5304 if (insn & (1 << 21)) {
5223 5305 /* Base writeback. */
5224 5306 switch (i) {
5225   - case 0: offset = -4; break;
5226   - case 1: offset = 0; break;
5227   - case 2: offset = 8; break;
5228   - case 3: offset = 4; break;
  5307 + case 0: offset = -8; break;
  5308 + case 1: offset = -4; break;
  5309 + case 2: offset = 4; break;
  5310 + case 3: offset = 0; break;
5229 5311 default: abort();
5230 5312 }
5231 5313 if (offset)
5232   - gen_op_addl_T1_im(offset);
5233   - gen_movl_reg_T1(s, rn);
  5314 + tcg_gen_addi_i32(addr, addr, offset);
  5315 + store_reg(s, rn, addr);
  5316 + } else {
  5317 + dead_tmp(addr);
5234 5318 }
5235   - gen_rfe(s);
  5319 + gen_rfe(s, tmp, tmp2);
5236 5320 } else if ((insn & 0x0e000000) == 0x0a000000) {
5237 5321 /* branch link and change to thumb (blx <offset>) */
5238 5322 int32_t offset;
... ... @@ -5707,22 +5791,22 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5707 5791 /* Misc load/store */
5708 5792 rn = (insn >> 16) & 0xf;
5709 5793 rd = (insn >> 12) & 0xf;
5710   - gen_movl_T1_reg(s, rn);
  5794 + addr = load_reg(s, rn);
5711 5795 if (insn & (1 << 24))
5712   - gen_add_datah_offset(s, insn, 0);
  5796 + gen_add_datah_offset(s, insn, 0, addr);
5713 5797 address_offset = 0;
5714 5798 if (insn & (1 << 20)) {
5715 5799 /* load */
5716 5800 switch(sh) {
5717 5801 case 1:
5718   - gen_ldst(lduw, s);
  5802 + tmp = gen_ld16u(addr, IS_USER(s));
5719 5803 break;
5720 5804 case 2:
5721   - gen_ldst(ldsb, s);
  5805 + tmp = gen_ld8s(addr, IS_USER(s));
5722 5806 break;
5723 5807 default:
5724 5808 case 3:
5725   - gen_ldst(ldsw, s);
  5809 + tmp = gen_ld16s(addr, IS_USER(s));
5726 5810 break;
5727 5811 }
5728 5812 load = 1;
... ... @@ -5730,26 +5814,26 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5730 5814 /* doubleword */
5731 5815 if (sh & 1) {
5732 5816 /* store */
5733   - gen_movl_T0_reg(s, rd);
5734   - gen_ldst(stl, s);
5735   - gen_op_addl_T1_im(4);
5736   - gen_movl_T0_reg(s, rd + 1);
5737   - gen_ldst(stl, s);
  5817 + tmp = load_reg(s, rd);
  5818 + gen_st32(tmp, addr, IS_USER(s));
  5819 + tcg_gen_addi_i32(addr, addr, 4);
  5820 + tmp = load_reg(s, rd + 1);
  5821 + gen_st32(tmp, addr, IS_USER(s));
5738 5822 load = 0;
5739 5823 } else {
5740 5824 /* load */
5741   - gen_ldst(ldl, s);
5742   - gen_movl_reg_T0(s, rd);
5743   - gen_op_addl_T1_im(4);
5744   - gen_ldst(ldl, s);
  5825 + tmp = gen_ld32(addr, IS_USER(s));
  5826 + store_reg(s, rd, tmp);
  5827 + tcg_gen_addi_i32(addr, addr, 4);
  5828 + tmp = gen_ld32(addr, IS_USER(s));
5745 5829 rd++;
5746 5830 load = 1;
5747 5831 }
5748 5832 address_offset = -4;
5749 5833 } else {
5750 5834 /* store */
5751   - gen_movl_T0_reg(s, rd);
5752   - gen_ldst(stw, s);
  5835 + tmp = load_reg(s, rd);
  5836 + gen_st16(tmp, addr, IS_USER(s));
5753 5837 load = 0;
5754 5838 }
5755 5839 /* Perform base writeback before the loaded value to
... ... @@ -5757,16 +5841,18 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5758 5842 ldrd with base writeback is undefined if the
5758 5842 destination and index registers overlap. */
5759 5843 if (!(insn & (1 << 24))) {
5760   - gen_add_datah_offset(s, insn, address_offset);
5761   - gen_movl_reg_T1(s, rn);
  5844 + gen_add_datah_offset(s, insn, address_offset, addr);
  5845 + store_reg(s, rn, addr);
5762 5846 } else if (insn & (1 << 21)) {
5763 5847 if (address_offset)
5764   - gen_op_addl_T1_im(address_offset);
5765   - gen_movl_reg_T1(s, rn);
  5848 + tcg_gen_addi_i32(addr, addr, address_offset);
  5849 + store_reg(s, rn, addr);
  5850 + } else {
  5851 + dead_tmp(addr);
5766 5852 }
5767 5853 if (load) {
5768 5854 /* Complete the load. */
5769   - gen_movl_reg_T0(s, rd);
  5855 + store_reg(s, rd, tmp);
5770 5856 }
5771 5857 }
5772 5858 break;
... ... @@ -5882,21 +5968,21 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
5882 5968 gen_movl_reg_T1(s, rd);
5883 5969 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
5884 5970 /* rev */
5885   - gen_movl_T0_reg(s, rm);
  5971 + tmp = load_reg(s, rm);
5886 5972 if (insn & (1 << 22)) {
5887 5973 if (insn & (1 << 7)) {
5888   - gen_revsh(cpu_T[0]);
  5974 + gen_revsh(tmp);
5889 5975 } else {
5890 5976 ARCH(6T2);
5891   - gen_helper_rbit(cpu_T[0], cpu_T[0]);
  5977 + gen_helper_rbit(tmp, tmp);
5892 5978 }
5893 5979 } else {
5894 5980 if (insn & (1 << 7))
5895   - gen_rev16(cpu_T[0]);
  5981 + gen_rev16(tmp);
5896 5982 else
5897   - gen_op_rev_T0();
  5983 + tcg_gen_bswap_i32(tmp, tmp);
5898 5984 }
5899   - gen_movl_reg_T0(s, rd);
  5985 + store_reg(s, rd, tmp);
5900 5986 } else {
5901 5987 goto illegal_op;
5902 5988 }
... ... @@ -6020,71 +6106,47 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
6020 6106 /* load/store byte/word */
6021 6107 rn = (insn >> 16) & 0xf;
6022 6108 rd = (insn >> 12) & 0xf;
6023   - gen_movl_T1_reg(s, rn);
  6109 + tmp2 = load_reg(s, rn);
6024 6110 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6025 6111 if (insn & (1 << 24))
6026   - gen_add_data_offset(s, insn);
  6112 + gen_add_data_offset(s, insn, tmp2);
6027 6113 if (insn & (1 << 20)) {
6028 6114 /* load */
6029 6115 s->is_mem = 1;
6030   -#if defined(CONFIG_USER_ONLY)
6031   - if (insn & (1 << 22))
6032   - gen_op_ldub_raw();
6033   - else
6034   - gen_op_ldl_raw();
6035   -#else
6036 6116 if (insn & (1 << 22)) {
6037   - if (i)
6038   - gen_op_ldub_user();
6039   - else
6040   - gen_op_ldub_kernel();
  6117 + tmp = gen_ld8u(tmp2, i);
6041 6118 } else {
6042   - if (i)
6043   - gen_op_ldl_user();
6044   - else
6045   - gen_op_ldl_kernel();
  6119 + tmp = gen_ld32(tmp2, i);
6046 6120 }
6047   -#endif
6048 6121 } else {
6049 6122 /* store */
6050   - gen_movl_T0_reg(s, rd);
6051   -#if defined(CONFIG_USER_ONLY)
  6123 + tmp = load_reg(s, rd);
6052 6124 if (insn & (1 << 22))
6053   - gen_op_stb_raw();
  6125 + gen_st8(tmp, tmp2, i);
6054 6126 else
6055   - gen_op_stl_raw();
6056   -#else
6057   - if (insn & (1 << 22)) {
6058   - if (i)
6059   - gen_op_stb_user();
6060   - else
6061   - gen_op_stb_kernel();
6062   - } else {
6063   - if (i)
6064   - gen_op_stl_user();
6065   - else
6066   - gen_op_stl_kernel();
6067   - }
6068   -#endif
  6127 + gen_st32(tmp, tmp2, i);
6069 6128 }
6070 6129 if (!(insn & (1 << 24))) {
6071   - gen_add_data_offset(s, insn);
6072   - gen_movl_reg_T1(s, rn);
6073   - } else if (insn & (1 << 21))
6074   - gen_movl_reg_T1(s, rn); {
  6130 + gen_add_data_offset(s, insn, tmp2);
  6131 + store_reg(s, rn, tmp2);
  6132 + } else if (insn & (1 << 21)) {
  6133 + store_reg(s, rn, tmp2);
  6134 + } else {
  6135 + dead_tmp(tmp2);
6075 6136 }
6076 6137 if (insn & (1 << 20)) {
6077 6138 /* Complete the load. */
6078 6139 if (rd == 15)
6079   - gen_bx_T0(s);
  6140 + gen_bx(s, tmp);
6080 6141 else
6081   - gen_movl_reg_T0(s, rd);
  6142 + store_reg(s, rd, tmp);
6082 6143 }
6083 6144 break;
6084 6145 case 0x08:
6085 6146 case 0x09:
6086 6147 {
6087 6148 int j, n, user, loaded_base;
  6149 + TCGv loaded_var;
6088 6150 /* load/store multiple words */
6089 6151 /* XXX: store correct base if write back */
6090 6152 user = 0;
... ... @@ -6096,7 +6158,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
6096 6158 user = 1;
6097 6159 }
6098 6160 rn = (insn >> 16) & 0xf;
6099   - gen_movl_T1_reg(s, rn);
  6161 + addr = load_reg(s, rn);
6100 6162  
6101 6163 /* compute total size */
6102 6164 loaded_base = 0;
... ... @@ -6109,18 +6171,18 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
6109 6171 if (insn & (1 << 23)) {
6110 6172 if (insn & (1 << 24)) {
6111 6173 /* pre increment */
6112   - gen_op_addl_T1_im(4);
  6174 + tcg_gen_addi_i32(addr, addr, 4);
6113 6175 } else {
6114 6176 /* post increment */
6115 6177 }
6116 6178 } else {
6117 6179 if (insn & (1 << 24)) {
6118 6180 /* pre decrement */
6119   - gen_op_addl_T1_im(-(n * 4));
  6181 + tcg_gen_addi_i32(addr, addr, -(n * 4));
6120 6182 } else {
6121 6183 /* post decrement */
6122 6184 if (n != 1)
6123   - gen_op_addl_T1_im(-((n - 1) * 4));
  6185 + tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6124 6186 }
6125 6187 }
6126 6188 j = 0;
... ... @@ -6128,34 +6190,37 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
6128 6190 if (insn & (1 << i)) {
6129 6191 if (insn & (1 << 20)) {
6130 6192 /* load */
6131   - gen_ldst(ldl, s);
  6193 + tmp = gen_ld32(addr, IS_USER(s));
6132 6194 if (i == 15) {
6133   - gen_bx_T0(s);
  6195 + gen_bx(s, tmp);
6134 6196 } else if (user) {
6135   - gen_op_movl_user_T0(i);
  6197 + gen_helper_set_user_reg(tcg_const_i32(i), tmp);
  6198 + dead_tmp(tmp);
6136 6199 } else if (i == rn) {
6137   - gen_op_movl_T2_T0();
  6200 + loaded_var = tmp;
6138 6201 loaded_base = 1;
6139 6202 } else {
6140   - gen_movl_reg_T0(s, i);
  6203 + store_reg(s, i, tmp);
6141 6204 }
6142 6205 } else {
6143 6206 /* store */
6144 6207 if (i == 15) {
6145 6208 /* special case: r15 = PC + 8 */
6146 6209 val = (long)s->pc + 4;
6147   - gen_op_movl_T0_im(val);
  6210 + tmp = new_tmp();
  6211 + tcg_gen_movi_i32(tmp, val);
6148 6212 } else if (user) {
6149   - gen_op_movl_T0_user(i);
  6213 + tmp = new_tmp();
  6214 + gen_helper_get_user_reg(tmp, tcg_const_i32(i));
6150 6215 } else {
6151   - gen_movl_T0_reg(s, i);
  6216 + tmp = load_reg(s, i);
6152 6217 }
6153   - gen_ldst(stl, s);
  6218 + gen_st32(tmp, addr, IS_USER(s));
6154 6219 }
6155 6220 j++;
6156 6221 /* no need to add after the last transfer */
6157 6222 if (j != n)
6158   - gen_op_addl_T1_im(4);
  6223 + tcg_gen_addi_i32(addr, addr, 4);
6159 6224 }
6160 6225 }
6161 6226 if (insn & (1 << 21)) {
... ... @@ -6165,23 +6230,24 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
6165 6230 /* pre increment */
6166 6231 } else {
6167 6232 /* post increment */
6168   - gen_op_addl_T1_im(4);
  6233 + tcg_gen_addi_i32(addr, addr, 4);
6169 6234 }
6170 6235 } else {
6171 6236 if (insn & (1 << 24)) {
6172 6237 /* pre decrement */
6173 6238 if (n != 1)
6174   - gen_op_addl_T1_im(-((n - 1) * 4));
  6239 + tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6175 6240 } else {
6176 6241 /* post decrement */
6177   - gen_op_addl_T1_im(-(n * 4));
  6242 + tcg_gen_addi_i32(addr, addr, -(n * 4));
6178 6243 }
6179 6244 }
6180   - gen_movl_reg_T1(s, rn);
  6245 + store_reg(s, rn, addr);
  6246 + } else {
  6247 + dead_tmp(addr);
6181 6248 }
6182 6249 if (loaded_base) {
6183   - gen_op_movl_T0_T2();
6184   - gen_movl_reg_T0(s, rn);
  6250 + store_reg(s, rn, loaded_var);
6185 6251 }
6186 6252 if ((insn & (1 << 22)) && !user) {
6187 6253 /* Restore CPSR from SPSR. */
... ... @@ -6319,11 +6385,12 @@ gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6319 6385 is not legal. */
6320 6386 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6321 6387 {
6322   - uint32_t insn, imm, shift, offset, addr;
  6388 + uint32_t insn, imm, shift, offset;
6323 6389 uint32_t rd, rn, rm, rs;
6324 6390 TCGv tmp;
6325 6391 TCGv tmp2;
6326 6392 TCGv tmp3;
  6393 + TCGv addr;
6327 6394 int op;
6328 6395 int shiftop;
6329 6396 int conds;
... ... @@ -6341,9 +6408,8 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6341 6408 tcg_gen_addi_i32(tmp, tmp, offset);
6342 6409 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
6343 6410  
6344   - addr = (uint32_t)s->pc;
6345 6411 tmp2 = new_tmp();
6346   - tcg_gen_movi_i32(tmp2, addr | 1);
  6412 + tcg_gen_movi_i32(tmp2, s->pc | 1);
6347 6413 store_reg(s, 14, tmp2);
6348 6414 gen_bx(s, tmp);
6349 6415 return 0;
... ... @@ -6354,9 +6420,8 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6354 6420 tmp = load_reg(s, 14);
6355 6421 tcg_gen_addi_i32(tmp, tmp, 14);
6356 6422  
6357   - addr = (uint32_t)s->pc;
6358 6423 tmp2 = new_tmp();
6359   - tcg_gen_movi_i32(tmp2, addr | 1);
  6424 + tcg_gen_movi_i32(tmp2, s->pc | 1);
6360 6425 store_reg(s, 14, tmp2);
6361 6426 gen_bx(s, tmp);
6362 6427 return 0;
... ... @@ -6366,8 +6431,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6366 6431 16-bit instructions in case the second half causes a
6367 6432 prefetch abort. */
6368 6433 offset = ((int32_t)insn << 21) >> 9;
6369   - addr = s->pc + 2 + offset;
6370   - gen_op_movl_T0_im(addr);
  6434 + gen_op_movl_T0_im(s->pc + 2 + offset);
6371 6435 gen_movl_reg_T0(s, 14);
6372 6436 return 0;
6373 6437 }
... ... @@ -6396,38 +6460,41 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6396 6460 if (insn & 0x01200000) {
6397 6461 /* Load/store doubleword. */
6398 6462 if (rn == 15) {
6399   - gen_op_movl_T1_im(s->pc & ~3);
  6463 + addr = new_tmp();
  6464 + tcg_gen_movi_i32(addr, s->pc & ~3);
6400 6465 } else {
6401   - gen_movl_T1_reg(s, rn);
  6466 + addr = load_reg(s, rn);
6402 6467 }
6403 6468 offset = (insn & 0xff) * 4;
6404 6469 if ((insn & (1 << 23)) == 0)
6405 6470 offset = -offset;
6406 6471 if (insn & (1 << 24)) {
6407   - gen_op_addl_T1_im(offset);
  6472 + tcg_gen_addi_i32(addr, addr, offset);
6408 6473 offset = 0;
6409 6474 }
6410 6475 if (insn & (1 << 20)) {
6411 6476 /* ldrd */
6412   - gen_ldst(ldl, s);
6413   - gen_movl_reg_T0(s, rs);
6414   - gen_op_addl_T1_im(4);
6415   - gen_ldst(ldl, s);
6416   - gen_movl_reg_T0(s, rd);
  6477 + tmp = gen_ld32(addr, IS_USER(s));
  6478 + store_reg(s, rs, tmp);
  6479 + tcg_gen_addi_i32(addr, addr, 4);
  6480 + tmp = gen_ld32(addr, IS_USER(s));
  6481 + store_reg(s, rd, tmp);
6417 6482 } else {
6418 6483 /* strd */
6419   - gen_movl_T0_reg(s, rs);
6420   - gen_ldst(stl, s);
6421   - gen_op_addl_T1_im(4);
6422   - gen_movl_T0_reg(s, rd);
6423   - gen_ldst(stl, s);
  6484 + tmp = load_reg(s, rs);
  6485 + gen_st32(tmp, addr, IS_USER(s));
  6486 + tcg_gen_addi_i32(addr, addr, 4);
  6487 + tmp = load_reg(s, rd);
  6488 + gen_st32(tmp, addr, IS_USER(s));
6424 6489 }
6425 6490 if (insn & (1 << 21)) {
6426 6491 /* Base writeback. */
6427 6492 if (rn == 15)
6428 6493 goto illegal_op;
6429   - gen_op_addl_T1_im(offset - 4);
6430   - gen_movl_reg_T1(s, rn);
  6494 + tcg_gen_addi_i32(addr, addr, offset - 4);
  6495 + store_reg(s, rn, addr);
  6496 + } else {
  6497 + dead_tmp(addr);
6431 6498 }
6432 6499 } else if ((insn & (1 << 23)) == 0) {
6433 6500 /* Load/store exclusive word. */
... ... @@ -6442,24 +6509,26 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6442 6509 } else if ((insn & (1 << 6)) == 0) {
6443 6510 /* Table Branch. */
6444 6511 if (rn == 15) {
6445   - gen_op_movl_T1_im(s->pc);
  6512 + addr = new_tmp();
  6513 + tcg_gen_movi_i32(addr, s->pc);
6446 6514 } else {
6447   - gen_movl_T1_reg(s, rn);
  6515 + addr = load_reg(s, rn);
6448 6516 }
6449 6517 tmp = load_reg(s, rm);
6450   - tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
  6518 + tcg_gen_add_i32(addr, addr, tmp);
6451 6519 if (insn & (1 << 4)) {
6452 6520 /* tbh */
6453   - tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
  6521 + tcg_gen_add_i32(addr, addr, tmp);
6454 6522 dead_tmp(tmp);
6455   - gen_ldst(lduw, s);
  6523 + tmp = gen_ld16u(addr, IS_USER(s));
6456 6524 } else { /* tbb */
6457 6525 dead_tmp(tmp);
6458   - gen_ldst(ldub, s);
  6526 + tmp = gen_ld8u(addr, IS_USER(s));
6459 6527 }
6460   - tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 1);
6461   - tcg_gen_addi_i32(cpu_T[0], cpu_T[0], s->pc);
6462   - gen_movl_reg_T0(s, 15);
  6528 + dead_tmp(addr);
  6529 + tcg_gen_shli_i32(tmp, tmp, 1);
  6530 + tcg_gen_addi_i32(tmp, tmp, s->pc);
  6531 + store_reg(s, 15, tmp);
6463 6532 } else {
6464 6533 /* Load/store exclusive byte/halfword/doubleword. */
6465 6534 op = (insn >> 4) & 0x3;
... ... @@ -6503,68 +6572,74 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6503 6572 /* Load/store multiple, RFE, SRS. */
6504 6573 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
6505 6574 /* Not available in user mode. */
6506   - if (!IS_USER(s))
  6575 + if (IS_USER(s))
6507 6576 goto illegal_op;
6508 6577 if (insn & (1 << 20)) {
6509 6578 /* rfe */
6510   - gen_movl_T1_reg(s, rn);
6511   - if (insn & (1 << 24)) {
6512   - gen_op_addl_T1_im(4);
6513   - } else {
6514   - gen_op_addl_T1_im(-4);
6515   - }
6516   - /* Load CPSR into T2 and PC into T0. */
6517   - gen_ldst(ldl, s);
6518   - gen_op_movl_T2_T0();
6519   - gen_op_addl_T1_im(-4);
6520   - gen_ldst(ldl, s);
  6579 + addr = load_reg(s, rn);
  6580 + if ((insn & (1 << 24)) == 0)
  6581 + tcg_gen_addi_i32(addr, addr, -8);
  6582 + /* Load PC into tmp and CPSR into tmp2. */
  6583 + tmp = gen_ld32(addr, 0);
  6584 + tcg_gen_addi_i32(addr, addr, 4);
  6585 + tmp2 = gen_ld32(addr, 0);
6521 6586 if (insn & (1 << 21)) {
6522 6587 /* Base writeback. */
6523   - if (insn & (1 << 24))
6524   - gen_op_addl_T1_im(8);
6525   - gen_movl_reg_T1(s, rn);
  6588 + if (insn & (1 << 24)) {
  6589 + tcg_gen_addi_i32(addr, addr, 4);
  6590 + } else {
  6591 + tcg_gen_addi_i32(addr, addr, -4);
  6592 + }
  6593 + store_reg(s, rn, addr);
  6594 + } else {
  6595 + dead_tmp(addr);
6526 6596 }
6527   - gen_rfe(s);
  6597 + gen_rfe(s, tmp, tmp2);
6528 6598 } else {
6529 6599 /* srs */
6530 6600 op = (insn & 0x1f);
6531 6601 if (op == (env->uncached_cpsr & CPSR_M)) {
6532   - gen_movl_T1_reg(s, 13);
  6602 + addr = load_reg(s, 13);
6533 6603 } else {
6534   - gen_op_movl_T1_r13_banked(op);
  6604 + addr = new_tmp();
  6605 + gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
6535 6606 }
6536 6607 if ((insn & (1 << 24)) == 0) {
6537   - gen_op_addl_T1_im(-8);
  6608 + tcg_gen_addi_i32(addr, addr, -8);
6538 6609 }
6539   - gen_movl_T0_reg(s, 14);
6540   - gen_ldst(stl, s);
6541   - gen_helper_cpsr_read(cpu_T[0]);
6542   - gen_op_addl_T1_im(4);
6543   - gen_ldst(stl, s);
  6610 + tmp = load_reg(s, 14);
  6611 + gen_st32(tmp, addr, 0);
  6612 + tcg_gen_addi_i32(addr, addr, 4);
  6613 + tmp = new_tmp();
  6614 + gen_helper_cpsr_read(tmp);
  6615 + gen_st32(tmp, addr, 0);
6544 6616 if (insn & (1 << 21)) {
6545 6617 if ((insn & (1 << 24)) == 0) {
6546   - gen_op_addl_T1_im(-4);
  6618 + tcg_gen_addi_i32(addr, addr, -4);
6547 6619 } else {
6548   - gen_op_addl_T1_im(4);
  6620 + tcg_gen_addi_i32(addr, addr, 4);
6549 6621 }
6550 6622 if (op == (env->uncached_cpsr & CPSR_M)) {
6551   - gen_movl_reg_T1(s, 13);
  6623 + store_reg(s, 13, addr);
6552 6624 } else {
6553   - gen_op_movl_r13_T1_banked(op);
  6625 + gen_helper_set_r13_banked(cpu_env,
  6626 + tcg_const_i32(op), addr);
6554 6627 }
  6628 + } else {
  6629 + dead_tmp(addr);
6555 6630 }
6556 6631 }
6557 6632 } else {
6558 6633 int i;
6559 6634 /* Load/store multiple. */
6560   - gen_movl_T1_reg(s, rn);
  6635 + addr = load_reg(s, rn);
6561 6636 offset = 0;
6562 6637 for (i = 0; i < 16; i++) {
6563 6638 if (insn & (1 << i))
6564 6639 offset += 4;
6565 6640 }
6566 6641 if (insn & (1 << 24)) {
6567   - gen_op_addl_T1_im(-offset);
  6642 + tcg_gen_addi_i32(addr, addr, -offset);
6568 6643 }
6569 6644  
6570 6645 for (i = 0; i < 16; i++) {
... ... @@ -6572,28 +6647,30 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6572 6647 continue;
6573 6648 if (insn & (1 << 20)) {
6574 6649 /* Load. */
6575   - gen_ldst(ldl, s);
  6650 + tmp = gen_ld32(addr, IS_USER(s));
6576 6651 if (i == 15) {
6577   - gen_bx_T0(s);
  6652 + gen_bx(s, tmp);
6578 6653 } else {
6579   - gen_movl_reg_T0(s, i);
  6654 + store_reg(s, i, tmp);
6580 6655 }
6581 6656 } else {
6582 6657 /* Store. */
6583   - gen_movl_T0_reg(s, i);
6584   - gen_ldst(stl, s);
  6658 + tmp = load_reg(s, i);
  6659 + gen_st32(tmp, addr, IS_USER(s));
6585 6660 }
6586   - gen_op_addl_T1_im(4);
  6661 + tcg_gen_addi_i32(addr, addr, 4);
6587 6662 }
6588 6663 if (insn & (1 << 21)) {
6589 6664 /* Base register writeback. */
6590 6665 if (insn & (1 << 24)) {
6591   - gen_op_addl_T1_im(-offset);
  6666 + tcg_gen_addi_i32(addr, addr, -offset);
6592 6667 }
6593 6668 /* Fault if writeback register is in register list. */
6594 6669 if (insn & (1 << rn))
6595 6670 goto illegal_op;
6596   - gen_movl_reg_T1(s, rn);
  6671 + store_reg(s, rn, addr);
  6672 + } else {
  6673 + dead_tmp(addr);
6597 6674 }
6598 6675 }
6599 6676 }
... ... @@ -6892,21 +6969,20 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6892 6969 offset ^= ((~insn) & (1 << 13)) << 10;
6893 6970 offset ^= ((~insn) & (1 << 11)) << 11;
6894 6971  
6895   - addr = s->pc;
6896 6972 if (insn & (1 << 14)) {
6897 6973 /* Branch and link. */
6898   - gen_op_movl_T1_im(addr | 1);
  6974 + gen_op_movl_T1_im(s->pc | 1);
6899 6975 gen_movl_reg_T1(s, 14);
6900 6976 }
6901 6977  
6902   - addr += offset;
  6978 + offset += s->pc;
6903 6979 if (insn & (1 << 12)) {
6904 6980 /* b/bl */
6905   - gen_jmp(s, addr);
  6981 + gen_jmp(s, offset);
6906 6982 } else {
6907 6983 /* blx */
6908   - addr &= ~(uint32_t)2;
6909   - gen_bx_im(s, addr);
  6984 + offset &= ~(uint32_t)2;
  6985 + gen_bx_im(s, offset);
6910 6986 }
6911 6987 } else if (((insn >> 23) & 7) == 7) {
6912 6988 /* Misc control */
... ... @@ -7025,8 +7101,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7025 7101 offset |= (insn & (1 << 11)) << 8;
7026 7102  
7027 7103 /* jump to the offset */
7028   - addr = s->pc + offset;
7029   - gen_jmp(s, addr);
  7104 + gen_jmp(s, s->pc + offset);
7030 7105 }
7031 7106 } else {
7032 7107 /* Data processing immediate. */
... ... @@ -7114,12 +7189,12 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7114 7189 } else {
7115 7190 /* Add/sub 12-bit immediate. */
7116 7191 if (rn == 15) {
7117   - addr = s->pc & ~(uint32_t)3;
  7192 + offset = s->pc & ~(uint32_t)3;
7118 7193 if (insn & (1 << 23))
7119   - addr -= imm;
  7194 + offset -= imm;
7120 7195 else
7121   - addr += imm;
7122   - gen_op_movl_T0_im(addr);
  7196 + offset += imm;
  7197 + gen_op_movl_T0_im(offset);
7123 7198 } else {
7124 7199 gen_movl_T0_reg(s, rn);
7125 7200 gen_op_movl_T1_im(imm);
... ... @@ -7179,12 +7254,15 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7179 7254 {
7180 7255 int postinc = 0;
7181 7256 int writeback = 0;
  7257 + int user;
7182 7258 if ((insn & 0x01100000) == 0x01000000) {
7183 7259 if (disas_neon_ls_insn(env, s, insn))
7184 7260 goto illegal_op;
7185 7261 break;
7186 7262 }
  7263 + user = IS_USER(s);
7187 7264 if (rn == 15) {
  7265 + addr = new_tmp();
7188 7266 /* PC relative. */
7189 7267 /* s->pc has already been incremented by 4. */
7190 7268 imm = s->pc & 0xfffffffc;
... ... @@ -7192,13 +7270,13 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7192 7270 imm += insn & 0xfff;
7193 7271 else
7194 7272 imm -= insn & 0xfff;
7195   - gen_op_movl_T1_im(imm);
  7273 + tcg_gen_movi_i32(addr, imm);
7196 7274 } else {
7197   - gen_movl_T1_reg(s, rn);
  7275 + addr = load_reg(s, rn);
7198 7276 if (insn & (1 << 23)) {
7199 7277 /* Positive offset. */
7200 7278 imm = insn & 0xfff;
7201   - gen_op_addl_T1_im(imm);
  7279 + tcg_gen_addi_i32(addr, addr, imm);
7202 7280 } else {
7203 7281 op = (insn >> 8) & 7;
7204 7282 imm = insn & 0xff;
... ... @@ -7210,14 +7288,15 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7210 7288 tmp = load_reg(s, rm);
7211 7289 if (shift)
7212 7290 tcg_gen_shli_i32(tmp, tmp, shift);
7213   - tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
  7291 + tcg_gen_add_i32(addr, addr, tmp);
7214 7292 dead_tmp(tmp);
7215 7293 break;
7216 7294 case 4: /* Negative offset. */
7217   - gen_op_addl_T1_im(-imm);
  7295 + tcg_gen_addi_i32(addr, addr, -imm);
7218 7296 break;
7219 7297 case 6: /* User privilege. */
7220   - gen_op_addl_T1_im(imm);
  7298 + tcg_gen_addi_i32(addr, addr, imm);
  7299 + user = 1;
7221 7300 break;
7222 7301 case 1: /* Post-decrement. */
7223 7302 imm = -imm;
... ... @@ -7230,7 +7309,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7230 7309 imm = -imm;
7231 7310 /* Fall through. */
7232 7311 case 7: /* Pre-increment. */
7233   - gen_op_addl_T1_im(imm);
  7312 + tcg_gen_addi_i32(addr, addr, imm);
7234 7313 writeback = 1;
7235 7314 break;
7236 7315 default:
... ... @@ -7247,35 +7326,38 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7247 7326 /* Memory hint. Implemented as NOP. */
7248 7327 } else {
7249 7328 switch (op) {
7250   - case 0: gen_ldst(ldub, s); break;
7251   - case 4: gen_ldst(ldsb, s); break;
7252   - case 1: gen_ldst(lduw, s); break;
7253   - case 5: gen_ldst(ldsw, s); break;
7254   - case 2: gen_ldst(ldl, s); break;
  7329 + case 0: tmp = gen_ld8u(addr, user); break;
  7330 + case 4: tmp = gen_ld8s(addr, user); break;
  7331 + case 1: tmp = gen_ld16u(addr, user); break;
  7332 + case 5: tmp = gen_ld16s(addr, user); break;
  7333 + case 2: tmp = gen_ld32(addr, user); break;
7255 7334 default: goto illegal_op;
7256 7335 }
7257 7336 if (rs == 15) {
7258   - gen_bx_T0(s);
  7337 + gen_bx(s, tmp);
7259 7338 } else {
7260   - gen_movl_reg_T0(s, rs);
  7339 + store_reg(s, rs, tmp);
7261 7340 }
7262 7341 }
7263 7342 } else {
7264 7343 /* Store. */
7265 7344 if (rs == 15)
7266 7345 goto illegal_op;
7267   - gen_movl_T0_reg(s, rs);
  7346 + tmp = load_reg(s, rs);
7268 7347 switch (op) {
7269   - case 0: gen_ldst(stb, s); break;
7270   - case 1: gen_ldst(stw, s); break;
7271   - case 2: gen_ldst(stl, s); break;
  7348 + case 0: gen_st8(tmp, addr, user); break;
  7349 + case 1: gen_st16(tmp, addr, user); break;
  7350 + case 2: gen_st32(tmp, addr, user); break;
7272 7351 default: goto illegal_op;
7273 7352 }
7274 7353 }
7275 7354 if (postinc)
7276   - gen_op_addl_T1_im(imm);
7277   - if (writeback)
7278   - gen_movl_reg_T1(s, rn);
  7355 + tcg_gen_addi_i32(addr, addr, imm);
  7356 + if (writeback) {
  7357 + store_reg(s, rn, addr);
  7358 + } else {
  7359 + dead_tmp(addr);
  7360 + }
7279 7361 }
7280 7362 break;
7281 7363 default:
... ... @@ -7293,6 +7375,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
7293 7375 int i;
7294 7376 TCGv tmp;
7295 7377 TCGv tmp2;
  7378 + TCGv addr;
7296 7379  
7297 7380 if (s->condexec_mask) {
7298 7381 cond = s->condexec_cond;
... ... @@ -7383,9 +7466,11 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
7383 7466 /* load pc-relative. Bit 1 of PC is ignored. */
7384 7467 val = s->pc + 2 + ((insn & 0xff) * 4);
7385 7468 val &= ~(uint32_t)2;
7386   - gen_op_movl_T1_im(val);
7387   - gen_ldst(ldl, s);
7388   - gen_movl_reg_T0(s, rd);
  7469 + addr = new_tmp();
  7470 + tcg_gen_movi_i32(addr, val);
  7471 + tmp = gen_ld32(addr, IS_USER(s));
  7472 + dead_tmp(addr);
  7473 + store_reg(s, rd, tmp);
7389 7474 break;
7390 7475 }
7391 7476 if (insn & (1 << 10)) {
... ... @@ -7410,12 +7495,13 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
7410 7495 gen_movl_reg_T0(s, rd);
7411 7496 break;
7412 7497 case 3:/* branch [and link] exchange thumb register */
  7498 + tmp = load_reg(s, rm);
7413 7499 if (insn & (1 << 7)) {
7414 7500 val = (uint32_t)s->pc | 1;
7415   - gen_op_movl_T1_im(val);
7416   - gen_movl_reg_T1(s, 14);
  7501 + tmp2 = new_tmp();
  7502 + tcg_gen_movi_i32(tmp2, val);
  7503 + store_reg(s, 14, tmp2);
7417 7504 }
7418   - tmp = load_reg(s, rm);
7419 7505 gen_bx(s, tmp);
7420 7506 break;
7421 7507 }
... ... @@ -7553,117 +7639,122 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
7553 7639 rn = (insn >> 3) & 7;
7554 7640 rm = (insn >> 6) & 7;
7555 7641 op = (insn >> 9) & 7;
7556   - gen_movl_T1_reg(s, rn);
  7642 + addr = load_reg(s, rn);
7557 7643 tmp = load_reg(s, rm);
7558   - tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
  7644 + tcg_gen_add_i32(addr, addr, tmp);
7559 7645 dead_tmp(tmp);
7560 7646  
7561 7647 if (op < 3) /* store */
7562   - gen_movl_T0_reg(s, rd);
  7648 + tmp = load_reg(s, rd);
7563 7649  
7564 7650 switch (op) {
7565 7651 case 0: /* str */
7566   - gen_ldst(stl, s);
  7652 + gen_st32(tmp, addr, IS_USER(s));
7567 7653 break;
7568 7654 case 1: /* strh */
7569   - gen_ldst(stw, s);
  7655 + gen_st16(tmp, addr, IS_USER(s));
7570 7656 break;
7571 7657 case 2: /* strb */
7572   - gen_ldst(stb, s);
  7658 + gen_st8(tmp, addr, IS_USER(s));
7573 7659 break;
7574 7660 case 3: /* ldrsb */
7575   - gen_ldst(ldsb, s);
  7661 + tmp = gen_ld8s(addr, IS_USER(s));
7576 7662 break;
7577 7663 case 4: /* ldr */
7578   - gen_ldst(ldl, s);
  7664 + tmp = gen_ld32(addr, IS_USER(s));
7579 7665 break;
7580 7666 case 5: /* ldrh */
7581   - gen_ldst(lduw, s);
  7667 + tmp = gen_ld16u(addr, IS_USER(s));
7582 7668 break;
7583 7669 case 6: /* ldrb */
7584   - gen_ldst(ldub, s);
  7670 + tmp = gen_ld8u(addr, IS_USER(s));
7585 7671 break;
7586 7672 case 7: /* ldrsh */
7587   - gen_ldst(ldsw, s);
  7673 + tmp = gen_ld16s(addr, IS_USER(s));
7588 7674 break;
7589 7675 }
7590 7676 if (op >= 3) /* load */
7591   - gen_movl_reg_T0(s, rd);
  7677 + store_reg(s, rd, tmp);
  7678 + dead_tmp(addr);
7592 7679 break;
7593 7680  
7594 7681 case 6:
7595 7682 /* load/store word immediate offset */
7596 7683 rd = insn & 7;
7597 7684 rn = (insn >> 3) & 7;
7598   - gen_movl_T1_reg(s, rn);
  7685 + addr = load_reg(s, rn);
7599 7686 val = (insn >> 4) & 0x7c;
7600   - tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
  7687 + tcg_gen_addi_i32(addr, addr, val);
7601 7688  
7602 7689 if (insn & (1 << 11)) {
7603 7690 /* load */
7604   - gen_ldst(ldl, s);
7605   - gen_movl_reg_T0(s, rd);
  7691 + tmp = gen_ld32(addr, IS_USER(s));
  7692 + store_reg(s, rd, tmp);
7606 7693 } else {
7607 7694 /* store */
7608   - gen_movl_T0_reg(s, rd);
7609   - gen_ldst(stl, s);
  7695 + tmp = load_reg(s, rd);
  7696 + gen_st32(tmp, addr, IS_USER(s));
7610 7697 }
  7698 + dead_tmp(addr);
7611 7699 break;
7612 7700  
7613 7701 case 7:
7614 7702 /* load/store byte immediate offset */
7615 7703 rd = insn & 7;
7616 7704 rn = (insn >> 3) & 7;
7617   - gen_movl_T1_reg(s, rn);
  7705 + addr = load_reg(s, rn);
7618 7706 val = (insn >> 6) & 0x1f;
7619   - tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
  7707 + tcg_gen_addi_i32(addr, addr, val);
7620 7708  
7621 7709 if (insn & (1 << 11)) {
7622 7710 /* load */
7623   - gen_ldst(ldub, s);
7624   - gen_movl_reg_T0(s, rd);
  7711 + tmp = gen_ld8u(addr, IS_USER(s));
  7712 + store_reg(s, rd, tmp);
7625 7713 } else {
7626 7714 /* store */
7627   - gen_movl_T0_reg(s, rd);
7628   - gen_ldst(stb, s);
  7715 + tmp = load_reg(s, rd);
  7716 + gen_st8(tmp, addr, IS_USER(s));
7629 7717 }
  7718 + dead_tmp(addr);
7630 7719 break;
7631 7720  
7632 7721 case 8:
7633 7722 /* load/store halfword immediate offset */
7634 7723 rd = insn & 7;
7635 7724 rn = (insn >> 3) & 7;
7636   - gen_movl_T1_reg(s, rn);
  7725 + addr = load_reg(s, rn);
7637 7726 val = (insn >> 5) & 0x3e;
7638   - tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
  7727 + tcg_gen_addi_i32(addr, addr, val);
7639 7728  
7640 7729 if (insn & (1 << 11)) {
7641 7730 /* load */
7642   - gen_ldst(lduw, s);
7643   - gen_movl_reg_T0(s, rd);
  7731 + tmp = gen_ld16u(addr, IS_USER(s));
  7732 + store_reg(s, rd, tmp);
7644 7733 } else {
7645 7734 /* store */
7646   - gen_movl_T0_reg(s, rd);
7647   - gen_ldst(stw, s);
  7735 + tmp = load_reg(s, rd);
  7736 + gen_st16(tmp, addr, IS_USER(s));
7648 7737 }
  7738 + dead_tmp(addr);
7649 7739 break;
7650 7740  
7651 7741 case 9:
7652 7742 /* load/store from stack */
7653 7743 rd = (insn >> 8) & 7;
7654   - gen_movl_T1_reg(s, 13);
  7744 + addr = load_reg(s, 13);
7655 7745 val = (insn & 0xff) * 4;
7656   - tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
  7746 + tcg_gen_addi_i32(addr, addr, val);
7657 7747  
7658 7748 if (insn & (1 << 11)) {
7659 7749 /* load */
7660   - gen_ldst(ldl, s);
7661   - gen_movl_reg_T0(s, rd);
  7750 + tmp = gen_ld32(addr, IS_USER(s));
  7751 + store_reg(s, rd, tmp);
7662 7752 } else {
7663 7753 /* store */
7664   - gen_movl_T0_reg(s, rd);
7665   - gen_ldst(stl, s);
  7754 + tmp = load_reg(s, rd);
  7755 + gen_st32(tmp, addr, IS_USER(s));
7666 7756 }
  7757 + dead_tmp(addr);
7667 7758 break;
7668 7759  
7669 7760 case 10:
... ... @@ -7700,18 +7791,18 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
7700 7791 ARCH(6);
7701 7792 rd = insn & 7;
7702 7793 rm = (insn >> 3) & 7;
7703   - gen_movl_T1_reg(s, rm);
  7794 + tmp = load_reg(s, rm);
7704 7795 switch ((insn >> 6) & 3) {
7705   - case 0: gen_sxth(cpu_T[1]); break;
7706   - case 1: gen_sxtb(cpu_T[1]); break;
7707   - case 2: gen_uxth(cpu_T[1]); break;
7708   - case 3: gen_uxtb(cpu_T[1]); break;
  7796 + case 0: gen_sxth(tmp); break;
  7797 + case 1: gen_sxtb(tmp); break;
  7798 + case 2: gen_uxth(tmp); break;
  7799 + case 3: gen_uxtb(tmp); break;
7709 7800 }
7710   - gen_movl_reg_T1(s, rd);
  7801 + store_reg(s, rd, tmp);
7711 7802 break;
7712 7803 case 4: case 5: case 0xc: case 0xd:
7713 7804 /* push/pop */
7714   - gen_movl_T1_reg(s, 13);
  7805 + addr = load_reg(s, 13);
7715 7806 if (insn & (1 << 8))
7716 7807 offset = 4;
7717 7808 else
... ... @@ -7721,44 +7812,44 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
7721 7812 offset += 4;
7722 7813 }
7723 7814 if ((insn & (1 << 11)) == 0) {
7724   - gen_op_addl_T1_im(-offset);
  7815 + tcg_gen_addi_i32(addr, addr, -offset);
7725 7816 }
7726 7817 for (i = 0; i < 8; i++) {
7727 7818 if (insn & (1 << i)) {
7728 7819 if (insn & (1 << 11)) {
7729 7820 /* pop */
7730   - gen_ldst(ldl, s);
7731   - gen_movl_reg_T0(s, i);
  7821 + tmp = gen_ld32(addr, IS_USER(s));
  7822 + store_reg(s, i, tmp);
7732 7823 } else {
7733 7824 /* push */
7734   - gen_movl_T0_reg(s, i);
7735   - gen_ldst(stl, s);
  7825 + tmp = load_reg(s, i);
  7826 + gen_st32(tmp, addr, IS_USER(s));
7736 7827 }
7737 7828 /* advance to the next address. */
7738   - gen_op_addl_T1_im(4);
  7829 + tcg_gen_addi_i32(addr, addr, 4);
7739 7830 }
7740 7831 }
7741 7832 if (insn & (1 << 8)) {
7742 7833 if (insn & (1 << 11)) {
7743 7834 /* pop pc */
7744   - gen_ldst(ldl, s);
  7835 + tmp = gen_ld32(addr, IS_USER(s));
7745 7836 /* don't set the pc until the rest of the instruction
7746 7837 has completed */
7747 7838 } else {
7748 7839 /* push lr */
7749   - gen_movl_T0_reg(s, 14);
7750   - gen_ldst(stl, s);
  7840 + tmp = load_reg(s, 14);
  7841 + gen_st32(tmp, addr, IS_USER(s));
7751 7842 }
7752   - gen_op_addl_T1_im(4);
  7843 + tcg_gen_addi_i32(addr, addr, 4);
7753 7844 }
7754 7845 if ((insn & (1 << 11)) == 0) {
7755   - gen_op_addl_T1_im(-offset);
  7846 + tcg_gen_addi_i32(addr, addr, -offset);
7756 7847 }
7757 7848 /* write back the new stack pointer */
7758   - gen_movl_reg_T1(s, 13);
  7849 + store_reg(s, 13, addr);
7759 7850 /* set the new PC value */
7760 7851 if ((insn & 0x0900) == 0x0900)
7761   - gen_bx_T0(s);
  7852 + gen_bx(s, tmp);
7762 7853 break;
7763 7854  
7764 7855 case 1: case 3: case 9: case 11: /* czb */
... ... @@ -7801,14 +7892,14 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
7801 7892 ARCH(6);
7802 7893 rn = (insn >> 3) & 0x7;
7803 7894 rd = insn & 0x7;
7804   - gen_movl_T0_reg(s, rn);
  7895 + tmp = load_reg(s, rn);
7805 7896 switch ((insn >> 6) & 3) {
7806   - case 0: gen_op_rev_T0(); break;
7807   - case 1: gen_rev16(cpu_T[0]); break;
7808   - case 3: gen_revsh(cpu_T[0]); break;
  7897 + case 0: tcg_gen_bswap_i32(tmp, tmp); break;
  7898 + case 1: gen_rev16(tmp); break;
  7899 + case 3: gen_revsh(tmp); break;
7809 7900 default: goto illegal_op;
7810 7901 }
7811   - gen_movl_reg_T0(s, rd);
  7902 + store_reg(s, rd, tmp);
7812 7903 break;
7813 7904  
7814 7905 case 6: /* cps */
... ... @@ -7846,25 +7937,28 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
7846 7937 case 12:
7847 7938 /* load/store multiple */
7848 7939 rn = (insn >> 8) & 0x7;
7849   - gen_movl_T1_reg(s, rn);
  7940 + addr = load_reg(s, rn);
7850 7941 for (i = 0; i < 8; i++) {
7851 7942 if (insn & (1 << i)) {
7852 7943 if (insn & (1 << 11)) {
7853 7944 /* load */
7854   - gen_ldst(ldl, s);
7855   - gen_movl_reg_T0(s, i);
  7945 + tmp = gen_ld32(addr, IS_USER(s));
  7946 + store_reg(s, i, tmp);
7856 7947 } else {
7857 7948 /* store */
7858   - gen_movl_T0_reg(s, i);
7859   - gen_ldst(stl, s);
  7949 + tmp = load_reg(s, i);
  7950 + gen_st32(tmp, addr, IS_USER(s));
7860 7951 }
7861 7952 /* advance to the next address */
7862   - gen_op_addl_T1_im(4);
  7953 + tcg_gen_addi_i32(addr, addr, 4);
7863 7954 }
7864 7955 }
7865 7956 /* Base register writeback. */
7866   - if ((insn & (1 << rn)) == 0)
7867   - gen_movl_reg_T1(s, rn);
  7957 + if ((insn & (1 << rn)) == 0) {
  7958 + store_reg(s, rn, addr);
  7959 + } else {
  7960 + dead_tmp(addr);
  7961 + }
7868 7962 break;
7869 7963  
7870 7964 case 13:
... ...
tcg/tcg-op.h
... ... @@ -199,6 +199,18 @@ static inline void tcg_gen_helper_0_2(void *func, TCGv arg1, TCGv arg2)
199 199 0, NULL, 2, args);
200 200 }
201 201  
  202 +static inline void tcg_gen_helper_0_3(void *func,
  203 + TCGv arg1, TCGv arg2, TCGv arg3)
  204 +{
  205 + TCGv args[3];
  206 + args[0] = arg1;
  207 + args[1] = arg2;
  208 + args[2] = arg3;
  209 + tcg_gen_call(&tcg_ctx,
  210 + tcg_const_ptr((tcg_target_long)func), TCG_HELPER_CALL_FLAGS,
  211 + 0, NULL, 3, args);
  212 +}
  213 +
202 214 static inline void tcg_gen_helper_0_4(void *func, TCGv arg1, TCGv arg2,
203 215 TCGv arg3, TCGv arg4)
204 216 {
... ...