Commit 8393617c1a2a28e6e9efec2b517431c5d4aaab60
1 parent bc24a225
Use dynamical computation for condition codes
Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
Showing 5 changed files with 196 additions and 24 deletions
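What follows is the infrastructure for evaluating the SPARC integer condition codes lazily: a new cc_op field records which kind of operation most recently defined the flags, and a helper recomputes the PSR bits only when they are actually consumed. As a rough, self-contained sketch of the idea (hypothetical names, not code from this commit):

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of lazy flag evaluation; names are hypothetical
 * and this is not code from the commit. */
enum { OP_FLAGS, OP_LOGIC };       /* OP_FLAGS: 'psr' already holds the flags */

static uint32_t psr;               /* cached flags: bit 1 = Z, bit 0 = N */
static uint32_t cc_dst;            /* result of the last flag-setting op */
static int cc_op = OP_FLAGS;

/* A flag-setting instruction records its result and which op produced it. */
static void do_logic(uint32_t res)
{
    cc_dst = res;
    cc_op = OP_LOGIC;              /* N/Z computation is deferred */
}

/* Flags are materialized only when something actually reads them. */
static uint32_t get_psr(void)
{
    if (cc_op == OP_LOGIC) {
        psr = ((cc_dst == 0) << 1) | (cc_dst >> 31);
        cc_op = OP_FLAGS;
    }
    return psr;
}

int main(void)
{
    do_logic(0);                      /* several flag setters in a row... */
    do_logic(0x80000000u);
    printf("psr = %u\n", (unsigned)get_psr()); /* ...flags computed once */
    return 0;
}

In the visible hunks the flag-setting paths still compute env->psr eagerly and then record CC_OP_FLAGS; the enum values and the dispatch table introduced below are the hooks that let individual operations defer that work.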
target-sparc/cpu.h
| ... | ... | @@ -92,6 +92,27 @@ |
| 92 | 92 | #define PSR_ET (1<<5) |
| 93 | 93 | #define PSR_CWP 0x1f |
| 94 | 94 | |
| 95 | +#define CC_SRC (env->cc_src) | |
| 96 | +#define CC_SRC2 (env->cc_src2) | |
| 97 | +#define CC_DST (env->cc_dst) | |
| 98 | +#define CC_OP (env->cc_op) | |
| 99 | + | |
| 100 | +enum { | |
| 101 | + CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ | |
| 102 | + CC_OP_FLAGS, /* all cc are back in status register */ | |
| 103 | + CC_OP_DIV, /* modify N, Z and V, C = 0*/ | |
| 104 | + CC_OP_ADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | |
| 105 | + CC_OP_ADDX, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | |
| 106 | + CC_OP_TADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | |
| 107 | + CC_OP_TADDTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */ | |
| 108 | + CC_OP_SUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | |
| 109 | + CC_OP_SUBX, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | |
| 110 | + CC_OP_TSUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | |
| 111 | + CC_OP_TSUBTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */ | |
| 112 | + CC_OP_LOGIC, /* modify N and Z, C = V = 0, CC_DST = res */ | |
| 113 | + CC_OP_NB, | |
| 114 | +}; | |
| 115 | + | |
| 95 | 116 | /* Trap base register */ |
| 96 | 117 | #define TBR_BASE_MASK 0xfffff000 |
| 97 | 118 | |
| ... | ... | @@ -261,6 +282,7 @@ typedef struct CPUSPARCState { |
| 261 | 282 | /* emulator internal flags handling */ |
| 262 | 283 | target_ulong cc_src, cc_src2; |
| 263 | 284 | target_ulong cc_dst; |
| 285 | + uint32_t cc_op; | |
| 264 | 286 | |
| 265 | 287 | target_ulong t0, t1; /* temporaries live across basic blocks */ |
| 266 | 288 | target_ulong cond; /* conditional branch result (XXX: save it in a |
| ... | ... | @@ -413,6 +435,7 @@ static inline int cpu_cwp_dec(CPUSPARCState *env1, int cwp) |
| 413 | 435 | env->psrps = (_tmp & PSR_PS)? 1 : 0; \ |
| 414 | 436 | env->psret = (_tmp & PSR_ET)? 1 : 0; \ |
| 415 | 437 | cpu_set_cwp(env, _tmp & PSR_CWP); \ |
| 438 | + CC_OP = CC_OP_FLAGS; \ | |
| 416 | 439 | } while (0) |
| 417 | 440 | |
| 418 | 441 | #ifdef TARGET_SPARC64 |
| ... | ... | @@ -420,6 +443,7 @@ static inline int cpu_cwp_dec(CPUSPARCState *env1, int cwp) |
| 420 | 443 | #define PUT_CCR(env, val) do { int _tmp = val; \ |
| 421 | 444 | env->xcc = (_tmp >> 4) << 20; \ |
| 422 | 445 | env->psr = (_tmp & 0xf) << 20; \ |
| 446 | + CC_OP = CC_OP_FLAGS; \ | |
| 423 | 447 | } while (0) |
| 424 | 448 | #define GET_CWP64(env) (env->nwindows - 1 - (env)->cwp) |
| 425 | 449 | ... | ... |
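The comments on the enum double as the contract for each deferred operation: which of CC_SRC/CC_SRC2/CC_DST it fills in and which PSR bits it may change. Only CC_OP_FLAGS gets evaluators in this commit (see op_helper.c below); any other value would need an evaluator honouring the stated contract. A plausible sketch for CC_OP_LOGIC, using the existing PSR_NEG/PSR_ZERO masks and the CC_DST macro (hypothetical, not the committed implementation):

/* Hypothetical evaluator for the CC_OP_LOGIC contract above
 * ("modify N and Z, C = V = 0, CC_DST = res"); not part of this commit. */
static uint32_t compute_all_logic(void)
{
    uint32_t ret = 0;

    if ((int32_t)CC_DST == 0) {
        ret |= PSR_ZERO;
    }
    if ((int32_t)CC_DST < 0) {
        ret |= PSR_NEG;
    }
    return ret;                    /* C and V (PSR_CARRY, PSR_OVF) stay clear */
}

static uint32_t compute_C_logic(void)
{
    return 0;                      /* logical results never produce a carry */
}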
target-sparc/helper.c
target-sparc/helper.h
target-sparc/op_helper.c
| ... | ... | @@ -746,6 +746,67 @@ GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1); |
| 746 | 746 | GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0); |
| 747 | 747 | GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1); |
| 748 | 748 | |
| 749 | +static uint32_t compute_all_flags(void) | |
| 750 | +{ | |
| 751 | + return env->psr & PSR_ICC; | |
| 752 | +} | |
| 753 | + | |
| 754 | +static uint32_t compute_C_flags(void) | |
| 755 | +{ | |
| 756 | + return env->psr & PSR_CARRY; | |
| 757 | +} | |
| 758 | + | |
| 759 | +#ifdef TARGET_SPARC64 | |
| 760 | +static uint32_t compute_all_flags_xcc(void) | |
| 761 | +{ | |
| 762 | + return env->xcc & PSR_ICC; | |
| 763 | +} | |
| 764 | + | |
| 765 | +static uint32_t compute_C_flags_xcc(void) | |
| 766 | +{ | |
| 767 | + return env->xcc & PSR_CARRY; | |
| 768 | +} | |
| 769 | + | |
| 770 | +#endif | |
| 771 | + | |
| 772 | +typedef struct CCTable { | |
| 773 | + uint32_t (*compute_all)(void); /* return all the flags */ | |
| 774 | + uint32_t (*compute_c)(void); /* return the C flag */ | |
| 775 | +} CCTable; | |
| 776 | + | |
| 777 | +static const CCTable icc_table[CC_OP_NB] = { | |
| 778 | + /* CC_OP_DYNAMIC should never happen */ | |
| 779 | + [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags }, | |
| 780 | +}; | |
| 781 | + | |
| 782 | +#ifdef TARGET_SPARC64 | |
| 783 | +static const CCTable xcc_table[CC_OP_NB] = { | |
| 784 | + /* CC_OP_DYNAMIC should never happen */ | |
| 785 | + [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc }, | |
| 786 | +}; | |
| 787 | +#endif | |
| 788 | + | |
| 789 | +void helper_compute_psr(void) | |
| 790 | +{ | |
| 791 | + uint32_t new_psr; | |
| 792 | + | |
| 793 | + new_psr = icc_table[CC_OP].compute_all(); | |
| 794 | + env->psr = new_psr; | |
| 795 | +#ifdef TARGET_SPARC64 | |
| 796 | + new_psr = xcc_table[CC_OP].compute_all(); | |
| 797 | + env->xcc = new_psr; | |
| 798 | +#endif | |
| 799 | + CC_OP = CC_OP_FLAGS; | |
| 800 | +} | |
| 801 | + | |
| 802 | +uint32_t helper_compute_C_icc(void) | |
| 803 | +{ | |
| 804 | + uint32_t ret; | |
| 805 | + | |
| 806 | + ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT; | |
| 807 | + return ret; | |
| 808 | +} | |
| 809 | + | |
| 749 | 810 | #ifdef TARGET_SPARC64 |
| 750 | 811 | GEN_FCMPS(fcmps_fcc1, float32, 22, 0); |
| 751 | 812 | GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0); | ... | ... |
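helper_compute_psr() looks up the pending cc_op in icc_table (and xcc_table on sparc64), writes the recomputed bits back into env->psr/env->xcc, and resets CC_OP to CC_OP_FLAGS, so pending flag state is folded back at most once per consumer. Extending the scheme to another operation then amounts to adding its evaluators to the tables; assuming the hypothetical compute_all_logic/compute_C_logic sketched above, the entry would look like this (illustrative only):

static const CCTable icc_table[CC_OP_NB] = {
    /* CC_OP_DYNAMIC should never happen */
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
    /* hypothetical extension, not part of this commit: */
    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
};

The gen_helper_compute_psr() calls added in translate.c before addx, subx and mulscc serve the consumer side of the scheme: those instructions read the current carry (or N/V) bits, so any pending cc_op state has to be folded back into env->psr first.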
target-sparc/translate.c
| ... | ... | @@ -42,7 +42,7 @@ |
| 42 | 42 | |
| 43 | 43 | /* global register indexes */ |
| 44 | 44 | static TCGv_ptr cpu_env, cpu_regwptr; |
| 45 | -static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst; | |
| 45 | +static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst, cpu_cc_op; | |
| 46 | 46 | static TCGv_i32 cpu_psr; |
| 47 | 47 | static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8]; |
| 48 | 48 | static TCGv cpu_y; |
| ... | ... | @@ -76,6 +76,7 @@ typedef struct DisasContext { |
| 76 | 76 | int mem_idx; |
| 77 | 77 | int fpu_enabled; |
| 78 | 78 | int address_mask_32bit; |
| 79 | + uint32_t cc_op; /* current CC operation */ | |
| 79 | 80 | struct TranslationBlock *tb; |
| 80 | 81 | sparc_def_t *def; |
| 81 | 82 | } DisasContext; |
| ... | ... | @@ -1286,7 +1287,8 @@ static inline void gen_op_next_insn(void) |
| 1286 | 1287 | tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); |
| 1287 | 1288 | } |
| 1288 | 1289 | |
| 1289 | -static inline void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond) | |
| 1290 | +static inline void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond, | |
| 1291 | + DisasContext *dc) | |
| 1290 | 1292 | { |
| 1291 | 1293 | TCGv_i32 r_src; |
| 1292 | 1294 | |
| ... | ... | @@ -1298,6 +1300,14 @@ static inline void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond) |
| 1298 | 1300 | #else |
| 1299 | 1301 | r_src = cpu_psr; |
| 1300 | 1302 | #endif |
| 1303 | + switch (dc->cc_op) { | |
| 1304 | + case CC_OP_FLAGS: | |
| 1305 | + break; | |
| 1306 | + default: | |
| 1307 | + gen_helper_compute_psr(); | |
| 1308 | + dc->cc_op = CC_OP_FLAGS; | |
| 1309 | + break; | |
| 1310 | + } | |
| 1301 | 1311 | switch (cond) { |
| 1302 | 1312 | case 0x0: |
| 1303 | 1313 | gen_op_eval_bn(r_dst); |
| ... | ... | @@ -1474,7 +1484,7 @@ static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc, |
| 1474 | 1484 | } |
| 1475 | 1485 | } else { |
| 1476 | 1486 | flush_cond(dc, r_cond); |
| 1477 | - gen_cond(r_cond, cc, cond); | |
| 1487 | + gen_cond(r_cond, cc, cond, dc); | |
| 1478 | 1488 | if (a) { |
| 1479 | 1489 | gen_branch_a(dc, target, dc->npc, r_cond); |
| 1480 | 1490 | dc->is_br = 1; |
| ... | ... | @@ -2154,14 +2164,14 @@ static void disas_sparc_insn(DisasContext * dc) |
| 2154 | 2164 | |
| 2155 | 2165 | save_state(dc, cpu_cond); |
| 2156 | 2166 | if (cc == 0) |
| 2157 | - gen_cond(r_cond, 0, cond); | |
| 2167 | + gen_cond(r_cond, 0, cond, dc); | |
| 2158 | 2168 | else if (cc == 2) |
| 2159 | - gen_cond(r_cond, 1, cond); | |
| 2169 | + gen_cond(r_cond, 1, cond, dc); | |
| 2160 | 2170 | else |
| 2161 | 2171 | goto illegal_insn; |
| 2162 | 2172 | #else |
| 2163 | 2173 | save_state(dc, cpu_cond); |
| 2164 | - gen_cond(r_cond, 0, cond); | |
| 2174 | + gen_cond(r_cond, 0, cond, dc); | |
| 2165 | 2175 | #endif |
| 2166 | 2176 | l1 = gen_new_label(); |
| 2167 | 2177 | tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1); |
| ... | ... | @@ -2200,6 +2210,7 @@ static void disas_sparc_insn(DisasContext * dc) |
| 2200 | 2210 | break; |
| 2201 | 2211 | #ifdef TARGET_SPARC64 |
| 2202 | 2212 | case 0x2: /* V9 rdccr */ |
| 2213 | + gen_helper_compute_psr(); | |
| 2203 | 2214 | gen_helper_rdccr(cpu_dst); |
| 2204 | 2215 | gen_movl_TN_reg(rd, cpu_dst); |
| 2205 | 2216 | break; |
| ... | ... | @@ -2275,6 +2286,8 @@ static void disas_sparc_insn(DisasContext * dc) |
| 2275 | 2286 | #ifndef TARGET_SPARC64 |
| 2276 | 2287 | if (!supervisor(dc)) |
| 2277 | 2288 | goto priv_insn; |
| 2289 | + gen_helper_compute_psr(); | |
| 2290 | + dc->cc_op = CC_OP_FLAGS; | |
| 2278 | 2291 | gen_helper_rdpsr(cpu_dst); |
| 2279 | 2292 | #else |
| 2280 | 2293 | CHECK_IU_FEATURE(dc, HYPV); |
| ... | ... | @@ -2923,7 +2936,7 @@ static void disas_sparc_insn(DisasContext * dc) |
| 2923 | 2936 | l1 = gen_new_label(); \ |
| 2924 | 2937 | r_cond = tcg_temp_new(); \ |
| 2925 | 2938 | cond = GET_FIELD_SP(insn, 14, 17); \ |
| 2926 | - gen_cond(r_cond, icc, cond); \ | |
| 2939 | + gen_cond(r_cond, icc, cond, dc); \ | |
| 2927 | 2940 | tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \ |
| 2928 | 2941 | 0, l1); \ |
| 2929 | 2942 | tcg_gen_mov_i32(cpu_fpr[rd], cpu_fpr[rs2]); \ |
| ... | ... | @@ -2938,7 +2951,7 @@ static void disas_sparc_insn(DisasContext * dc) |
| 2938 | 2951 | l1 = gen_new_label(); \ |
| 2939 | 2952 | r_cond = tcg_temp_new(); \ |
| 2940 | 2953 | cond = GET_FIELD_SP(insn, 14, 17); \ |
| 2941 | - gen_cond(r_cond, icc, cond); \ | |
| 2954 | + gen_cond(r_cond, icc, cond, dc); \ | |
| 2942 | 2955 | tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \ |
| 2943 | 2956 | 0, l1); \ |
| 2944 | 2957 | tcg_gen_mov_i32(cpu_fpr[DFPREG(rd)], \ |
| ... | ... | @@ -2956,7 +2969,7 @@ static void disas_sparc_insn(DisasContext * dc) |
| 2956 | 2969 | l1 = gen_new_label(); \ |
| 2957 | 2970 | r_cond = tcg_temp_new(); \ |
| 2958 | 2971 | cond = GET_FIELD_SP(insn, 14, 17); \ |
| 2959 | - gen_cond(r_cond, icc, cond); \ | |
| 2972 | + gen_cond(r_cond, icc, cond, dc); \ | |
| 2960 | 2973 | tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, \ |
| 2961 | 2974 | 0, l1); \ |
| 2962 | 2975 | tcg_gen_mov_i32(cpu_fpr[QFPREG(rd)], \ |
| ... | ... | @@ -3140,12 +3153,16 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3140 | 3153 | simm = GET_FIELDs(insn, 19, 31); |
| 3141 | 3154 | if (xop & 0x10) { |
| 3142 | 3155 | gen_op_addi_cc(cpu_dst, cpu_src1, simm); |
| 3156 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3157 | + dc->cc_op = CC_OP_FLAGS; | |
| 3143 | 3158 | } else { |
| 3144 | 3159 | tcg_gen_addi_tl(cpu_dst, cpu_src1, simm); |
| 3145 | 3160 | } |
| 3146 | 3161 | } else { |
| 3147 | 3162 | if (xop & 0x10) { |
| 3148 | 3163 | gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2); |
| 3164 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3165 | + dc->cc_op = CC_OP_FLAGS; | |
| 3149 | 3166 | } else { |
| 3150 | 3167 | tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2); |
| 3151 | 3168 | } |
| ... | ... | @@ -3160,6 +3177,8 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3160 | 3177 | } |
| 3161 | 3178 | if (xop & 0x10) { |
| 3162 | 3179 | gen_op_logic_cc(cpu_dst); |
| 3180 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3181 | + dc->cc_op = CC_OP_FLAGS; | |
| 3163 | 3182 | } |
| 3164 | 3183 | break; |
| 3165 | 3184 | case 0x2: /* or */ |
| ... | ... | @@ -3169,8 +3188,11 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3169 | 3188 | } else { |
| 3170 | 3189 | tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2); |
| 3171 | 3190 | } |
| 3172 | - if (xop & 0x10) | |
| 3191 | + if (xop & 0x10) { | |
| 3173 | 3192 | gen_op_logic_cc(cpu_dst); |
| 3193 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3194 | + dc->cc_op = CC_OP_FLAGS; | |
| 3195 | + } | |
| 3174 | 3196 | break; |
| 3175 | 3197 | case 0x3: /* xor */ |
| 3176 | 3198 | if (IS_IMM) { |
| ... | ... | @@ -3179,20 +3201,27 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3179 | 3201 | } else { |
| 3180 | 3202 | tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2); |
| 3181 | 3203 | } |
| 3182 | - if (xop & 0x10) | |
| 3204 | + if (xop & 0x10) { | |
| 3183 | 3205 | gen_op_logic_cc(cpu_dst); |
| 3206 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3207 | + dc->cc_op = CC_OP_FLAGS; | |
| 3208 | + } | |
| 3184 | 3209 | break; |
| 3185 | 3210 | case 0x4: /* sub */ |
| 3186 | 3211 | if (IS_IMM) { |
| 3187 | 3212 | simm = GET_FIELDs(insn, 19, 31); |
| 3188 | 3213 | if (xop & 0x10) { |
| 3189 | 3214 | gen_op_subi_cc(cpu_dst, cpu_src1, simm); |
| 3215 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3216 | + dc->cc_op = CC_OP_FLAGS; | |
| 3190 | 3217 | } else { |
| 3191 | 3218 | tcg_gen_subi_tl(cpu_dst, cpu_src1, simm); |
| 3192 | 3219 | } |
| 3193 | 3220 | } else { |
| 3194 | 3221 | if (xop & 0x10) { |
| 3195 | 3222 | gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2); |
| 3223 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3224 | + dc->cc_op = CC_OP_FLAGS; | |
| 3196 | 3225 | } else { |
| 3197 | 3226 | tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2); |
| 3198 | 3227 | } |
| ... | ... | @@ -3205,8 +3234,11 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3205 | 3234 | } else { |
| 3206 | 3235 | tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2); |
| 3207 | 3236 | } |
| 3208 | - if (xop & 0x10) | |
| 3237 | + if (xop & 0x10) { | |
| 3209 | 3238 | gen_op_logic_cc(cpu_dst); |
| 3239 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3240 | + dc->cc_op = CC_OP_FLAGS; | |
| 3241 | + } | |
| 3210 | 3242 | break; |
| 3211 | 3243 | case 0x6: /* orn */ |
| 3212 | 3244 | if (IS_IMM) { |
| ... | ... | @@ -3215,8 +3247,11 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3215 | 3247 | } else { |
| 3216 | 3248 | tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2); |
| 3217 | 3249 | } |
| 3218 | - if (xop & 0x10) | |
| 3250 | + if (xop & 0x10) { | |
| 3219 | 3251 | gen_op_logic_cc(cpu_dst); |
| 3252 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3253 | + dc->cc_op = CC_OP_FLAGS; | |
| 3254 | + } | |
| 3220 | 3255 | break; |
| 3221 | 3256 | case 0x7: /* xorn */ |
| 3222 | 3257 | if (IS_IMM) { |
| ... | ... | @@ -3226,23 +3261,34 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3226 | 3261 | tcg_gen_not_tl(cpu_tmp0, cpu_src2); |
| 3227 | 3262 | tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_tmp0); |
| 3228 | 3263 | } |
| 3229 | - if (xop & 0x10) | |
| 3264 | + if (xop & 0x10) { | |
| 3230 | 3265 | gen_op_logic_cc(cpu_dst); |
| 3266 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3267 | + dc->cc_op = CC_OP_FLAGS; | |
| 3268 | + } | |
| 3231 | 3269 | break; |
| 3232 | 3270 | case 0x8: /* addx, V9 addc */ |
| 3233 | 3271 | if (IS_IMM) { |
| 3234 | 3272 | simm = GET_FIELDs(insn, 19, 31); |
| 3235 | - if (xop & 0x10) | |
| 3273 | + if (xop & 0x10) { | |
| 3274 | + gen_helper_compute_psr(); | |
| 3236 | 3275 | gen_op_addxi_cc(cpu_dst, cpu_src1, simm); |
| 3237 | - else { | |
| 3276 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3277 | + dc->cc_op = CC_OP_FLAGS; | |
| 3278 | + } else { | |
| 3279 | + gen_helper_compute_psr(); | |
| 3238 | 3280 | gen_mov_reg_C(cpu_tmp0, cpu_psr); |
| 3239 | 3281 | tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, simm); |
| 3240 | 3282 | tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_tmp0); |
| 3241 | 3283 | } |
| 3242 | 3284 | } else { |
| 3243 | - if (xop & 0x10) | |
| 3285 | + if (xop & 0x10) { | |
| 3286 | + gen_helper_compute_psr(); | |
| 3244 | 3287 | gen_op_addx_cc(cpu_dst, cpu_src1, cpu_src2); |
| 3245 | - else { | |
| 3288 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3289 | + dc->cc_op = CC_OP_FLAGS; | |
| 3290 | + } else { | |
| 3291 | + gen_helper_compute_psr(); | |
| 3246 | 3292 | gen_mov_reg_C(cpu_tmp0, cpu_psr); |
| 3247 | 3293 | tcg_gen_add_tl(cpu_tmp0, cpu_src2, cpu_tmp0); |
| 3248 | 3294 | tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_tmp0); |
| ... | ... | @@ -3262,29 +3308,43 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3262 | 3308 | case 0xa: /* umul */ |
| 3263 | 3309 | CHECK_IU_FEATURE(dc, MUL); |
| 3264 | 3310 | gen_op_umul(cpu_dst, cpu_src1, cpu_src2); |
| 3265 | - if (xop & 0x10) | |
| 3311 | + if (xop & 0x10) { | |
| 3266 | 3312 | gen_op_logic_cc(cpu_dst); |
| 3313 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3314 | + dc->cc_op = CC_OP_FLAGS; | |
| 3315 | + } | |
| 3267 | 3316 | break; |
| 3268 | 3317 | case 0xb: /* smul */ |
| 3269 | 3318 | CHECK_IU_FEATURE(dc, MUL); |
| 3270 | 3319 | gen_op_smul(cpu_dst, cpu_src1, cpu_src2); |
| 3271 | - if (xop & 0x10) | |
| 3320 | + if (xop & 0x10) { | |
| 3272 | 3321 | gen_op_logic_cc(cpu_dst); |
| 3322 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3323 | + dc->cc_op = CC_OP_FLAGS; | |
| 3324 | + } | |
| 3273 | 3325 | break; |
| 3274 | 3326 | case 0xc: /* subx, V9 subc */ |
| 3275 | 3327 | if (IS_IMM) { |
| 3276 | 3328 | simm = GET_FIELDs(insn, 19, 31); |
| 3277 | 3329 | if (xop & 0x10) { |
| 3330 | + gen_helper_compute_psr(); | |
| 3278 | 3331 | gen_op_subxi_cc(cpu_dst, cpu_src1, simm); |
| 3332 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3333 | + dc->cc_op = CC_OP_FLAGS; | |
| 3279 | 3334 | } else { |
| 3335 | + gen_helper_compute_psr(); | |
| 3280 | 3336 | gen_mov_reg_C(cpu_tmp0, cpu_psr); |
| 3281 | 3337 | tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, simm); |
| 3282 | 3338 | tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_tmp0); |
| 3283 | 3339 | } |
| 3284 | 3340 | } else { |
| 3285 | 3341 | if (xop & 0x10) { |
| 3342 | + gen_helper_compute_psr(); | |
| 3286 | 3343 | gen_op_subx_cc(cpu_dst, cpu_src1, cpu_src2); |
| 3344 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3345 | + dc->cc_op = CC_OP_FLAGS; | |
| 3287 | 3346 | } else { |
| 3347 | + gen_helper_compute_psr(); | |
| 3288 | 3348 | gen_mov_reg_C(cpu_tmp0, cpu_psr); |
| 3289 | 3349 | tcg_gen_add_tl(cpu_tmp0, cpu_src2, cpu_tmp0); |
| 3290 | 3350 | tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_tmp0); |
| ... | ... | @@ -3302,14 +3362,20 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3302 | 3362 | case 0xe: /* udiv */ |
| 3303 | 3363 | CHECK_IU_FEATURE(dc, DIV); |
| 3304 | 3364 | gen_helper_udiv(cpu_dst, cpu_src1, cpu_src2); |
| 3305 | - if (xop & 0x10) | |
| 3365 | + if (xop & 0x10) { | |
| 3306 | 3366 | gen_op_div_cc(cpu_dst); |
| 3367 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3368 | + dc->cc_op = CC_OP_FLAGS; | |
| 3369 | + } | |
| 3307 | 3370 | break; |
| 3308 | 3371 | case 0xf: /* sdiv */ |
| 3309 | 3372 | CHECK_IU_FEATURE(dc, DIV); |
| 3310 | 3373 | gen_helper_sdiv(cpu_dst, cpu_src1, cpu_src2); |
| 3311 | - if (xop & 0x10) | |
| 3374 | + if (xop & 0x10) { | |
| 3312 | 3375 | gen_op_div_cc(cpu_dst); |
| 3376 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3377 | + dc->cc_op = CC_OP_FLAGS; | |
| 3378 | + } | |
| 3313 | 3379 | break; |
| 3314 | 3380 | default: |
| 3315 | 3381 | goto illegal_insn; |
| ... | ... | @@ -3322,24 +3388,35 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3322 | 3388 | case 0x20: /* taddcc */ |
| 3323 | 3389 | gen_op_tadd_cc(cpu_dst, cpu_src1, cpu_src2); |
| 3324 | 3390 | gen_movl_TN_reg(rd, cpu_dst); |
| 3391 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3392 | + dc->cc_op = CC_OP_FLAGS; | |
| 3325 | 3393 | break; |
| 3326 | 3394 | case 0x21: /* tsubcc */ |
| 3327 | 3395 | gen_op_tsub_cc(cpu_dst, cpu_src1, cpu_src2); |
| 3328 | 3396 | gen_movl_TN_reg(rd, cpu_dst); |
| 3397 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3398 | + dc->cc_op = CC_OP_FLAGS; | |
| 3329 | 3399 | break; |
| 3330 | 3400 | case 0x22: /* taddcctv */ |
| 3331 | 3401 | save_state(dc, cpu_cond); |
| 3332 | 3402 | gen_op_tadd_ccTV(cpu_dst, cpu_src1, cpu_src2); |
| 3333 | 3403 | gen_movl_TN_reg(rd, cpu_dst); |
| 3404 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3405 | + dc->cc_op = CC_OP_FLAGS; | |
| 3334 | 3406 | break; |
| 3335 | 3407 | case 0x23: /* tsubcctv */ |
| 3336 | 3408 | save_state(dc, cpu_cond); |
| 3337 | 3409 | gen_op_tsub_ccTV(cpu_dst, cpu_src1, cpu_src2); |
| 3338 | 3410 | gen_movl_TN_reg(rd, cpu_dst); |
| 3411 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3412 | + dc->cc_op = CC_OP_FLAGS; | |
| 3339 | 3413 | break; |
| 3340 | 3414 | case 0x24: /* mulscc */ |
| 3415 | + gen_helper_compute_psr(); | |
| 3341 | 3416 | gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2); |
| 3342 | 3417 | gen_movl_TN_reg(rd, cpu_dst); |
| 3418 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3419 | + dc->cc_op = CC_OP_FLAGS; | |
| 3343 | 3420 | break; |
| 3344 | 3421 | #ifndef TARGET_SPARC64 |
| 3345 | 3422 | case 0x25: /* sll */ |
| ... | ... | @@ -3394,6 +3471,8 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3394 | 3471 | case 0x2: /* V9 wrccr */ |
| 3395 | 3472 | tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2); |
| 3396 | 3473 | gen_helper_wrccr(cpu_dst); |
| 3474 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3475 | + dc->cc_op = CC_OP_FLAGS; | |
| 3397 | 3476 | break; |
| 3398 | 3477 | case 0x3: /* V9 wrasi */ |
| 3399 | 3478 | tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2); |
| ... | ... | @@ -3525,6 +3604,8 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3525 | 3604 | #else |
| 3526 | 3605 | tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2); |
| 3527 | 3606 | gen_helper_wrpsr(cpu_dst); |
| 3607 | + tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); | |
| 3608 | + dc->cc_op = CC_OP_FLAGS; | |
| 3528 | 3609 | save_state(dc, cpu_cond); |
| 3529 | 3610 | gen_op_next_insn(); |
| 3530 | 3611 | tcg_gen_exit_tb(0); |
| ... | ... | @@ -3739,9 +3820,9 @@ static void disas_sparc_insn(DisasContext * dc) |
| 3739 | 3820 | r_cond = tcg_temp_new(); |
| 3740 | 3821 | if (insn & (1 << 18)) { |
| 3741 | 3822 | if (cc == 0) |
| 3742 | - gen_cond(r_cond, 0, cond); | |
| 3823 | + gen_cond(r_cond, 0, cond, dc); | |
| 3743 | 3824 | else if (cc == 2) |
| 3744 | - gen_cond(r_cond, 1, cond); | |
| 3825 | + gen_cond(r_cond, 1, cond, dc); | |
| 3745 | 3826 | else |
| 3746 | 3827 | goto illegal_insn; |
| 3747 | 3828 | } else { |
| ... | ... | @@ -4919,6 +5000,7 @@ static inline void gen_intermediate_code_internal(TranslationBlock * tb, |
| 4919 | 5000 | dc->pc = pc_start; |
| 4920 | 5001 | last_pc = dc->pc; |
| 4921 | 5002 | dc->npc = (target_ulong) tb->cs_base; |
| 5003 | + dc->cc_op = CC_OP_DYNAMIC; | |
| 4922 | 5004 | dc->mem_idx = cpu_mmu_index(env); |
| 4923 | 5005 | dc->def = env->def; |
| 4924 | 5006 | if ((dc->def->features & CPU_FEATURE_FLOAT)) |
| ... | ... | @@ -5131,6 +5213,8 @@ void gen_intermediate_code_init(CPUSPARCState *env) |
| 5131 | 5213 | "cc_src2"); |
| 5132 | 5214 | cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst), |
| 5133 | 5215 | "cc_dst"); |
| 5216 | + cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, cc_op), | |
| 5217 | + "cc_op"); | |
| 5134 | 5218 | cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, psr), |
| 5135 | 5219 | "psr"); |
| 5136 | 5220 | cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, fsr), | ... | ... |
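Throughout translate.c the same two lines follow every flag-producing instruction: store the new cc_op value into the cpu_cc_op global and mirror it in dc->cc_op, so that later consumers (gen_cond, rdccr, rdpsr, addx/subx) can decide whether a gen_helper_compute_psr() call is still needed. A small wrapper along these lines (purely illustrative, not in the commit) captures the pattern:

/* Hypothetical helper for the repeated pattern in the hunks above. */
static inline void gen_update_cc_op(DisasContext *dc, uint32_t cc_op)
{
    tcg_gen_movi_i32(cpu_cc_op, cc_op);   /* value the runtime helpers see */
    dc->cc_op = cc_op;                    /* value the translator tracks   */
}

Each `tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); dc->cc_op = CC_OP_FLAGS;` pair would then collapse to `gen_update_cc_op(dc, CC_OP_FLAGS);`, with dc->cc_op initialized to CC_OP_DYNAMIC at the start of each translation block as above.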