Commit 3761035f2c22badf220747c5734d85a922c53258
1 parent 496cb5b9
alpha: directly access ir registers
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5151 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 2 changed files with 145 additions and 326 deletions
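
Note on the change: this commit drops the dyngen-style micro-ops that shuttled Alpha integer registers through the T0/T1/T2 temporaries and instead emits TCG moves against the cpu_ir[] globals at each call site. The one architectural detail every call site now open-codes is the r31 convention: reads of r31 yield zero and writes to it are discarded. Below is a minimal standalone model of that convention (plain C, illustrative only, not QEMU code; it mirrors the removed get_ir()/set_ir() helpers).

#include <stdint.h>
#include <stdio.h>

static uint64_t ir[31];                 /* ir0..ir30; r31 has no storage */

static uint64_t get_ir(int reg)         /* reads of r31 return zero */
{
    return reg == 31 ? 0 : ir[reg];
}

static void set_ir(int reg, uint64_t v) /* writes to r31 are discarded */
{
    if (reg != 31)
        ir[reg] = v;
}

int main(void)
{
    set_ir(1, 0x1234);
    set_ir(31, 0xdead);                 /* silently dropped */
    printf("ir1=%llx ir31=%llx\n",
           (unsigned long long)get_ir(1),
           (unsigned long long)get_ir(31));
    return 0;
}
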
target-alpha/op_template.h
| ... | ... | @@ -20,11 +20,6 @@ |
| 20 | 20 | |
| 21 | 21 | /* Optimized constant loads */ |
| 22 | 22 | #if REG < 3 |
| 23 | -void OPPROTO glue(op_reset_T, REG) (void) | |
| 24 | -{ | |
| 25 | - glue(T, REG) = 0; | |
| 26 | - RETURN(); | |
| 27 | -} | |
| 28 | 23 | |
| 29 | 24 | #if !defined(HOST_SPARC) && !defined(HOST_SPARC64) |
| 30 | 25 | void OPPROTO glue(op_reset_FT, REG) (void) |
| ... | ... | @@ -40,87 +35,10 @@ void OPPROTO glue(op_reset_FT, REG) (void) |
| 40 | 35 | } |
| 41 | 36 | #endif |
| 42 | 37 | |
| 43 | -/* XXX: This can be great on most RISC machines */ | |
| 44 | -#if !defined(__i386__) && !defined(__x86_64__) | |
| 45 | -void OPPROTO glue(op_set_s16_T, REG) (void) | |
| 46 | -{ | |
| 47 | - glue(T, REG) = (int16_t)PARAM(1); | |
| 48 | - RETURN(); | |
| 49 | -} | |
| 50 | - | |
| 51 | -void OPPROTO glue(op_set_u16_T, REG) (void) | |
| 52 | -{ | |
| 53 | - glue(T, REG) = (uint16_t)PARAM(1); | |
| 54 | - RETURN(); | |
| 55 | -} | |
| 56 | -#endif | |
| 57 | - | |
| 58 | -void OPPROTO glue(op_set_s32_T, REG) (void) | |
| 59 | -{ | |
| 60 | - glue(T, REG) = (int32_t)PARAM(1); | |
| 61 | - RETURN(); | |
| 62 | -} | |
| 63 | - | |
| 64 | -void OPPROTO glue(op_set_u32_T, REG) (void) | |
| 65 | -{ | |
| 66 | - glue(T, REG) = (uint32_t)PARAM(1); | |
| 67 | - RETURN(); | |
| 68 | -} | |
| 69 | - | |
| 70 | -#if 0 // Qemu does not know how to do this... | |
| 71 | -void OPPROTO glue(op_set_64_T, REG) (void) | |
| 72 | -{ | |
| 73 | - glue(T, REG) = (int64_t)PARAM(1); | |
| 74 | - RETURN(); | |
| 75 | -} | |
| 76 | -#else | |
| 77 | -void OPPROTO glue(op_set_64_T, REG) (void) | |
| 78 | -{ | |
| 79 | - glue(T, REG) = ((int64_t)PARAM(1) << 32) | (int64_t)PARAM(2); | |
| 80 | - RETURN(); | |
| 81 | -} | |
| 82 | -#endif | |
| 83 | - | |
| 84 | 38 | #endif /* REG < 3 */ |
| 85 | 39 | |
| 86 | 40 | /* Fixed-point register moves */ |
| 87 | 41 | #if REG < 31 |
| 88 | -void OPPROTO glue(op_load_T0_ir, REG) (void) | |
| 89 | -{ | |
| 90 | - T0 = env->ir[REG]; | |
| 91 | - RETURN(); | |
| 92 | -} | |
| 93 | - | |
| 94 | -void OPPROTO glue(op_load_T1_ir, REG) (void) | |
| 95 | -{ | |
| 96 | - T1 = env->ir[REG]; | |
| 97 | - RETURN(); | |
| 98 | -} | |
| 99 | - | |
| 100 | -void OPPROTO glue(op_load_T2_ir, REG) (void) | |
| 101 | -{ | |
| 102 | - T2 = env->ir[REG]; | |
| 103 | - RETURN(); | |
| 104 | -} | |
| 105 | - | |
| 106 | -void OPPROTO glue(op_store_T0_ir, REG) (void) | |
| 107 | -{ | |
| 108 | - env->ir[REG] = T0; | |
| 109 | - RETURN(); | |
| 110 | -} | |
| 111 | - | |
| 112 | -void OPPROTO glue(op_store_T1_ir, REG) (void) | |
| 113 | -{ | |
| 114 | - env->ir[REG] = T1; | |
| 115 | - RETURN(); | |
| 116 | -} | |
| 117 | - | |
| 118 | -void OPPROTO glue(op_store_T2_ir, REG) (void) | |
| 119 | -{ | |
| 120 | - env->ir[REG] = T2; | |
| 121 | - RETURN(); | |
| 122 | -} | |
| 123 | - | |
| 124 | 42 | void OPPROTO glue(op_cmov_ir, REG) (void) |
| 125 | 43 | { |
| 126 | 44 | if (T0) | ... | ... |
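
For context on the deletions above: op_template.h is included once per register index with REG predefined, and the glue() token-pasting macro stamps out one micro-op per register. A self-contained sketch of that mechanism follows (the glue()/xglue() definitions are reproduced here as assumptions about the surrounding QEMU headers; the sketch is not part of this diff).

#include <stdio.h>

#define xglue(x, y) x ## y
#define glue(x, y)  xglue(x, y)

static long T0, ir[31];

#define REG 5
/* Expands to: void op_load_T0_ir5(void) { T0 = ir[5]; } */
void glue(op_load_T0_ir, REG)(void)
{
    T0 = ir[REG];
}
#undef REG

int main(void)
{
    ir[5] = 42;
    op_load_T0_ir5();
    printf("T0=%ld\n", T0);
    return 0;
}
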
target-alpha/translate.c
| ... | ... | @@ -44,10 +44,15 @@ struct DisasContext { |
| 44 | 44 | uint32_t amask; |
| 45 | 45 | }; |
| 46 | 46 | |
| 47 | +/* global register indexes */ | |
| 47 | 48 | static TCGv cpu_env; |
| 48 | 49 | static TCGv cpu_ir[31]; |
| 49 | 50 | static TCGv cpu_pc; |
| 50 | 51 | |
| 52 | +/* dyngen register indexes */ | |
| 53 | +static TCGv cpu_T[3]; | |
| 54 | + | |
| 55 | +/* register names */ | |
| 51 | 56 | static char cpu_reg_names[5*31]; |
| 52 | 57 | |
| 53 | 58 | #include "gen-icount.h" |
| ... | ... | @@ -63,6 +68,19 @@ static void alpha_translate_init(void) |
| 63 | 68 | |
| 64 | 69 | cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env"); |
| 65 | 70 | |
| 71 | +#if TARGET_LONG_BITS > HOST_LONG_BITS | |
| 72 | + cpu_T[0] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, | |
| 73 | + offsetof(CPUState, t0), "T0"); | |
| 74 | + cpu_T[1] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, | |
| 75 | + offsetof(CPUState, t1), "T1"); | |
| 76 | + cpu_T[2] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, | |
| 77 | + offsetof(CPUState, t2), "T2"); | |
| 78 | +#else | |
| 79 | + cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I64, TCG_AREG1, "T0"); | |
| 80 | + cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I64, TCG_AREG2, "T1"); | |
| 81 | + cpu_T[2] = tcg_global_reg_new(TCG_TYPE_I64, TCG_AREG3, "T2"); | |
| 82 | +#endif | |
| 83 | + | |
| 66 | 84 | p = cpu_reg_names; |
| 67 | 85 | for (i = 0; i < 31; i++) { |
| 68 | 86 | sprintf(p, "ir%d", i); |
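
The #if above decides how the former dyngen temporaries T0..T2 are exposed to TCG: as fixed host registers (TCG_AREG1..TCG_AREG3) when a 64-bit guest value fits in one host register, or as memory slots inside the CPU state otherwise, addressed by their offset from the env pointer. A standalone illustration of the offsetof() part (the struct below is a stand-in, not QEMU's real CPUState):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t ir[31];
    uint64_t t0, t1, t2;   /* backing storage for T0..T2 on 32-bit hosts */
} FakeCPUState;

int main(void)
{
    /* tcg_global_mem_new() receives offsets like these so the generated
     * code can address the temporaries relative to the env base pointer. */
    printf("t0 at %zu, t1 at %zu, t2 at %zu\n",
           offsetof(FakeCPUState, t0),
           offsetof(FakeCPUState, t1),
           offsetof(FakeCPUState, t2));
    return 0;
}
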
| ... | ... | @@ -107,65 +125,9 @@ static always_inline void func (int n) \ |
| 107 | 125 | |
| 108 | 126 | /* IR moves */ |
| 109 | 127 | /* Special hacks for ir31 */ |
| 110 | -#define gen_op_load_T0_ir31 gen_op_reset_T0 | |
| 111 | -#define gen_op_load_T1_ir31 gen_op_reset_T1 | |
| 112 | -#define gen_op_load_T2_ir31 gen_op_reset_T2 | |
| 113 | -#define gen_op_store_T0_ir31 gen_op_nop | |
| 114 | -#define gen_op_store_T1_ir31 gen_op_nop | |
| 115 | -#define gen_op_store_T2_ir31 gen_op_nop | |
| 116 | 128 | #define gen_op_cmov_ir31 gen_op_nop |
| 117 | -GEN32(gen_op_load_T0_ir, gen_op_load_T0_ir); | |
| 118 | -GEN32(gen_op_load_T1_ir, gen_op_load_T1_ir); | |
| 119 | -GEN32(gen_op_load_T2_ir, gen_op_load_T2_ir); | |
| 120 | -GEN32(gen_op_store_T0_ir, gen_op_store_T0_ir); | |
| 121 | -GEN32(gen_op_store_T1_ir, gen_op_store_T1_ir); | |
| 122 | -GEN32(gen_op_store_T2_ir, gen_op_store_T2_ir); | |
| 123 | 129 | GEN32(gen_op_cmov_ir, gen_op_cmov_ir); |
| 124 | 130 | |
| 125 | -static always_inline void gen_load_ir (DisasContext *ctx, int irn, int Tn) | |
| 126 | -{ | |
| 127 | - switch (Tn) { | |
| 128 | - case 0: | |
| 129 | - gen_op_load_T0_ir(irn); | |
| 130 | - break; | |
| 131 | - case 1: | |
| 132 | - gen_op_load_T1_ir(irn); | |
| 133 | - break; | |
| 134 | - case 2: | |
| 135 | - gen_op_load_T2_ir(irn); | |
| 136 | - break; | |
| 137 | - } | |
| 138 | -} | |
| 139 | - | |
| 140 | -static always_inline void gen_store_ir (DisasContext *ctx, int irn, int Tn) | |
| 141 | -{ | |
| 142 | - switch (Tn) { | |
| 143 | - case 0: | |
| 144 | - gen_op_store_T0_ir(irn); | |
| 145 | - break; | |
| 146 | - case 1: | |
| 147 | - gen_op_store_T1_ir(irn); | |
| 148 | - break; | |
| 149 | - case 2: | |
| 150 | - gen_op_store_T2_ir(irn); | |
| 151 | - break; | |
| 152 | - } | |
| 153 | -} | |
| 154 | - | |
| 155 | -static inline void get_ir (TCGv t, int reg) | |
| 156 | -{ | |
| 157 | - if (reg == 31) | |
| 158 | - tcg_gen_movi_i64(t, 0); | |
| 159 | - else | |
| 160 | - tcg_gen_mov_i64(t, cpu_ir[reg]); | |
| 161 | -} | |
| 162 | - | |
| 163 | -static inline void set_ir (TCGv t, int reg) | |
| 164 | -{ | |
| 165 | - if (reg != 31) | |
| 166 | - tcg_gen_mov_i64(cpu_ir[reg], t); | |
| 167 | -} | |
| 168 | - | |
| 169 | 131 | /* FIR moves */ |
| 170 | 132 | /* Special hacks for fir31 */ |
| 171 | 133 | #define gen_op_load_FT0_fir31 gen_op_reset_FT0 |
| ... | ... | @@ -280,120 +242,6 @@ GEN_ST(s); |
| 280 | 242 | GEN_LD(t); |
| 281 | 243 | GEN_ST(t); |
| 282 | 244 | |
| 283 | -#if defined(__i386__) || defined(__x86_64__) | |
| 284 | -static always_inline void gen_op_set_s16_T0 (int16_t imm) | |
| 285 | -{ | |
| 286 | - gen_op_set_s32_T0((int32_t)imm); | |
| 287 | -} | |
| 288 | - | |
| 289 | -static always_inline void gen_op_set_s16_T1 (int16_t imm) | |
| 290 | -{ | |
| 291 | - gen_op_set_s32_T1((int32_t)imm); | |
| 292 | -} | |
| 293 | - | |
| 294 | -static always_inline void gen_op_set_u16_T0 (uint16_t imm) | |
| 295 | -{ | |
| 296 | - gen_op_set_s32_T0((uint32_t)imm); | |
| 297 | -} | |
| 298 | - | |
| 299 | -static always_inline void gen_op_set_u16_T1 (uint16_t imm) | |
| 300 | -{ | |
| 301 | - gen_op_set_s32_T1((uint32_t)imm); | |
| 302 | -} | |
| 303 | -#endif | |
| 304 | - | |
| 305 | -static always_inline void gen_set_sT0 (DisasContext *ctx, int64_t imm) | |
| 306 | -{ | |
| 307 | - int32_t imm32; | |
| 308 | - int16_t imm16; | |
| 309 | - | |
| 310 | - imm32 = imm; | |
| 311 | - if (imm32 == imm) { | |
| 312 | - imm16 = imm; | |
| 313 | - if (imm16 == imm) { | |
| 314 | - if (imm == 0) { | |
| 315 | - gen_op_reset_T0(); | |
| 316 | - } else { | |
| 317 | - gen_op_set_s16_T0(imm16); | |
| 318 | - } | |
| 319 | - } else { | |
| 320 | - gen_op_set_s32_T0(imm32); | |
| 321 | - } | |
| 322 | - } else { | |
| 323 | -#if 0 // Qemu does not know how to do this... | |
| 324 | - gen_op_set_64_T0(imm); | |
| 325 | -#else | |
| 326 | - gen_op_set_64_T0(imm >> 32, imm); | |
| 327 | -#endif | |
| 328 | - } | |
| 329 | -} | |
| 330 | - | |
| 331 | -static always_inline void gen_set_sT1 (DisasContext *ctx, int64_t imm) | |
| 332 | -{ | |
| 333 | - int32_t imm32; | |
| 334 | - int16_t imm16; | |
| 335 | - | |
| 336 | - imm32 = imm; | |
| 337 | - if (imm32 == imm) { | |
| 338 | - imm16 = imm; | |
| 339 | - if (imm16 == imm) { | |
| 340 | - if (imm == 0) { | |
| 341 | - gen_op_reset_T1(); | |
| 342 | - } else { | |
| 343 | - gen_op_set_s16_T1(imm16); | |
| 344 | - } | |
| 345 | - } else { | |
| 346 | - gen_op_set_s32_T1(imm32); | |
| 347 | - } | |
| 348 | - } else { | |
| 349 | -#if 0 // Qemu does not know how to do this... | |
| 350 | - gen_op_set_64_T1(imm); | |
| 351 | -#else | |
| 352 | - gen_op_set_64_T1(imm >> 32, imm); | |
| 353 | -#endif | |
| 354 | - } | |
| 355 | -} | |
| 356 | - | |
| 357 | -static always_inline void gen_set_uT0 (DisasContext *ctx, uint64_t imm) | |
| 358 | -{ | |
| 359 | - if (!(imm >> 32)) { | |
| 360 | - if ((!imm >> 16)) { | |
| 361 | - if (imm == 0) | |
| 362 | - gen_op_reset_T0(); | |
| 363 | - else | |
| 364 | - gen_op_set_u16_T0(imm); | |
| 365 | - } else { | |
| 366 | - gen_op_set_u32_T0(imm); | |
| 367 | - } | |
| 368 | - } else { | |
| 369 | -#if 0 // Qemu does not know how to do this... | |
| 370 | - gen_op_set_64_T0(imm); | |
| 371 | -#else | |
| 372 | - gen_op_set_64_T0(imm >> 32, imm); | |
| 373 | -#endif | |
| 374 | - } | |
| 375 | -} | |
| 376 | - | |
| 377 | -static always_inline void gen_set_uT1 (DisasContext *ctx, uint64_t imm) | |
| 378 | -{ | |
| 379 | - if (!(imm >> 32)) { | |
| 380 | - if ((!imm >> 16)) { | |
| 381 | - if (imm == 0) | |
| 382 | - gen_op_reset_T1(); | |
| 383 | - else | |
| 384 | - gen_op_set_u16_T1(imm); | |
| 385 | - } else { | |
| 386 | - gen_op_set_u32_T1(imm); | |
| 387 | - } | |
| 388 | - } else { | |
| 389 | -#if 0 // Qemu does not know how to do this... | |
| 390 | - gen_op_set_64_T1(imm); | |
| 391 | -#else | |
| 392 | - gen_op_set_64_T1(imm >> 32, imm); | |
| 393 | -#endif | |
| 394 | - } | |
| 395 | -} | |
| 396 | - | |
| 397 | 245 | static always_inline void _gen_op_bcond (DisasContext *ctx) |
| 398 | 246 | { |
| 399 | 247 | #if 0 // Qemu does not know how to do this... |
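
The helpers removed in the hunk above existed because dyngen micro-op parameters were 32 bits wide: each immediate load picked the narrowest op (16-bit, 32-bit, or the two-parameter 64-bit form), and a full 64-bit constant had to be split and recombined. tcg_gen_movi_i64() takes the 64-bit constant directly, so all of this folds into one call. A standalone check of the split-and-recombine trick the removed op_set_64_T relied on (plain C, illustrative only; the low half is recombined as an unsigned value here):

#include <stdint.h>
#include <stdio.h>

/* Dyngen parameters were 32 bits wide, so a 64-bit immediate was passed
 * as (imm >> 32, imm) and rebuilt as (hi << 32) | lo. */
static uint64_t recombine(uint32_t hi, uint32_t lo)
{
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    uint64_t imm = 0x123456789abcdef0ULL;
    uint64_t r = recombine((uint32_t)(imm >> 32), (uint32_t)imm);
    printf("%s\n", r == imm ? "round-trips" : "mismatch");
    return 0;
}
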
| ... | ... | @@ -424,15 +272,19 @@ static always_inline void gen_load_mem (DisasContext *ctx, |
| 424 | 272 | /* UNOP */ |
| 425 | 273 | gen_op_nop(); |
| 426 | 274 | } else { |
| 427 | - gen_load_ir(ctx, rb, 0); | |
| 275 | + if (rb != 31) | |
| 276 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); | |
| 277 | + else | |
| 278 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 428 | 279 | if (disp16 != 0) { |
| 429 | - gen_set_sT1(ctx, disp16); | |
| 280 | + tcg_gen_movi_i64(cpu_T[1], disp16); | |
| 430 | 281 | gen_op_addq(); |
| 431 | 282 | } |
| 432 | 283 | if (clear) |
| 433 | 284 | gen_op_n7(); |
| 434 | 285 | (*gen_load_op)(ctx); |
| 435 | - gen_store_ir(ctx, ra, 1); | |
| 286 | + if (ra != 31) | |
| 287 | + tcg_gen_mov_i64(cpu_ir[ra], cpu_T[1]); | |
| 436 | 288 | } |
| 437 | 289 | } |
| 438 | 290 | |
| ... | ... | @@ -441,14 +293,20 @@ static always_inline void gen_store_mem (DisasContext *ctx, |
| 441 | 293 | int ra, int rb, int32_t disp16, |
| 442 | 294 | int clear) |
| 443 | 295 | { |
| 444 | - gen_load_ir(ctx, rb, 0); | |
| 296 | + if (rb != 31) | |
| 297 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); | |
| 298 | + else | |
| 299 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 445 | 300 | if (disp16 != 0) { |
| 446 | - gen_set_sT1(ctx, disp16); | |
| 301 | + tcg_gen_movi_i64(cpu_T[1], disp16); | |
| 447 | 302 | gen_op_addq(); |
| 448 | 303 | } |
| 449 | 304 | if (clear) |
| 450 | 305 | gen_op_n7(); |
| 451 | - gen_load_ir(ctx, ra, 1); | |
| 306 | + if (ra != 31) | |
| 307 | + tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]); | |
| 308 | + else | |
| 309 | + tcg_gen_movi_i64(cpu_T[1], 0); | |
| 452 | 310 | (*gen_store_op)(ctx); |
| 453 | 311 | } |
| 454 | 312 | |
| ... | ... | @@ -456,9 +314,12 @@ static always_inline void gen_load_fmem (DisasContext *ctx, |
| 456 | 314 | void (*gen_load_fop)(DisasContext *ctx), |
| 457 | 315 | int ra, int rb, int32_t disp16) |
| 458 | 316 | { |
| 459 | - gen_load_ir(ctx, rb, 0); | |
| 317 | + if (rb != 31) | |
| 318 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); | |
| 319 | + else | |
| 320 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 460 | 321 | if (disp16 != 0) { |
| 461 | - gen_set_sT1(ctx, disp16); | |
| 322 | + tcg_gen_movi_i64(cpu_T[1], disp16); | |
| 462 | 323 | gen_op_addq(); |
| 463 | 324 | } |
| 464 | 325 | (*gen_load_fop)(ctx); |
| ... | ... | @@ -469,9 +330,12 @@ static always_inline void gen_store_fmem (DisasContext *ctx, |
| 469 | 330 | void (*gen_store_fop)(DisasContext *ctx), |
| 470 | 331 | int ra, int rb, int32_t disp16) |
| 471 | 332 | { |
| 472 | - gen_load_ir(ctx, rb, 0); | |
| 333 | + if (rb != 31) | |
| 334 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); | |
| 335 | + else | |
| 336 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 473 | 337 | if (disp16 != 0) { |
| 474 | - gen_set_sT1(ctx, disp16); | |
| 338 | + tcg_gen_movi_i64(cpu_T[1], disp16); | |
| 475 | 339 | gen_op_addq(); |
| 476 | 340 | } |
| 477 | 341 | gen_load_fir(ctx, ra, 1); |
| ... | ... | @@ -483,13 +347,16 @@ static always_inline void gen_bcond (DisasContext *ctx, |
| 483 | 347 | int ra, int32_t disp16) |
| 484 | 348 | { |
| 485 | 349 | if (disp16 != 0) { |
| 486 | - gen_set_uT0(ctx, ctx->pc); | |
| 487 | - gen_set_sT1(ctx, disp16 << 2); | |
| 350 | + tcg_gen_movi_i64(cpu_T[0], ctx->pc); | |
| 351 | + tcg_gen_movi_i64(cpu_T[1], disp16 << 2); | |
| 488 | 352 | gen_op_addq1(); |
| 489 | 353 | } else { |
| 490 | - gen_set_uT1(ctx, ctx->pc); | |
| 354 | + tcg_gen_movi_i64(cpu_T[1], ctx->pc); | |
| 491 | 355 | } |
| 492 | - gen_load_ir(ctx, ra, 0); | |
| 356 | + if (ra != 31) | |
| 357 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]); | |
| 358 | + else | |
| 359 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 493 | 360 | (*gen_test_op)(); |
| 494 | 361 | _gen_op_bcond(ctx); |
| 495 | 362 | } |
| ... | ... | @@ -499,11 +366,11 @@ static always_inline void gen_fbcond (DisasContext *ctx, |
| 499 | 366 | int ra, int32_t disp16) |
| 500 | 367 | { |
| 501 | 368 | if (disp16 != 0) { |
| 502 | - gen_set_uT0(ctx, ctx->pc); | |
| 503 | - gen_set_sT1(ctx, disp16 << 2); | |
| 369 | + tcg_gen_movi_i64(cpu_T[0], ctx->pc); | |
| 370 | + tcg_gen_movi_i64(cpu_T[1], disp16 << 2); | |
| 504 | 371 | gen_op_addq1(); |
| 505 | 372 | } else { |
| 506 | - gen_set_uT1(ctx, ctx->pc); | |
| 373 | + tcg_gen_movi_i64(cpu_T[1], ctx->pc); | |
| 507 | 374 | } |
| 508 | 375 | gen_load_fir(ctx, ra, 0); |
| 509 | 376 | (*gen_test_op)(); |
| ... | ... | @@ -515,11 +382,14 @@ static always_inline void gen_arith2 (DisasContext *ctx, |
| 515 | 382 | int rb, int rc, int islit, int8_t lit) |
| 516 | 383 | { |
| 517 | 384 | if (islit) |
| 518 | - gen_set_sT0(ctx, lit); | |
| 385 | + tcg_gen_movi_i64(cpu_T[0], lit); | |
| 386 | + else if (rb != 31) | |
| 387 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); | |
| 519 | 388 | else |
| 520 | - gen_load_ir(ctx, rb, 0); | |
| 389 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 521 | 390 | (*gen_arith_op)(); |
| 522 | - gen_store_ir(ctx, rc, 0); | |
| 391 | + if (rc != 31) | |
| 392 | + tcg_gen_mov_i64(cpu_ir[rc], cpu_T[0]); | |
| 523 | 393 | } |
| 524 | 394 | |
| 525 | 395 | static always_inline void gen_arith3 (DisasContext *ctx, |
| ... | ... | @@ -527,13 +397,19 @@ static always_inline void gen_arith3 (DisasContext *ctx, |
| 527 | 397 | int ra, int rb, int rc, |
| 528 | 398 | int islit, int8_t lit) |
| 529 | 399 | { |
| 530 | - gen_load_ir(ctx, ra, 0); | |
| 400 | + if (ra != 31) | |
| 401 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]); | |
| 402 | + else | |
| 403 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 531 | 404 | if (islit) |
| 532 | - gen_set_sT1(ctx, lit); | |
| 405 | + tcg_gen_movi_i64(cpu_T[1], lit); | |
| 406 | + else if (rb != 31) | |
| 407 | + tcg_gen_mov_i64(cpu_T[1], cpu_ir[rb]); | |
| 533 | 408 | else |
| 534 | - gen_load_ir(ctx, rb, 1); | |
| 409 | + tcg_gen_movi_i64(cpu_T[1], 0); | |
| 535 | 410 | (*gen_arith_op)(); |
| 536 | - gen_store_ir(ctx, rc, 0); | |
| 411 | + if (rc != 31) | |
| 412 | + tcg_gen_mov_i64(cpu_ir[rc], cpu_T[0]); | |
| 537 | 413 | } |
| 538 | 414 | |
| 539 | 415 | static always_inline void gen_cmov (DisasContext *ctx, |
| ... | ... | @@ -541,11 +417,16 @@ static always_inline void gen_cmov (DisasContext *ctx, |
| 541 | 417 | int ra, int rb, int rc, |
| 542 | 418 | int islit, int8_t lit) |
| 543 | 419 | { |
| 544 | - gen_load_ir(ctx, ra, 1); | |
| 420 | + if (ra != 31) | |
| 421 | + tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]); | |
| 422 | + else | |
| 423 | + tcg_gen_movi_i64(cpu_T[1], 0); | |
| 545 | 424 | if (islit) |
| 546 | - gen_set_sT0(ctx, lit); | |
| 425 | + tcg_gen_movi_i64(cpu_T[0], lit); | |
| 426 | + else if (rb != 31) | |
| 427 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); | |
| 547 | 428 | else |
| 548 | - gen_load_ir(ctx, rb, 0); | |
| 429 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 549 | 430 | (*gen_test_op)(); |
| 550 | 431 | gen_op_cmov_ir(rc); |
| 551 | 432 | } |
| ... | ... | @@ -585,14 +466,18 @@ static always_inline void gen_fti (DisasContext *ctx, |
| 585 | 466 | { |
| 586 | 467 | gen_load_fir(ctx, rc, 0); |
| 587 | 468 | (*gen_move_fop)(); |
| 588 | - gen_store_ir(ctx, ra, 0); | |
| 469 | + if (ra != 31) | |
| 470 | + tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]); | |
| 589 | 471 | } |
| 590 | 472 | |
| 591 | 473 | static always_inline void gen_itf (DisasContext *ctx, |
| 592 | 474 | void (*gen_move_fop)(void), |
| 593 | 475 | int ra, int rc) |
| 594 | 476 | { |
| 595 | - gen_load_ir(ctx, ra, 0); | |
| 477 | + if (ra != 31) | |
| 478 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]); | |
| 479 | + else | |
| 480 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 596 | 481 | (*gen_move_fop)(); |
| 597 | 482 | gen_store_fir(ctx, rc, 0); |
| 598 | 483 | } |
| ... | ... | @@ -727,22 +612,20 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 727 | 612 | goto invalid_opc; |
| 728 | 613 | case 0x08: |
| 729 | 614 | /* LDA */ |
| 730 | - { | |
| 731 | - TCGv v = tcg_const_i64(disp16); | |
| 615 | + if (ra != 31) { | |
| 732 | 616 | if (rb != 31) |
| 733 | - tcg_gen_add_i64(v, cpu_ir[rb], v); | |
| 734 | - set_ir(v, ra); | |
| 735 | - tcg_temp_free(v); | |
| 617 | + tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16); | |
| 618 | + else | |
| 619 | + tcg_gen_movi_i64(cpu_ir[ra], disp16); | |
| 736 | 620 | } |
| 737 | 621 | break; |
| 738 | 622 | case 0x09: |
| 739 | 623 | /* LDAH */ |
| 740 | - { | |
| 741 | - TCGv v = tcg_const_i64(disp16 << 16); | |
| 624 | + if (ra != 31) { | |
| 742 | 625 | if (rb != 31) |
| 743 | - tcg_gen_add_i64(v, cpu_ir[rb], v); | |
| 744 | - set_ir(v, ra); | |
| 745 | - tcg_temp_free(v); | |
| 626 | + tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16); | |
| 627 | + else | |
| 628 | + tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16); | |
| 746 | 629 | } |
| 747 | 630 | break; |
| 748 | 631 | case 0x0A: |
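
With direct register access, LDA and LDAH collapse to a single tcg_gen_addi_i64()/tcg_gen_movi_i64() and are skipped entirely when ra is r31 (an architectural no-op). For reference, a plain-C model of the values they compute (not translator code): LDA adds the sign-extended 16-bit displacement, LDAH adds it shifted left by 16, and rb == r31 contributes zero.

#include <stdint.h>
#include <stdio.h>

static uint64_t lda(uint64_t rbv, int16_t disp16)
{
    return rbv + (int64_t)disp16;           /* ra = rbv + sext(disp16) */
}

static uint64_t ldah(uint64_t rbv, int16_t disp16)
{
    return rbv + ((int64_t)disp16 << 16);   /* ra = rbv + sext(disp16) * 65536 */
}

int main(void)
{
    printf("lda : %llx\n", (unsigned long long)lda(0x1000, -8));  /* 0xff8 */
    printf("ldah: %llx\n", (unsigned long long)ldah(0, 1));       /* 0x10000 */
    return 0;
}
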
| ... | ... | @@ -897,8 +780,12 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 897 | 780 | gen_op_nop(); |
| 898 | 781 | } else { |
| 899 | 782 | /* MOV */ |
| 900 | - gen_load_ir(ctx, rb, 0); | |
| 901 | - gen_store_ir(ctx, rc, 0); | |
| 783 | + if (rc != 31) { | |
| 784 | + if (rb != 31) | |
| 785 | + tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); | |
| 786 | + else | |
| 787 | + tcg_gen_movi_i64(cpu_ir[rc], 0); | |
| 788 | + } | |
| 902 | 789 | } |
| 903 | 790 | } else { |
| 904 | 791 | gen_arith3(ctx, &gen_op_bis, ra, rb, rc, islit, lit); |
| ... | ... | @@ -947,7 +834,8 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 947 | 834 | case 0x6C: |
| 948 | 835 | /* IMPLVER */ |
| 949 | 836 | gen_op_load_implver(); |
| 950 | - gen_store_ir(ctx, rc, 0); | |
| 837 | + if (rc != 31) | |
| 838 | + tcg_gen_mov_i64(cpu_ir[rc], cpu_T[0]); | |
| 951 | 839 | break; |
| 952 | 840 | default: |
| 953 | 841 | goto invalid_opc; |
| ... | ... | @@ -1413,12 +1301,14 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 1413 | 1301 | case 0xC000: |
| 1414 | 1302 | /* RPCC */ |
| 1415 | 1303 | gen_op_load_pcc(); |
| 1416 | - gen_store_ir(ctx, ra, 0); | |
| 1304 | + if (ra != 31) | |
| 1305 | + tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]); | |
| 1417 | 1306 | break; |
| 1418 | 1307 | case 0xE000: |
| 1419 | 1308 | /* RC */ |
| 1420 | 1309 | gen_op_load_irf(); |
| 1421 | - gen_store_ir(ctx, ra, 0); | |
| 1310 | + if (ra != 31) | |
| 1311 | + tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]); | |
| 1422 | 1312 | gen_op_clear_irf(); |
| 1423 | 1313 | break; |
| 1424 | 1314 | case 0xE800: |
| ... | ... | @@ -1433,7 +1323,8 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 1433 | 1323 | case 0xF000: |
| 1434 | 1324 | /* RS */ |
| 1435 | 1325 | gen_op_load_irf(); |
| 1436 | - gen_store_ir(ctx, ra, 0); | |
| 1326 | + if (ra != 31) | |
| 1327 | + tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]); | |
| 1437 | 1328 | gen_op_set_irf(); |
| 1438 | 1329 | break; |
| 1439 | 1330 | case 0xF800: |
| ... | ... | @@ -1452,16 +1343,17 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 1452 | 1343 | if (!ctx->pal_mode) |
| 1453 | 1344 | goto invalid_opc; |
| 1454 | 1345 | gen_op_mfpr(insn & 0xFF); |
| 1455 | - gen_store_ir(ctx, ra, 0); | |
| 1346 | + if (ra != 31) | |
| 1347 | + tcg_gen_mov_i64(cpu_ir[ra], cpu_T[0]); | |
| 1456 | 1348 | break; |
| 1457 | 1349 | #endif |
| 1458 | 1350 | case 0x1A: |
| 1459 | - gen_load_ir(ctx, rb, 0); | |
| 1460 | - if (ra != 31) { | |
| 1461 | - gen_set_uT1(ctx, ctx->pc); | |
| 1462 | - gen_store_ir(ctx, ra, 1); | |
| 1463 | - } | |
| 1464 | - gen_op_branch(); | |
| 1351 | + if (ra != 31) | |
| 1352 | + tcg_gen_movi_i64(cpu_ir[ra], ctx->pc); | |
| 1353 | + if (rb != 31) | |
| 1354 | + tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3); | |
| 1355 | + else | |
| 1356 | + tcg_gen_movi_i64(cpu_pc, 0); | |
| 1465 | 1357 | /* Those four jumps only differ by the branch prediction hint */ |
| 1466 | 1358 | switch (fn2) { |
| 1467 | 1359 | case 0x0: |
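
The indirect jumps (opcode 0x1A: JMP/JSR/RET/JSR_COROUTINE) now write the return address and the new PC directly instead of going through gen_op_branch(). The low two bits of the target are cleared because Alpha instructions are longword-aligned; a plain-C model of that target computation (illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint64_t jmp_target(uint64_t rbv)
{
    return rbv & ~(uint64_t)3;   /* new PC = Rbv with bits 1:0 cleared */
}

int main(void)
{
    printf("%llx\n", (unsigned long long)jmp_target(0x120001003ULL)); /* 0x120001000 */
    return 0;
}
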
| ... | ... | @@ -1486,8 +1378,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 1486 | 1378 | #else |
| 1487 | 1379 | if (!ctx->pal_mode) |
| 1488 | 1380 | goto invalid_opc; |
| 1489 | - gen_load_ir(ctx, rb, 0); | |
| 1490 | - gen_set_sT1(ctx, disp12); | |
| 1381 | + if (rb != 31) | |
| 1382 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); | |
| 1383 | + else | |
| 1384 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 1385 | + tcg_gen_movi_i64(cpu_T[1], disp12); | |
| 1491 | 1386 | gen_op_addq(); |
| 1492 | 1387 | switch ((insn >> 12) & 0xF) { |
| 1493 | 1388 | case 0x0: |
| ... | ... | @@ -1569,7 +1464,8 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 1569 | 1464 | gen_op_restore_mode(); |
| 1570 | 1465 | break; |
| 1571 | 1466 | } |
| 1572 | - gen_store_ir(ctx, ra, 1); | |
| 1467 | + if (ra != 31) | |
| 1468 | + tcg_gen_mov_i64(cpu_ir[ra], cpu_T[1]); | |
| 1573 | 1469 | break; |
| 1574 | 1470 | #endif |
| 1575 | 1471 | case 0x1C: |
| ... | ... | @@ -1718,7 +1614,10 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 1718 | 1614 | #else |
| 1719 | 1615 | if (!ctx->pal_mode) |
| 1720 | 1616 | goto invalid_opc; |
| 1721 | - gen_load_ir(ctx, ra, 0); | |
| 1617 | + if (ra != 31) | |
| 1618 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]); | |
| 1619 | + else | |
| 1620 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 1722 | 1621 | gen_op_mtpr(insn & 0xFF); |
| 1723 | 1622 | ret = 2; |
| 1724 | 1623 | break; |
| ... | ... | @@ -1734,8 +1633,11 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 1734 | 1633 | /* "Old" alpha */ |
| 1735 | 1634 | gen_op_hw_rei(); |
| 1736 | 1635 | } else { |
| 1737 | - gen_load_ir(ctx, rb, 0); | |
| 1738 | - gen_set_uT1(ctx, (((int64_t)insn << 51) >> 51)); | |
| 1636 | + if (ra != 31) | |
| 1637 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); | |
| 1638 | + else | |
| 1639 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 1640 | + tcg_gen_movi_i64(cpu_T[1], (((int64_t)insn << 51) >> 51)); | |
| 1739 | 1641 | gen_op_addq(); |
| 1740 | 1642 | gen_op_hw_ret(); |
| 1741 | 1643 | } |
| ... | ... | @@ -1749,10 +1651,16 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 1749 | 1651 | #else |
| 1750 | 1652 | if (!ctx->pal_mode) |
| 1751 | 1653 | goto invalid_opc; |
| 1752 | - gen_load_ir(ctx, rb, 0); | |
| 1753 | - gen_set_sT1(ctx, disp12); | |
| 1654 | + if (ra != 31) | |
| 1655 | + tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); | |
| 1656 | + else | |
| 1657 | + tcg_gen_movi_i64(cpu_T[0], 0); | |
| 1658 | + tcg_gen_movi_i64(cpu_T[1], disp12); | |
| 1754 | 1659 | gen_op_addq(); |
| 1755 | - gen_load_ir(ctx, ra, 1); | |
| 1660 | + if (ra != 31) | |
| 1661 | + tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]); | |
| 1662 | + else | |
| 1663 | + tcg_gen_movi_i64(cpu_T[1], 0); | |
| 1756 | 1664 | switch ((insn >> 12) & 0xF) { |
| 1757 | 1665 | case 0x0: |
| 1758 | 1666 | /* Longword physical access */ |
| ... | ... | @@ -1904,12 +1812,9 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 1904 | 1812 | break; |
| 1905 | 1813 | case 0x30: |
| 1906 | 1814 | /* BR */ |
| 1907 | - if (ra != 31) { | |
| 1908 | - TCGv t = tcg_const_i64(ctx->pc); | |
| 1909 | - set_ir(t, ra); | |
| 1910 | - tcg_temp_free(t); | |
| 1911 | - } | |
| 1912 | - tcg_gen_movi_i64(cpu_pc, ctx->pc + (disp21 << 2)); | |
| 1815 | + if (ra != 31) | |
| 1816 | + tcg_gen_movi_i64(cpu_ir[ra], ctx->pc); | |
| 1817 | + tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2)); | |
| 1913 | 1818 | ret = 1; |
| 1914 | 1819 | break; |
| 1915 | 1820 | case 0x31: |
| ... | ... | @@ -1929,13 +1834,9 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn) |
| 1929 | 1834 | break; |
| 1930 | 1835 | case 0x34: |
| 1931 | 1836 | /* BSR */ |
| 1932 | - gen_set_uT0(ctx, ctx->pc); | |
| 1933 | - gen_store_ir(ctx, ra, 0); | |
| 1934 | - if (disp21 != 0) { | |
| 1935 | - gen_set_sT1(ctx, disp21 << 2); | |
| 1936 | - gen_op_addq(); | |
| 1937 | - } | |
| 1938 | - gen_op_branch(); | |
| 1837 | + if (ra != 31) | |
| 1838 | + tcg_gen_movi_i64(cpu_ir[ra], ctx->pc); | |
| 1839 | + tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2)); | |
| 1939 | 1840 | ret = 1; |
| 1940 | 1841 | break; |
| 1941 | 1842 | case 0x35: | ... | ... |
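
BR and BSR likewise become a conditional link-register write plus a tcg_gen_movi_i64() of the precomputed target. A plain-C model of that target (not translator code): the sign-extended 21-bit displacement is scaled by 4 and added to the address of the instruction following the branch, which is what ctx->pc holds at this point.

#include <stdint.h>
#include <stdio.h>

/* pc_next is the address of the instruction following the branch;
 * disp21 is assumed already sign-extended into an int32_t. */
static uint64_t br_target(uint64_t pc_next, int32_t disp21)
{
    return pc_next + ((int64_t)disp21 << 2);
}

int main(void)
{
    printf("%llx\n", (unsigned long long)br_target(0x120000010ULL, -4)); /* 0x120000000 */
    return 0;
}
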