Commit 4f821e17574896c046c11bc073c1dc636a6f15d6

Authored by aurel32
1 parent 3761035f

alpha: convert a few more instructions to TCG

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5152 c046a42c-6fe2-441c-8c8c-71466251a162
target-alpha/op.c
@@ -148,13 +148,6 @@ void OPPROTO op_no_op (void)
148 #include "op_mem.h" 148 #include "op_mem.h"
149 #endif 149 #endif
150 150
151 -/* Special operation for load and store */  
152 -void OPPROTO op_n7 (void)  
153 -{  
154 - T0 &= ~(uint64_t)0x7;  
155 - RETURN();  
156 -}  
157 -  
158 /* Misc */ 151 /* Misc */
159 void OPPROTO op_excp (void) 152 void OPPROTO op_excp (void)
160 { 153 {
@@ -259,18 +252,6 @@ void OPPROTO op_sublv (void)
259 RETURN(); 252 RETURN();
260 } 253 }
261 254
262 -void OPPROTO op_s4 (void)  
263 -{  
264 - T0 <<= 2;  
265 - RETURN();  
266 -}  
267 -  
268 -void OPPROTO op_s8 (void)  
269 -{  
270 - T0 <<= 3;  
271 - RETURN();  
272 -}  
273 -  
274 void OPPROTO op_mull (void) 255 void OPPROTO op_mull (void)
275 { 256 {
276 T0 = (int64_t)((int32_t)T0 * (int32_t)T1); 257 T0 = (int64_t)((int32_t)T0 * (int32_t)T1);
@@ -646,19 +627,6 @@ void OPPROTO op_cmplbc (void)
646 RETURN(); 627 RETURN();
647 } 628 }
648 629
649 -/* Branches */  
650 -void OPPROTO op_branch (void)  
651 -{  
652 - env->pc = T0 & ~3;  
653 - RETURN();  
654 -}  
655 -  
656 -void OPPROTO op_addq1 (void)  
657 -{  
658 - T1 += T0;  
659 - RETURN();  
660 -}  
661 -  
662 #if 0 // Qemu does not know how to do this... 630 #if 0 // Qemu does not know how to do this...
663 void OPPROTO op_bcond (void) 631 void OPPROTO op_bcond (void)
664 { 632 {
target-alpha/translate.c
@@ -273,15 +273,11 @@ static always_inline void gen_load_mem (DisasContext *ctx,
273 gen_op_nop(); 273 gen_op_nop();
274 } else { 274 } else {
275 if (rb != 31) 275 if (rb != 31)
276 - tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); 276 + tcg_gen_addi_i64(cpu_T[0], cpu_ir[rb], disp16);
277 else 277 else
278 - tcg_gen_movi_i64(cpu_T[0], 0);  
279 - if (disp16 != 0) {  
280 - tcg_gen_movi_i64(cpu_T[1], disp16);  
281 - gen_op_addq();  
282 - } 278 + tcg_gen_movi_i64(cpu_T[0], disp16);
283 if (clear) 279 if (clear)
284 - gen_op_n7(); 280 + tcg_gen_andi_i64(cpu_T[0], cpu_T[0], ~0x7);
285 (*gen_load_op)(ctx); 281 (*gen_load_op)(ctx);
286 if (ra != 31) 282 if (ra != 31)
287 tcg_gen_mov_i64(cpu_ir[ra], cpu_T[1]); 283 tcg_gen_mov_i64(cpu_ir[ra], cpu_T[1]);
@@ -294,15 +290,11 @@ static always_inline void gen_store_mem (DisasContext *ctx,
294 int clear) 290 int clear)
295 { 291 {
296 if (rb != 31) 292 if (rb != 31)
297 - tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); 293 + tcg_gen_addi_i64(cpu_T[0], cpu_ir[rb], disp16);
298 else 294 else
299 - tcg_gen_movi_i64(cpu_T[0], 0);  
300 - if (disp16 != 0) {  
301 - tcg_gen_movi_i64(cpu_T[1], disp16);  
302 - gen_op_addq();  
303 - } 295 + tcg_gen_movi_i64(cpu_T[0], disp16);
304 if (clear) 296 if (clear)
305 - gen_op_n7(); 297 + tcg_gen_andi_i64(cpu_T[0], cpu_T[0], ~0x7);
306 if (ra != 31) 298 if (ra != 31)
307 tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]); 299 tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]);
308 else 300 else
@@ -315,13 +307,9 @@ static always_inline void gen_load_fmem (DisasContext *ctx,
315 int ra, int rb, int32_t disp16) 307 int ra, int rb, int32_t disp16)
316 { 308 {
317 if (rb != 31) 309 if (rb != 31)
318 - tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); 310 + tcg_gen_addi_i64(cpu_T[0], cpu_ir[rb], disp16);
319 else 311 else
320 - tcg_gen_movi_i64(cpu_T[0], 0);  
321 - if (disp16 != 0) {  
322 - tcg_gen_movi_i64(cpu_T[1], disp16);  
323 - gen_op_addq();  
324 - } 312 + tcg_gen_movi_i64(cpu_T[0], disp16);
325 (*gen_load_fop)(ctx); 313 (*gen_load_fop)(ctx);
326 gen_store_fir(ctx, ra, 1); 314 gen_store_fir(ctx, ra, 1);
327 } 315 }
@@ -331,13 +319,9 @@ static always_inline void gen_store_fmem (DisasContext *ctx,
331 int ra, int rb, int32_t disp16) 319 int ra, int rb, int32_t disp16)
332 { 320 {
333 if (rb != 31) 321 if (rb != 31)
334 - tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); 322 + tcg_gen_addi_i64(cpu_T[0], cpu_ir[rb], disp16);
335 else 323 else
336 - tcg_gen_movi_i64(cpu_T[0], 0);  
337 - if (disp16 != 0) {  
338 - tcg_gen_movi_i64(cpu_T[1], disp16);  
339 - gen_op_addq();  
340 - } 324 + tcg_gen_movi_i64(cpu_T[0], disp16);
341 gen_load_fir(ctx, ra, 1); 325 gen_load_fir(ctx, ra, 1);
342 (*gen_store_fop)(ctx); 326 (*gen_store_fop)(ctx);
343 } 327 }
@@ -346,13 +330,7 @@ static always_inline void gen_bcond (DisasContext *ctx,
346 void (*gen_test_op)(void), 330 void (*gen_test_op)(void),
347 int ra, int32_t disp16) 331 int ra, int32_t disp16)
348 { 332 {
349 - if (disp16 != 0) {  
350 - tcg_gen_movi_i64(cpu_T[0], ctx->pc);  
351 - tcg_gen_movi_i64(cpu_T[1], disp16 << 2);  
352 - gen_op_addq1();  
353 - } else {  
354 - tcg_gen_movi_i64(cpu_T[1], ctx->pc);  
355 - } 333 + tcg_gen_movi_i64(cpu_T[1], ctx->pc + (int64_t)(disp16 << 2));
356 if (ra != 31) 334 if (ra != 31)
357 tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]); 335 tcg_gen_mov_i64(cpu_T[0], cpu_ir[ra]);
358 else 336 else
@@ -365,13 +343,7 @@ static always_inline void gen_fbcond (DisasContext *ctx,
365 void (*gen_test_op)(void), 343 void (*gen_test_op)(void),
366 int ra, int32_t disp16) 344 int ra, int32_t disp16)
367 { 345 {
368 - if (disp16 != 0) {  
369 - tcg_gen_movi_i64(cpu_T[0], ctx->pc);  
370 - tcg_gen_movi_i64(cpu_T[1], disp16 << 2);  
371 - gen_op_addq1();  
372 - } else {  
373 - tcg_gen_movi_i64(cpu_T[1], ctx->pc);  
374 - } 346 + tcg_gen_movi_i64(cpu_T[1], ctx->pc + (int64_t)(disp16 << 2));
375 gen_load_fir(ctx, ra, 0); 347 gen_load_fir(ctx, ra, 0);
376 (*gen_test_op)(); 348 (*gen_test_op)();
377 _gen_op_bcond(ctx); 349 _gen_op_bcond(ctx);
@@ -484,50 +456,50 @@ static always_inline void gen_itf (DisasContext *ctx,
484 456
485 static always_inline void gen_s4addl (void) 457 static always_inline void gen_s4addl (void)
486 { 458 {
487 - gen_op_s4(); 459 + tcg_gen_shli_i64(cpu_T[0], cpu_T[0], 2);
488 gen_op_addl(); 460 gen_op_addl();
489 } 461 }
490 462
491 static always_inline void gen_s4subl (void) 463 static always_inline void gen_s4subl (void)
492 { 464 {
493 - gen_op_s4(); 465 + tcg_gen_shli_i64(cpu_T[0], cpu_T[0], 2);
494 gen_op_subl(); 466 gen_op_subl();
495 } 467 }
496 468
497 static always_inline void gen_s8addl (void) 469 static always_inline void gen_s8addl (void)
498 { 470 {
499 - gen_op_s8(); 471 + tcg_gen_shli_i64(cpu_T[0], cpu_T[0], 3);
500 gen_op_addl(); 472 gen_op_addl();
501 } 473 }
502 474
503 static always_inline void gen_s8subl (void) 475 static always_inline void gen_s8subl (void)
504 { 476 {
505 - gen_op_s8(); 477 + tcg_gen_shli_i64(cpu_T[0], cpu_T[0], 3);
506 gen_op_subl(); 478 gen_op_subl();
507 } 479 }
508 480
509 static always_inline void gen_s4addq (void) 481 static always_inline void gen_s4addq (void)
510 { 482 {
511 - gen_op_s4();  
512 - gen_op_addq(); 483 + tcg_gen_shli_i64(cpu_T[0], cpu_T[0], 2);
  484 + tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
513 } 485 }
514 486
515 static always_inline void gen_s4subq (void) 487 static always_inline void gen_s4subq (void)
516 { 488 {
517 - gen_op_s4();  
518 - gen_op_subq(); 489 + tcg_gen_shli_i64(cpu_T[0], cpu_T[0], 2);
  490 + tcg_gen_sub_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
519 } 491 }
520 492
521 static always_inline void gen_s8addq (void) 493 static always_inline void gen_s8addq (void)
522 { 494 {
523 - gen_op_s8();  
524 - gen_op_addq(); 495 + tcg_gen_shli_i64(cpu_T[0], cpu_T[0], 3);
  496 + tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
525 } 497 }
526 498
527 static always_inline void gen_s8subq (void) 499 static always_inline void gen_s8subq (void)
528 { 500 {
529 - gen_op_s8();  
530 - gen_op_subq(); 501 + tcg_gen_shli_i64(cpu_T[0], cpu_T[0], 3);
  502 + tcg_gen_sub_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
531 } 503 }
532 504
533 static always_inline void gen_amask (void) 505 static always_inline void gen_amask (void)
@@ -1383,7 +1355,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
1383 else 1355 else
1384 tcg_gen_movi_i64(cpu_T[0], 0); 1356 tcg_gen_movi_i64(cpu_T[0], 0);
1385 tcg_gen_movi_i64(cpu_T[1], disp12); 1357 tcg_gen_movi_i64(cpu_T[1], disp12);
1386 - gen_op_addq(); 1358 + tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
1387 switch ((insn >> 12) & 0xF) { 1359 switch ((insn >> 12) & 0xF) {
1388 case 0x0: 1360 case 0x0:
1389 /* Longword physical access */ 1361 /* Longword physical access */
@@ -1638,7 +1610,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
1638 else 1610 else
1639 tcg_gen_movi_i64(cpu_T[0], 0); 1611 tcg_gen_movi_i64(cpu_T[0], 0);
1640 tcg_gen_movi_i64(cpu_T[1], (((int64_t)insn << 51) >> 51)); 1612 tcg_gen_movi_i64(cpu_T[1], (((int64_t)insn << 51) >> 51));
1641 - gen_op_addq(); 1613 + tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
1642 gen_op_hw_ret(); 1614 gen_op_hw_ret();
1643 } 1615 }
1644 ret = 2; 1616 ret = 2;
@@ -1652,11 +1624,9 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
1652 if (!ctx->pal_mode) 1624 if (!ctx->pal_mode)
1653 goto invalid_opc; 1625 goto invalid_opc;
1654 if (ra != 31) 1626 if (ra != 31)
1655 - tcg_gen_mov_i64(cpu_T[0], cpu_ir[rb]); 1627 + tcg_gen_addi_i64(cpu_T[0], cpu_ir[rb], disp12);
1656 else 1628 else
1657 - tcg_gen_movi_i64(cpu_T[0], 0);  
1658 - tcg_gen_movi_i64(cpu_T[1], disp12);  
1659 - gen_op_addq(); 1629 + tcg_gen_movi_i64(cpu_T[0], disp12);
1660 if (ra != 31) 1630 if (ra != 31)
1661 tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]); 1631 tcg_gen_mov_i64(cpu_T[1], cpu_ir[ra]);
1662 else 1632 else