Commit 74637406444c9276ecc26de970ff9018fbfc8735
1 parent 8d71247e
target-ppc: convert arithmetic functions to TCG
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5590 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 5 changed files with 542 additions and 1013 deletions
target-ppc/helper.h
| ... | ... | @@ -8,6 +8,12 @@ DEF_HELPER(uint32_t, helper_fcmpu, (void)) |
| 8 | 8 | DEF_HELPER(uint32_t, helper_load_cr, (void)) |
| 9 | 9 | DEF_HELPER(void, helper_store_cr, (target_ulong, uint32_t)) |
| 10 | 10 | |
| 11 | +#if defined(TARGET_PPC64) | |
| 12 | +DEF_HELPER(uint64_t, helper_mulhd, (uint64_t, uint64_t)) | |
| 13 | +DEF_HELPER(uint64_t, helper_mulhdu, (uint64_t, uint64_t)) | |
| 14 | +DEF_HELPER(uint64_t, helper_mulldo, (uint64_t, uint64_t)) | |
| 15 | +#endif | |
| 16 | + | |
| 11 | 17 | DEF_HELPER(target_ulong, helper_cntlzw, (target_ulong t)) |
| 12 | 18 | DEF_HELPER(target_ulong, helper_popcntb, (target_ulong val)) |
| 13 | 19 | DEF_HELPER(target_ulong, helper_sraw, (target_ulong, target_ulong)) | ... | ... |
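Note (not part of the commit): these DEF_HELPER entries declare C helpers that generated code can call; translate.c below reaches them through tcg_gen_helper_1_2. As a rough standalone illustration of what the two high-word helpers compute, assuming a compiler-provided 128-bit type instead of QEMU's muls64/mulu64 routines (function names here are made up for the sketch):

    #include <stdint.h>

    /* High 64 bits of a 64x64 multiply, the value helper_mulhd and
     * helper_mulhdu return (signed and unsigned variants). */
    static uint64_t mulhd_sketch(int64_t a, int64_t b)
    {
        return (uint64_t)(((__int128)a * b) >> 64);
    }

    static uint64_t mulhdu_sketch(uint64_t a, uint64_t b)
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }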
target-ppc/op.c
| ... | ... | @@ -354,404 +354,6 @@ void OPPROTO op_check_addo_64 (void) |
| 354 | 354 | } |
| 355 | 355 | #endif |
| 356 | 356 | |
| 357 | -/* add carrying */ | |
| 358 | -void OPPROTO op_check_addc (void) | |
| 359 | -{ | |
| 360 | - if (likely((uint32_t)T0 >= (uint32_t)T2)) { | |
| 361 | - env->xer &= ~(1 << XER_CA); | |
| 362 | - } else { | |
| 363 | - env->xer |= (1 << XER_CA); | |
| 364 | - } | |
| 365 | - RETURN(); | |
| 366 | -} | |
| 367 | - | |
| 368 | -#if defined(TARGET_PPC64) | |
| 369 | -void OPPROTO op_check_addc_64 (void) | |
| 370 | -{ | |
| 371 | - if (likely((uint64_t)T0 >= (uint64_t)T2)) { | |
| 372 | - env->xer &= ~(1 << XER_CA); | |
| 373 | - } else { | |
| 374 | - env->xer |= (1 << XER_CA); | |
| 375 | - } | |
| 376 | - RETURN(); | |
| 377 | -} | |
| 378 | -#endif | |
| 379 | - | |
| 380 | -/* add extended */ | |
| 381 | -void OPPROTO op_adde (void) | |
| 382 | -{ | |
| 383 | - do_adde(); | |
| 384 | - RETURN(); | |
| 385 | -} | |
| 386 | - | |
| 387 | -#if defined(TARGET_PPC64) | |
| 388 | -void OPPROTO op_adde_64 (void) | |
| 389 | -{ | |
| 390 | - do_adde_64(); | |
| 391 | - RETURN(); | |
| 392 | -} | |
| 393 | -#endif | |
| 394 | - | |
| 395 | -/* add to minus one extended */ | |
| 396 | -void OPPROTO op_add_me (void) | |
| 397 | -{ | |
| 398 | - T0 += xer_ca + (-1); | |
| 399 | - if (likely((uint32_t)T1 != 0)) | |
| 400 | - env->xer |= (1 << XER_CA); | |
| 401 | - RETURN(); | |
| 402 | -} | |
| 403 | - | |
| 404 | -#if defined(TARGET_PPC64) | |
| 405 | -void OPPROTO op_add_me_64 (void) | |
| 406 | -{ | |
| 407 | - T0 += xer_ca + (-1); | |
| 408 | - if (likely((uint64_t)T1 != 0)) | |
| 409 | - env->xer |= (1 << XER_CA); | |
| 410 | - RETURN(); | |
| 411 | -} | |
| 412 | -#endif | |
| 413 | - | |
| 414 | -void OPPROTO op_addmeo (void) | |
| 415 | -{ | |
| 416 | - do_addmeo(); | |
| 417 | - RETURN(); | |
| 418 | -} | |
| 419 | - | |
| 420 | -void OPPROTO op_addmeo_64 (void) | |
| 421 | -{ | |
| 422 | - do_addmeo(); | |
| 423 | - RETURN(); | |
| 424 | -} | |
| 425 | - | |
| 426 | -/* add to zero extended */ | |
| 427 | -void OPPROTO op_add_ze (void) | |
| 428 | -{ | |
| 429 | - T0 += xer_ca; | |
| 430 | - RETURN(); | |
| 431 | -} | |
| 432 | - | |
| 433 | -/* divide word */ | |
| 434 | -void OPPROTO op_divw (void) | |
| 435 | -{ | |
| 436 | - if (unlikely(((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) || | |
| 437 | - (int32_t)T1 == 0)) { | |
| 438 | - T0 = (int32_t)(UINT32_MAX * ((uint32_t)T0 >> 31)); | |
| 439 | - } else { | |
| 440 | - T0 = (int32_t)T0 / (int32_t)T1; | |
| 441 | - } | |
| 442 | - RETURN(); | |
| 443 | -} | |
| 444 | - | |
| 445 | -#if defined(TARGET_PPC64) | |
| 446 | -void OPPROTO op_divd (void) | |
| 447 | -{ | |
| 448 | - if (unlikely(((int64_t)T0 == INT64_MIN && (int64_t)T1 == (int64_t)-1LL) || | |
| 449 | - (int64_t)T1 == 0)) { | |
| 450 | - T0 = (int64_t)(UINT64_MAX * ((uint64_t)T0 >> 63)); | |
| 451 | - } else { | |
| 452 | - T0 = (int64_t)T0 / (int64_t)T1; | |
| 453 | - } | |
| 454 | - RETURN(); | |
| 455 | -} | |
| 456 | -#endif | |
| 457 | - | |
| 458 | -void OPPROTO op_divwo (void) | |
| 459 | -{ | |
| 460 | - do_divwo(); | |
| 461 | - RETURN(); | |
| 462 | -} | |
| 463 | - | |
| 464 | -#if defined(TARGET_PPC64) | |
| 465 | -void OPPROTO op_divdo (void) | |
| 466 | -{ | |
| 467 | - do_divdo(); | |
| 468 | - RETURN(); | |
| 469 | -} | |
| 470 | -#endif | |
| 471 | - | |
| 472 | -/* divide word unsigned */ | |
| 473 | -void OPPROTO op_divwu (void) | |
| 474 | -{ | |
| 475 | - if (unlikely(T1 == 0)) { | |
| 476 | - T0 = 0; | |
| 477 | - } else { | |
| 478 | - T0 = (uint32_t)T0 / (uint32_t)T1; | |
| 479 | - } | |
| 480 | - RETURN(); | |
| 481 | -} | |
| 482 | - | |
| 483 | -#if defined(TARGET_PPC64) | |
| 484 | -void OPPROTO op_divdu (void) | |
| 485 | -{ | |
| 486 | - if (unlikely(T1 == 0)) { | |
| 487 | - T0 = 0; | |
| 488 | - } else { | |
| 489 | - T0 /= T1; | |
| 490 | - } | |
| 491 | - RETURN(); | |
| 492 | -} | |
| 493 | -#endif | |
| 494 | - | |
| 495 | -void OPPROTO op_divwuo (void) | |
| 496 | -{ | |
| 497 | - do_divwuo(); | |
| 498 | - RETURN(); | |
| 499 | -} | |
| 500 | - | |
| 501 | -#if defined(TARGET_PPC64) | |
| 502 | -void OPPROTO op_divduo (void) | |
| 503 | -{ | |
| 504 | - do_divduo(); | |
| 505 | - RETURN(); | |
| 506 | -} | |
| 507 | -#endif | |
| 508 | - | |
| 509 | -/* multiply high word */ | |
| 510 | -void OPPROTO op_mulhw (void) | |
| 511 | -{ | |
| 512 | - T0 = ((int64_t)((int32_t)T0) * (int64_t)((int32_t)T1)) >> 32; | |
| 513 | - RETURN(); | |
| 514 | -} | |
| 515 | - | |
| 516 | -#if defined(TARGET_PPC64) | |
| 517 | -void OPPROTO op_mulhd (void) | |
| 518 | -{ | |
| 519 | - uint64_t tl, th; | |
| 520 | - | |
| 521 | - muls64(&tl, &th, T0, T1); | |
| 522 | - T0 = th; | |
| 523 | - RETURN(); | |
| 524 | -} | |
| 525 | -#endif | |
| 526 | - | |
| 527 | -/* multiply high word unsigned */ | |
| 528 | -void OPPROTO op_mulhwu (void) | |
| 529 | -{ | |
| 530 | - T0 = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1) >> 32; | |
| 531 | - RETURN(); | |
| 532 | -} | |
| 533 | - | |
| 534 | -#if defined(TARGET_PPC64) | |
| 535 | -void OPPROTO op_mulhdu (void) | |
| 536 | -{ | |
| 537 | - uint64_t tl, th; | |
| 538 | - | |
| 539 | - mulu64(&tl, &th, T0, T1); | |
| 540 | - T0 = th; | |
| 541 | - RETURN(); | |
| 542 | -} | |
| 543 | -#endif | |
| 544 | - | |
| 545 | -/* multiply low immediate */ | |
| 546 | -void OPPROTO op_mulli (void) | |
| 547 | -{ | |
| 548 | - T0 = ((int32_t)T0 * (int32_t)PARAM1); | |
| 549 | - RETURN(); | |
| 550 | -} | |
| 551 | - | |
| 552 | -/* multiply low word */ | |
| 553 | -void OPPROTO op_mullw (void) | |
| 554 | -{ | |
| 555 | -#if defined(TARGET_PPC64) | |
| 556 | - T0 = (int64_t)(int32_t)T0 * (int64_t)(int32_t)T1; | |
| 557 | -#else | |
| 558 | - T0 = (int32_t)(T0 * T1); | |
| 559 | -#endif | |
| 560 | - RETURN(); | |
| 561 | -} | |
| 562 | - | |
| 563 | -#if defined(TARGET_PPC64) | |
| 564 | -void OPPROTO op_mulld (void) | |
| 565 | -{ | |
| 566 | - T0 *= T1; | |
| 567 | - RETURN(); | |
| 568 | -} | |
| 569 | -#endif | |
| 570 | - | |
| 571 | -void OPPROTO op_mullwo (void) | |
| 572 | -{ | |
| 573 | - do_mullwo(); | |
| 574 | - RETURN(); | |
| 575 | -} | |
| 576 | - | |
| 577 | -#if defined(TARGET_PPC64) | |
| 578 | -void OPPROTO op_mulldo (void) | |
| 579 | -{ | |
| 580 | - do_mulldo(); | |
| 581 | - RETURN(); | |
| 582 | -} | |
| 583 | -#endif | |
| 584 | - | |
| 585 | -/* negate */ | |
| 586 | -void OPPROTO op_neg (void) | |
| 587 | -{ | |
| 588 | - if (likely(T0 != INT32_MIN)) { | |
| 589 | - T0 = -(int32_t)T0; | |
| 590 | - } | |
| 591 | - RETURN(); | |
| 592 | -} | |
| 593 | - | |
| 594 | -#if defined(TARGET_PPC64) | |
| 595 | -void OPPROTO op_neg_64 (void) | |
| 596 | -{ | |
| 597 | - if (likely(T0 != INT64_MIN)) { | |
| 598 | - T0 = -(int64_t)T0; | |
| 599 | - } | |
| 600 | - RETURN(); | |
| 601 | -} | |
| 602 | -#endif | |
| 603 | - | |
| 604 | -void OPPROTO op_nego (void) | |
| 605 | -{ | |
| 606 | - do_nego(); | |
| 607 | - RETURN(); | |
| 608 | -} | |
| 609 | - | |
| 610 | -#if defined(TARGET_PPC64) | |
| 611 | -void OPPROTO op_nego_64 (void) | |
| 612 | -{ | |
| 613 | - do_nego_64(); | |
| 614 | - RETURN(); | |
| 615 | -} | |
| 616 | -#endif | |
| 617 | - | |
| 618 | -/* subtract from carrying */ | |
| 619 | -void OPPROTO op_check_subfc (void) | |
| 620 | -{ | |
| 621 | - if (likely((uint32_t)T0 > (uint32_t)T1)) { | |
| 622 | - env->xer &= ~(1 << XER_CA); | |
| 623 | - } else { | |
| 624 | - env->xer |= (1 << XER_CA); | |
| 625 | - } | |
| 626 | - RETURN(); | |
| 627 | -} | |
| 628 | - | |
| 629 | -#if defined(TARGET_PPC64) | |
| 630 | -void OPPROTO op_check_subfc_64 (void) | |
| 631 | -{ | |
| 632 | - if (likely((uint64_t)T0 > (uint64_t)T1)) { | |
| 633 | - env->xer &= ~(1 << XER_CA); | |
| 634 | - } else { | |
| 635 | - env->xer |= (1 << XER_CA); | |
| 636 | - } | |
| 637 | - RETURN(); | |
| 638 | -} | |
| 639 | -#endif | |
| 640 | - | |
| 641 | -/* subtract from extended */ | |
| 642 | -void OPPROTO op_subfe (void) | |
| 643 | -{ | |
| 644 | - do_subfe(); | |
| 645 | - RETURN(); | |
| 646 | -} | |
| 647 | - | |
| 648 | -#if defined(TARGET_PPC64) | |
| 649 | -void OPPROTO op_subfe_64 (void) | |
| 650 | -{ | |
| 651 | - do_subfe_64(); | |
| 652 | - RETURN(); | |
| 653 | -} | |
| 654 | -#endif | |
| 655 | - | |
| 656 | -/* subtract from immediate carrying */ | |
| 657 | -void OPPROTO op_subfic (void) | |
| 658 | -{ | |
| 659 | - T0 = (int32_t)PARAM1 + ~T0 + 1; | |
| 660 | - if ((uint32_t)T0 <= (uint32_t)PARAM1) { | |
| 661 | - env->xer |= (1 << XER_CA); | |
| 662 | - } else { | |
| 663 | - env->xer &= ~(1 << XER_CA); | |
| 664 | - } | |
| 665 | - RETURN(); | |
| 666 | -} | |
| 667 | - | |
| 668 | -#if defined(TARGET_PPC64) | |
| 669 | -void OPPROTO op_subfic_64 (void) | |
| 670 | -{ | |
| 671 | - T0 = (int64_t)PARAM1 + ~T0 + 1; | |
| 672 | - if ((uint64_t)T0 <= (uint64_t)PARAM1) { | |
| 673 | - env->xer |= (1 << XER_CA); | |
| 674 | - } else { | |
| 675 | - env->xer &= ~(1 << XER_CA); | |
| 676 | - } | |
| 677 | - RETURN(); | |
| 678 | -} | |
| 679 | -#endif | |
| 680 | - | |
| 681 | -/* subtract from minus one extended */ | |
| 682 | -void OPPROTO op_subfme (void) | |
| 683 | -{ | |
| 684 | - T0 = ~T0 + xer_ca - 1; | |
| 685 | - if (likely((uint32_t)T0 != UINT32_MAX)) | |
| 686 | - env->xer |= (1 << XER_CA); | |
| 687 | - RETURN(); | |
| 688 | -} | |
| 689 | - | |
| 690 | -#if defined(TARGET_PPC64) | |
| 691 | -void OPPROTO op_subfme_64 (void) | |
| 692 | -{ | |
| 693 | - T0 = ~T0 + xer_ca - 1; | |
| 694 | - if (likely((uint64_t)T0 != UINT64_MAX)) | |
| 695 | - env->xer |= (1 << XER_CA); | |
| 696 | - RETURN(); | |
| 697 | -} | |
| 698 | -#endif | |
| 699 | - | |
| 700 | -void OPPROTO op_subfmeo (void) | |
| 701 | -{ | |
| 702 | - do_subfmeo(); | |
| 703 | - RETURN(); | |
| 704 | -} | |
| 705 | - | |
| 706 | -#if defined(TARGET_PPC64) | |
| 707 | -void OPPROTO op_subfmeo_64 (void) | |
| 708 | -{ | |
| 709 | - do_subfmeo_64(); | |
| 710 | - RETURN(); | |
| 711 | -} | |
| 712 | -#endif | |
| 713 | - | |
| 714 | -/* subtract from zero extended */ | |
| 715 | -void OPPROTO op_subfze (void) | |
| 716 | -{ | |
| 717 | - T1 = ~T0; | |
| 718 | - T0 = T1 + xer_ca; | |
| 719 | - if ((uint32_t)T0 < (uint32_t)T1) { | |
| 720 | - env->xer |= (1 << XER_CA); | |
| 721 | - } else { | |
| 722 | - env->xer &= ~(1 << XER_CA); | |
| 723 | - } | |
| 724 | - RETURN(); | |
| 725 | -} | |
| 726 | - | |
| 727 | -#if defined(TARGET_PPC64) | |
| 728 | -void OPPROTO op_subfze_64 (void) | |
| 729 | -{ | |
| 730 | - T1 = ~T0; | |
| 731 | - T0 = T1 + xer_ca; | |
| 732 | - if ((uint64_t)T0 < (uint64_t)T1) { | |
| 733 | - env->xer |= (1 << XER_CA); | |
| 734 | - } else { | |
| 735 | - env->xer &= ~(1 << XER_CA); | |
| 736 | - } | |
| 737 | - RETURN(); | |
| 738 | -} | |
| 739 | -#endif | |
| 740 | - | |
| 741 | -void OPPROTO op_subfzeo (void) | |
| 742 | -{ | |
| 743 | - do_subfzeo(); | |
| 744 | - RETURN(); | |
| 745 | -} | |
| 746 | - | |
| 747 | -#if defined(TARGET_PPC64) | |
| 748 | -void OPPROTO op_subfzeo_64 (void) | |
| 749 | -{ | |
| 750 | - do_subfzeo_64(); | |
| 751 | - RETURN(); | |
| 752 | -} | |
| 753 | -#endif | |
| 754 | - | |
| 755 | 357 | /*** Integer shift ***/ |
| 756 | 358 | void OPPROTO op_srli_T1 (void) |
| 757 | 359 | { | ... | ... |
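Note (not part of the commit): everything removed above is dyngen-style micro-op code operating on the implicit T0/T1/T2 registers; the same operations are re-emitted as inline TCG ops in translate.c below. The carry convention those ops implemented is the usual unsigned-wraparound rule, sketched here in plain C with illustrative names:

    #include <stdint.h>

    /* op_check_addc: for r = a + b, XER[CA] is set exactly when the
     * unsigned result wrapped, i.e. when r is below either operand. */
    static int add_sets_ca(uint32_t a, uint32_t b)
    {
        return (uint32_t)(a + b) < a;
    }

    /* op_check_subfc computes r = b - a ("subtract from"); XER[CA]
     * (meaning "no borrow") is set exactly when a <= b unsigned,
     * which the removed op tested as !(r > b). */
    static int subf_sets_ca(uint32_t a, uint32_t b)
    {
        return a <= b;
    }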
target-ppc/op_helper.c
| ... | ... | @@ -113,258 +113,39 @@ void ppc_store_dump_spr (int sprn, target_ulong val) |
| 113 | 113 | |
| 114 | 114 | /*****************************************************************************/ |
| 115 | 115 | /* Fixed point operations helpers */ |
| 116 | -void do_adde (void) | |
| 117 | -{ | |
| 118 | - T2 = T0; | |
| 119 | - T0 += T1 + xer_ca; | |
| 120 | - if (likely(!((uint32_t)T0 < (uint32_t)T2 || | |
| 121 | - (xer_ca == 1 && (uint32_t)T0 == (uint32_t)T2)))) { | |
| 122 | - env->xer &= ~(1 << XER_CA); | |
| 123 | - } else { | |
| 124 | - env->xer |= (1 << XER_CA); | |
| 125 | - } | |
| 126 | -} | |
| 127 | - | |
| 128 | 116 | #if defined(TARGET_PPC64) |
| 129 | -void do_adde_64 (void) | |
| 130 | -{ | |
| 131 | - T2 = T0; | |
| 132 | - T0 += T1 + xer_ca; | |
| 133 | - if (likely(!((uint64_t)T0 < (uint64_t)T2 || | |
| 134 | - (xer_ca == 1 && (uint64_t)T0 == (uint64_t)T2)))) { | |
| 135 | - env->xer &= ~(1 << XER_CA); | |
| 136 | - } else { | |
| 137 | - env->xer |= (1 << XER_CA); | |
| 138 | - } | |
| 139 | -} | |
| 140 | -#endif | |
| 141 | 117 | |
| 142 | -void do_addmeo (void) | |
| 118 | +/* multiply high word */ | |
| 119 | +uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2) | |
| 143 | 120 | { |
| 144 | - int ov; | |
| 145 | - T1 = T0; | |
| 146 | - T0 += xer_ca + (-1); | |
| 147 | - ov = ((uint32_t)T1 & ((uint32_t)T1 ^ (uint32_t)T0)) >> 31; | |
| 148 | - if (ov) { | |
| 149 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 150 | - } else { | |
| 151 | - env->xer &= ~(1 << XER_OV); | |
| 152 | - } | |
| 153 | - if (likely((uint32_t)T1 != 0)) | |
| 154 | - env->xer |= (1 << XER_CA); | |
| 155 | -} | |
| 121 | + uint64_t tl, th; | |
| 156 | 122 | |
| 157 | -#if defined(TARGET_PPC64) | |
| 158 | -void do_addmeo_64 (void) | |
| 159 | -{ | |
| 160 | - int ov; | |
| 161 | - T1 = T0; | |
| 162 | - T0 += xer_ca + (-1); | |
| 163 | - ov = ((uint64_t)T1 & ((uint64_t)T1 ^ (uint64_t)T0)) >> 63; | |
| 164 | - if (ov) { | |
| 165 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 166 | - } else { | |
| 167 | - env->xer &= ~(1 << XER_OV); | |
| 168 | - } | |
| 169 | - if (likely((uint64_t)T1 != 0)) | |
| 170 | - env->xer |= (1 << XER_CA); | |
| 123 | + muls64(&tl, &th, arg1, arg2); | |
| 124 | + return th; | |
| 171 | 125 | } |
| 172 | -#endif | |
| 173 | 126 | |
| 174 | -void do_divwo (void) | |
| 127 | +/* multiply high word unsigned */ | |
| 128 | +uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2) | |
| 175 | 129 | { |
| 176 | - if (likely(!(((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) || | |
| 177 | - (int32_t)T1 == 0))) { | |
| 178 | - env->xer &= ~(1 << XER_OV); | |
| 179 | - T0 = (int32_t)T0 / (int32_t)T1; | |
| 180 | - } else { | |
| 181 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 182 | - T0 = UINT32_MAX * ((uint32_t)T0 >> 31); | |
| 183 | - } | |
| 184 | -} | |
| 130 | + uint64_t tl, th; | |
| 185 | 131 | |
| 186 | -#if defined(TARGET_PPC64) | |
| 187 | -void do_divdo (void) | |
| 188 | -{ | |
| 189 | - if (likely(!(((int64_t)T0 == INT64_MIN && (int64_t)T1 == (int64_t)-1LL) || | |
| 190 | - (int64_t)T1 == 0))) { | |
| 191 | - env->xer &= ~(1 << XER_OV); | |
| 192 | - T0 = (int64_t)T0 / (int64_t)T1; | |
| 193 | - } else { | |
| 194 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 195 | - T0 = UINT64_MAX * ((uint64_t)T0 >> 63); | |
| 196 | - } | |
| 197 | -} | |
| 198 | -#endif | |
| 199 | - | |
| 200 | -void do_divwuo (void) | |
| 201 | -{ | |
| 202 | - if (likely((uint32_t)T1 != 0)) { | |
| 203 | - env->xer &= ~(1 << XER_OV); | |
| 204 | - T0 = (uint32_t)T0 / (uint32_t)T1; | |
| 205 | - } else { | |
| 206 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 207 | - T0 = 0; | |
| 208 | - } | |
| 132 | + mulu64(&tl, &th, arg1, arg2); | |
| 133 | + return th; | |
| 209 | 134 | } |
| 210 | 135 | |
| 211 | -#if defined(TARGET_PPC64) | |
| 212 | -void do_divduo (void) | |
| 213 | -{ | |
| 214 | - if (likely((uint64_t)T1 != 0)) { | |
| 215 | - env->xer &= ~(1 << XER_OV); | |
| 216 | - T0 = (uint64_t)T0 / (uint64_t)T1; | |
| 217 | - } else { | |
| 218 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 219 | - T0 = 0; | |
| 220 | - } | |
| 221 | -} | |
| 222 | -#endif | |
| 223 | - | |
| 224 | -void do_mullwo (void) | |
| 225 | -{ | |
| 226 | - int64_t res = (int64_t)(int32_t)T0 * (int64_t)(int32_t)T1; | |
| 227 | - | |
| 228 | - if (likely((int32_t)res == res)) { | |
| 229 | - env->xer &= ~(1 << XER_OV); | |
| 230 | - } else { | |
| 231 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 232 | - } | |
| 233 | - T0 = (int32_t)res; | |
| 234 | -} | |
| 235 | - | |
| 236 | -#if defined(TARGET_PPC64) | |
| 237 | -void do_mulldo (void) | |
| 136 | +uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2) | |
| 238 | 137 | { |
| 239 | 138 | int64_t th; |
| 240 | 139 | uint64_t tl; |
| 241 | 140 | |
| 242 | - muls64(&tl, (uint64_t *)&th, T0, T1); | |
| 243 | - T0 = (int64_t)tl; | |
| 141 | + muls64(&tl, (uint64_t *)&th, arg1, arg2); | |
| 244 | 142 | /* If th != 0 && th != -1, then we had an overflow */ |
| 245 | 143 | if (likely((uint64_t)(th + 1) <= 1)) { |
| 246 | 144 | env->xer &= ~(1 << XER_OV); |
| 247 | 145 | } else { |
| 248 | 146 | env->xer |= (1 << XER_OV) | (1 << XER_SO); |
| 249 | 147 | } |
| 250 | -} | |
| 251 | -#endif | |
| 252 | - | |
| 253 | -void do_nego (void) | |
| 254 | -{ | |
| 255 | - if (likely((int32_t)T0 != INT32_MIN)) { | |
| 256 | - env->xer &= ~(1 << XER_OV); | |
| 257 | - T0 = -(int32_t)T0; | |
| 258 | - } else { | |
| 259 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 260 | - } | |
| 261 | -} | |
| 262 | - | |
| 263 | -#if defined(TARGET_PPC64) | |
| 264 | -void do_nego_64 (void) | |
| 265 | -{ | |
| 266 | - if (likely((int64_t)T0 != INT64_MIN)) { | |
| 267 | - env->xer &= ~(1 << XER_OV); | |
| 268 | - T0 = -(int64_t)T0; | |
| 269 | - } else { | |
| 270 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 271 | - } | |
| 272 | -} | |
| 273 | -#endif | |
| 274 | - | |
| 275 | -void do_subfe (void) | |
| 276 | -{ | |
| 277 | - T0 = T1 + ~T0 + xer_ca; | |
| 278 | - if (likely((uint32_t)T0 >= (uint32_t)T1 && | |
| 279 | - (xer_ca == 0 || (uint32_t)T0 != (uint32_t)T1))) { | |
| 280 | - env->xer &= ~(1 << XER_CA); | |
| 281 | - } else { | |
| 282 | - env->xer |= (1 << XER_CA); | |
| 283 | - } | |
| 284 | -} | |
| 285 | - | |
| 286 | -#if defined(TARGET_PPC64) | |
| 287 | -void do_subfe_64 (void) | |
| 288 | -{ | |
| 289 | - T0 = T1 + ~T0 + xer_ca; | |
| 290 | - if (likely((uint64_t)T0 >= (uint64_t)T1 && | |
| 291 | - (xer_ca == 0 || (uint64_t)T0 != (uint64_t)T1))) { | |
| 292 | - env->xer &= ~(1 << XER_CA); | |
| 293 | - } else { | |
| 294 | - env->xer |= (1 << XER_CA); | |
| 295 | - } | |
| 296 | -} | |
| 297 | -#endif | |
| 298 | - | |
| 299 | -void do_subfmeo (void) | |
| 300 | -{ | |
| 301 | - int ov; | |
| 302 | - T1 = T0; | |
| 303 | - T0 = ~T0 + xer_ca - 1; | |
| 304 | - ov = ((uint32_t)~T1 & ((uint32_t)~T1 ^ (uint32_t)T0)) >> 31; | |
| 305 | - if (ov) { | |
| 306 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 307 | - } else { | |
| 308 | - env->xer &= ~(1 << XER_OV); | |
| 309 | - } | |
| 310 | - if (likely((uint32_t)T1 != UINT32_MAX)) | |
| 311 | - env->xer |= (1 << XER_CA); | |
| 312 | -} | |
| 313 | - | |
| 314 | -#if defined(TARGET_PPC64) | |
| 315 | -void do_subfmeo_64 (void) | |
| 316 | -{ | |
| 317 | - int ov; | |
| 318 | - T1 = T0; | |
| 319 | - T0 = ~T0 + xer_ca - 1; | |
| 320 | - ov = ((uint64_t)~T1 & ((uint64_t)~T1 ^ (uint64_t)T0)) >> 63; | |
| 321 | - if (ov) { | |
| 322 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 323 | - } else { | |
| 324 | - env->xer &= ~(1 << XER_OV); | |
| 325 | - } | |
| 326 | - if (likely((uint64_t)T1 != UINT64_MAX)) | |
| 327 | - env->xer |= (1 << XER_CA); | |
| 328 | -} | |
| 329 | -#endif | |
| 330 | - | |
| 331 | -void do_subfzeo (void) | |
| 332 | -{ | |
| 333 | - int ov; | |
| 334 | - T1 = T0; | |
| 335 | - T0 = ~T0 + xer_ca; | |
| 336 | - ov = (((uint32_t)~T1 ^ UINT32_MAX) & | |
| 337 | - ((uint32_t)(~T1) ^ (uint32_t)T0)) >> 31; | |
| 338 | - if (ov) { | |
| 339 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 340 | - } else { | |
| 341 | - env->xer &= ~(1 << XER_OV); | |
| 342 | - } | |
| 343 | - if (likely((uint32_t)T0 >= (uint32_t)~T1)) { | |
| 344 | - env->xer &= ~(1 << XER_CA); | |
| 345 | - } else { | |
| 346 | - env->xer |= (1 << XER_CA); | |
| 347 | - } | |
| 348 | -} | |
| 349 | - | |
| 350 | -#if defined(TARGET_PPC64) | |
| 351 | -void do_subfzeo_64 (void) | |
| 352 | -{ | |
| 353 | - int ov; | |
| 354 | - T1 = T0; | |
| 355 | - T0 = ~T0 + xer_ca; | |
| 356 | - ov = (((uint64_t)~T1 ^ UINT64_MAX) & | |
| 357 | - ((uint64_t)(~T1) ^ (uint64_t)T0)) >> 63; | |
| 358 | - if (ov) { | |
| 359 | - env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 360 | - } else { | |
| 361 | - env->xer &= ~(1 << XER_OV); | |
| 362 | - } | |
| 363 | - if (likely((uint64_t)T0 >= (uint64_t)~T1)) { | |
| 364 | - env->xer &= ~(1 << XER_CA); | |
| 365 | - } else { | |
| 366 | - env->xer |= (1 << XER_CA); | |
| 367 | - } | |
| 148 | + return (int64_t)tl; | |
| 368 | 149 | } |
| 369 | 150 | #endif |
| 370 | 151 | ... | ... |
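Note (not part of the commit): helper_mulldo stays in C because it needs the full 128-bit product for the overflow bookkeeping; its (uint64_t)(th + 1) <= 1 test is a branch-free way of asking whether the high half is 0 or -1. A standalone sketch assuming a compiler 128-bit type, with illustrative names only:

    #include <stdint.h>

    static int64_t mulldo_sketch(int64_t a, int64_t b, int *set_ov)
    {
        __int128 prod = (__int128)a * b;
        int64_t  hi   = (int64_t)(prod >> 64);

        /* Mirror of helper_mulldo: flag XER[OV] when the high half of
         * the 128-bit product is neither 0 nor -1. */
        *set_ov = (uint64_t)(hi + 1) > 1;
        return (int64_t)prod;   /* low 64 bits, as returned to rD */
    }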
target-ppc/op_helper.h
| ... | ... | @@ -61,38 +61,6 @@ void do_store_fpscr (uint32_t mask); |
| 61 | 61 | target_ulong ppc_load_dump_spr (int sprn); |
| 62 | 62 | void ppc_store_dump_spr (int sprn, target_ulong val); |
| 63 | 63 | |
| 64 | -/* Integer arithmetic helpers */ | |
| 65 | -void do_adde (void); | |
| 66 | -void do_addmeo (void); | |
| 67 | -void do_divwo (void); | |
| 68 | -void do_divwuo (void); | |
| 69 | -void do_mullwo (void); | |
| 70 | -void do_nego (void); | |
| 71 | -void do_subfe (void); | |
| 72 | -void do_subfmeo (void); | |
| 73 | -void do_subfzeo (void); | |
| 74 | -void do_cntlzw (void); | |
| 75 | -#if defined(TARGET_PPC64) | |
| 76 | -void do_cntlzd (void); | |
| 77 | -#endif | |
| 78 | -void do_sraw (void); | |
| 79 | -#if defined(TARGET_PPC64) | |
| 80 | -void do_adde_64 (void); | |
| 81 | -void do_addmeo_64 (void); | |
| 82 | -void do_divdo (void); | |
| 83 | -void do_divduo (void); | |
| 84 | -void do_mulldo (void); | |
| 85 | -void do_nego_64 (void); | |
| 86 | -void do_subfe_64 (void); | |
| 87 | -void do_subfmeo_64 (void); | |
| 88 | -void do_subfzeo_64 (void); | |
| 89 | -void do_srad (void); | |
| 90 | -#endif | |
| 91 | -void do_popcntb (void); | |
| 92 | -#if defined(TARGET_PPC64) | |
| 93 | -void do_popcntb_64 (void); | |
| 94 | -#endif | |
| 95 | - | |
| 96 | 64 | /* Floating-point arithmetic helpers */ |
| 97 | 65 | void do_compute_fprf (int set_class); |
| 98 | 66 | #ifdef CONFIG_SOFTFLOAT | ... | ... |
target-ppc/translate.c
| ... | ... | @@ -852,431 +852,603 @@ GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL) |
| 852 | 852 | } |
| 853 | 853 | |
| 854 | 854 | /*** Integer arithmetic ***/ |
| 855 | -#define __GEN_INT_ARITH2(name, opc1, opc2, opc3, inval, type) \ | |
| 856 | -GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \ | |
| 857 | -{ \ | |
| 858 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); \ | |
| 859 | - tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]); \ | |
| 860 | - gen_op_##name(); \ | |
| 861 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); \ | |
| 862 | - if (unlikely(Rc(ctx->opcode) != 0)) \ | |
| 863 | - gen_set_Rc0(ctx, cpu_T[0]); \ | |
| 864 | -} | |
| 865 | 855 | |
| 866 | -#define __GEN_INT_ARITH2_O(name, opc1, opc2, opc3, inval, type) \ | |
| 867 | -GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \ | |
| 868 | -{ \ | |
| 869 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); \ | |
| 870 | - tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]); \ | |
| 871 | - gen_op_##name(); \ | |
| 872 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); \ | |
| 873 | - if (unlikely(Rc(ctx->opcode) != 0)) \ | |
| 874 | - gen_set_Rc0(ctx, cpu_T[0]); \ | |
| 875 | -} | |
| 856 | +static always_inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, TCGv arg1, TCGv arg2, int sub) | |
| 857 | +{ | |
| 858 | + int l1; | |
| 859 | + TCGv t0; | |
| 876 | 860 | |
| 877 | -#define __GEN_INT_ARITH1(name, opc1, opc2, opc3, type) \ | |
| 878 | -GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type) \ | |
| 879 | -{ \ | |
| 880 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); \ | |
| 881 | - gen_op_##name(); \ | |
| 882 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); \ | |
| 883 | - if (unlikely(Rc(ctx->opcode) != 0)) \ | |
| 884 | - gen_set_Rc0(ctx, cpu_T[0]); \ | |
| 861 | + l1 = gen_new_label(); | |
| 862 | + /* Start with XER OV disabled, the most likely case */ | |
| 863 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV)); | |
| 864 | + t0 = tcg_temp_local_new(TCG_TYPE_TL); | |
| 865 | + tcg_gen_xor_tl(t0, arg0, arg1); | |
| 866 | +#if defined(TARGET_PPC64) | |
| 867 | + if (!ctx->sf_mode) | |
| 868 | + tcg_gen_ext32s_tl(t0, t0); | |
| 869 | +#endif | |
| 870 | + if (sub) | |
| 871 | + tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1); | |
| 872 | + else | |
| 873 | + tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1); | |
| 874 | + tcg_gen_xor_tl(t0, arg1, arg2); | |
| 875 | +#if defined(TARGET_PPC64) | |
| 876 | + if (!ctx->sf_mode) | |
| 877 | + tcg_gen_ext32s_tl(t0, t0); | |
| 878 | +#endif | |
| 879 | + if (sub) | |
| 880 | + tcg_gen_brcondi_tl(TCG_COND_GE, t0, 0, l1); | |
| 881 | + else | |
| 882 | + tcg_gen_brcondi_tl(TCG_COND_LT, t0, 0, l1); | |
| 883 | + tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO)); | |
| 884 | + gen_set_label(l1); | |
| 885 | + tcg_temp_free(t0); | |
| 885 | 886 | } |
| 886 | -#define __GEN_INT_ARITH1_O(name, opc1, opc2, opc3, type) \ | |
| 887 | -GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type) \ | |
| 888 | -{ \ | |
| 889 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); \ | |
| 890 | - gen_op_##name(); \ | |
| 891 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); \ | |
| 892 | - if (unlikely(Rc(ctx->opcode) != 0)) \ | |
| 893 | - gen_set_Rc0(ctx, cpu_T[0]); \ | |
| 887 | + | |
| 888 | +static always_inline void gen_op_arith_compute_ca(DisasContext *ctx, TCGv arg1, TCGv arg2, int sub) | |
| 889 | +{ | |
| 890 | + int l1 = gen_new_label(); | |
| 891 | + | |
| 892 | +#if defined(TARGET_PPC64) | |
| 893 | + if (!(ctx->sf_mode)) { | |
| 894 | + TCGv t0, t1; | |
| 895 | + t0 = tcg_temp_new(TCG_TYPE_TL); | |
| 896 | + t1 = tcg_temp_new(TCG_TYPE_TL); | |
| 897 | + | |
| 898 | + tcg_gen_ext32u_tl(t0, arg1); | |
| 899 | + tcg_gen_ext32u_tl(t1, arg2); | |
| 900 | + if (sub) { | |
| 901 | + tcg_gen_brcond_tl(TCG_COND_GTU, t0, t1, l1); | |
| 902 | + } else { | |
| 903 | + tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1); | |
| 904 | + } | |
| 905 | + } else | |
| 906 | +#endif | |
| 907 | + if (sub) { | |
| 908 | + tcg_gen_brcond_tl(TCG_COND_GTU, arg1, arg2, l1); | |
| 909 | + } else { | |
| 910 | + tcg_gen_brcond_tl(TCG_COND_GEU, arg1, arg2, l1); | |
| 911 | + } | |
| 912 | + tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA); | |
| 913 | + gen_set_label(l1); | |
| 894 | 914 | } |
| 895 | 915 | |
| 896 | -/* Two operands arithmetic functions */ | |
| 897 | -#define GEN_INT_ARITH2(name, opc1, opc2, opc3, type) \ | |
| 898 | -__GEN_INT_ARITH2(name, opc1, opc2, opc3, 0x00000000, type) \ | |
| 899 | -__GEN_INT_ARITH2_O(name##o, opc1, opc2, opc3 | 0x10, 0x00000000, type) | |
| 916 | +/* Common add function */ | |
| 917 | +static always_inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, | |
| 918 | + int add_ca, int compute_ca, int compute_ov) | |
| 919 | +{ | |
| 920 | + TCGv t0, t1; | |
| 900 | 921 | |
| 901 | -/* Two operands arithmetic functions with no overflow allowed */ | |
| 902 | -#define GEN_INT_ARITHN(name, opc1, opc2, opc3, type) \ | |
| 903 | -__GEN_INT_ARITH2(name, opc1, opc2, opc3, 0x00000400, type) | |
| 922 | + if ((!compute_ca && !compute_ov) || | |
| 923 | + (GET_TCGV(ret) != GET_TCGV(arg1) && GET_TCGV(ret) != GET_TCGV(arg2))) { | |
| 924 | + t0 = ret; | |
| 925 | + } else { | |
| 926 | + t0 = tcg_temp_local_new(TCG_TYPE_TL); | |
| 927 | + } | |
| 904 | 928 | |
| 905 | -/* One operand arithmetic functions */ | |
| 906 | -#define GEN_INT_ARITH1(name, opc1, opc2, opc3, type) \ | |
| 907 | -__GEN_INT_ARITH1(name, opc1, opc2, opc3, type) \ | |
| 908 | -__GEN_INT_ARITH1_O(name##o, opc1, opc2, opc3 | 0x10, type) | |
| 929 | + if (add_ca) { | |
| 930 | + t1 = tcg_temp_local_new(TCG_TYPE_TL); | |
| 931 | + tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA)); | |
| 932 | + tcg_gen_shri_tl(t1, t1, XER_CA); | |
| 933 | + } | |
| 909 | 934 | |
| 910 | -#if defined(TARGET_PPC64) | |
| 911 | -#define __GEN_INT_ARITH2_64(name, opc1, opc2, opc3, inval, type) \ | |
| 912 | -GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \ | |
| 913 | -{ \ | |
| 914 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); \ | |
| 915 | - tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]); \ | |
| 916 | - if (ctx->sf_mode) \ | |
| 917 | - gen_op_##name##_64(); \ | |
| 918 | - else \ | |
| 919 | - gen_op_##name(); \ | |
| 920 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); \ | |
| 921 | - if (unlikely(Rc(ctx->opcode) != 0)) \ | |
| 922 | - gen_set_Rc0(ctx, cpu_T[0]); \ | |
| 923 | -} | |
| 935 | + if (compute_ca && compute_ov) { | |
| 936 | + /* Start with XER CA and OV disabled, the most likely case */ | |
| 937 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV))); | |
| 938 | + } else if (compute_ca) { | |
| 939 | + /* Start with XER CA disabled, the most likely case */ | |
| 940 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA)); | |
| 941 | + } else if (compute_ov) { | |
| 942 | + /* Start with XER OV disabled, the most likely case */ | |
| 943 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV)); | |
| 944 | + } | |
| 924 | 945 | |
| 925 | -#define __GEN_INT_ARITH2_O_64(name, opc1, opc2, opc3, inval, type) \ | |
| 926 | -GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \ | |
| 927 | -{ \ | |
| 928 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); \ | |
| 929 | - tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]); \ | |
| 930 | - if (ctx->sf_mode) \ | |
| 931 | - gen_op_##name##_64(); \ | |
| 932 | - else \ | |
| 933 | - gen_op_##name(); \ | |
| 934 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); \ | |
| 935 | - if (unlikely(Rc(ctx->opcode) != 0)) \ | |
| 936 | - gen_set_Rc0(ctx, cpu_T[0]); \ | |
| 937 | -} | |
| 946 | + tcg_gen_add_tl(t0, arg1, arg2); | |
| 938 | 947 | |
| 939 | -#define __GEN_INT_ARITH1_64(name, opc1, opc2, opc3, type) \ | |
| 940 | -GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type) \ | |
| 941 | -{ \ | |
| 942 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); \ | |
| 943 | - if (ctx->sf_mode) \ | |
| 944 | - gen_op_##name##_64(); \ | |
| 945 | - else \ | |
| 946 | - gen_op_##name(); \ | |
| 947 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); \ | |
| 948 | - if (unlikely(Rc(ctx->opcode) != 0)) \ | |
| 949 | - gen_set_Rc0(ctx, cpu_T[0]); \ | |
| 948 | + if (compute_ca) { | |
| 949 | + gen_op_arith_compute_ca(ctx, t0, arg1, 0); | |
| 950 | + } | |
| 951 | + if (add_ca) { | |
| 952 | + tcg_gen_add_tl(t0, t0, t1); | |
| 953 | + gen_op_arith_compute_ca(ctx, t0, t1, 0); | |
| 954 | + tcg_temp_free(t1); | |
| 955 | + } | |
| 956 | + if (compute_ov) { | |
| 957 | + gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0); | |
| 958 | + } | |
| 959 | + | |
| 960 | + if (unlikely(Rc(ctx->opcode) != 0)) | |
| 961 | + gen_set_Rc0(ctx, t0); | |
| 962 | + | |
| 963 | + if (GET_TCGV(t0) != GET_TCGV(ret)) { | |
| 964 | + tcg_gen_mov_tl(ret, t0); | |
| 965 | + tcg_temp_free(t0); | |
| 966 | + } | |
| 950 | 967 | } |
| 951 | -#define __GEN_INT_ARITH1_O_64(name, opc1, opc2, opc3, type) \ | |
| 952 | -GEN_HANDLER(name, opc1, opc2, opc3, 0x0000F800, type) \ | |
| 968 | +/* Add functions with two operands */ | |
| 969 | +#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \ | |
| 970 | +GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER) \ | |
| 953 | 971 | { \ |
| 954 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); \ | |
| 955 | - if (ctx->sf_mode) \ | |
| 956 | - gen_op_##name##_64(); \ | |
| 957 | - else \ | |
| 958 | - gen_op_##name(); \ | |
| 959 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); \ | |
| 960 | - if (unlikely(Rc(ctx->opcode) != 0)) \ | |
| 961 | - gen_set_Rc0(ctx, cpu_T[0]); \ | |
| 972 | + gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ | |
| 973 | + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ | |
| 974 | + add_ca, compute_ca, compute_ov); \ | |
| 975 | +} | |
| 976 | +/* Add functions with one operand and one immediate */ | |
| 977 | +#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \ | |
| 978 | + add_ca, compute_ca, compute_ov) \ | |
| 979 | +GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER) \ | |
| 980 | +{ \ | |
| 981 | + TCGv t0 = tcg_const_local_tl(const_val); \ | |
| 982 | + gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ | |
| 983 | + cpu_gpr[rA(ctx->opcode)], t0, \ | |
| 984 | + add_ca, compute_ca, compute_ov); \ | |
| 985 | + tcg_temp_free(t0); \ | |
| 962 | 986 | } |
| 963 | 987 | |
| 964 | -/* Two operands arithmetic functions */ | |
| 965 | -#define GEN_INT_ARITH2_64(name, opc1, opc2, opc3, type) \ | |
| 966 | -__GEN_INT_ARITH2_64(name, opc1, opc2, opc3, 0x00000000, type) \ | |
| 967 | -__GEN_INT_ARITH2_O_64(name##o, opc1, opc2, opc3 | 0x10, 0x00000000, type) | |
| 988 | +/* add add. addo addo. */ | |
| 989 | +GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0) | |
| 990 | +GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1) | |
| 991 | +/* addc addc. addco addco. */ | |
| 992 | +GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0) | |
| 993 | +GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1) | |
| 994 | +/* adde adde. addeo addeo. */ | |
| 995 | +GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0) | |
| 996 | +GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1) | |
| 997 | +/* addme addme. addmeo addmeo. */ | |
| 998 | +GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0) | |
| 999 | +GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1) | |
| 1000 | +/* addze addze. addzeo addzeo.*/ | |
| 1001 | +GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0) | |
| 1002 | +GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1) | |
| 1003 | +/* addi */ | |
| 1004 | +GEN_HANDLER(addi, 0x0E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) | |
| 1005 | +{ | |
| 1006 | + target_long simm = SIMM(ctx->opcode); | |
| 968 | 1007 | |
| 969 | -/* Two operands arithmetic functions with no overflow allowed */ | |
| 970 | -#define GEN_INT_ARITHN_64(name, opc1, opc2, opc3, type) \ | |
| 971 | -__GEN_INT_ARITH2_64(name, opc1, opc2, opc3, 0x00000400, type) | |
| 1008 | + if (rA(ctx->opcode) == 0) { | |
| 1009 | + /* li case */ | |
| 1010 | + tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm); | |
| 1011 | + } else { | |
| 1012 | + tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm); | |
| 1013 | + } | |
| 1014 | +} | |
| 1015 | +/* addic addic.*/ | |
| 1016 | +static always_inline void gen_op_addic (DisasContext *ctx, TCGv ret, TCGv arg1, | |
| 1017 | + int compute_Rc0) | |
| 1018 | +{ | |
| 1019 | + target_long simm = SIMM(ctx->opcode); | |
| 972 | 1020 | |
| 973 | -/* One operand arithmetic functions */ | |
| 974 | -#define GEN_INT_ARITH1_64(name, opc1, opc2, opc3, type) \ | |
| 975 | -__GEN_INT_ARITH1_64(name, opc1, opc2, opc3, type) \ | |
| 976 | -__GEN_INT_ARITH1_O_64(name##o, opc1, opc2, opc3 | 0x10, type) | |
| 977 | -#else | |
| 978 | -#define GEN_INT_ARITH2_64 GEN_INT_ARITH2 | |
| 979 | -#define GEN_INT_ARITHN_64 GEN_INT_ARITHN | |
| 980 | -#define GEN_INT_ARITH1_64 GEN_INT_ARITH1 | |
| 981 | -#endif | |
| 1021 | + /* Start with XER CA and OV disabled, the most likely case */ | |
| 1022 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA)); | |
| 982 | 1023 | |
| 983 | -/* add add. addo addo. */ | |
| 984 | -static always_inline void gen_op_add (void) | |
| 985 | -{ | |
| 986 | - tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
| 1024 | + if (likely(simm != 0)) { | |
| 1025 | + TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL); | |
| 1026 | + tcg_gen_addi_tl(t0, arg1, simm); | |
| 1027 | + gen_op_arith_compute_ca(ctx, t0, arg1, 0); | |
| 1028 | + tcg_gen_mov_tl(ret, t0); | |
| 1029 | + tcg_temp_free(t0); | |
| 1030 | + } else { | |
| 1031 | + tcg_gen_mov_tl(ret, arg1); | |
| 1032 | + } | |
| 1033 | + if (compute_Rc0) { | |
| 1034 | + gen_set_Rc0(ctx, ret); | |
| 1035 | + } | |
| 987 | 1036 | } |
| 988 | -static always_inline void gen_op_addo (void) | |
| 1037 | +GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) | |
| 989 | 1038 | { |
| 990 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 991 | - tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
| 992 | - gen_op_check_addo(); | |
| 1039 | + gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0); | |
| 993 | 1040 | } |
| 994 | -#if defined(TARGET_PPC64) | |
| 995 | -#define gen_op_add_64 gen_op_add | |
| 996 | -static always_inline void gen_op_addo_64 (void) | |
| 1041 | +GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) | |
| 997 | 1042 | { |
| 998 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 999 | - tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
| 1000 | - gen_op_check_addo_64(); | |
| 1043 | + gen_op_addic(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1); | |
| 1001 | 1044 | } |
| 1002 | -#endif | |
| 1003 | -GEN_INT_ARITH2_64 (add, 0x1F, 0x0A, 0x08, PPC_INTEGER); | |
| 1004 | -/* addc addc. addco addco. */ | |
| 1005 | -static always_inline void gen_op_addc (void) | |
| 1045 | +/* addis */ | |
| 1046 | +GEN_HANDLER(addis, 0x0F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) | |
| 1006 | 1047 | { |
| 1007 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1008 | - tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
| 1009 | - gen_op_check_addc(); | |
| 1048 | + target_long simm = SIMM(ctx->opcode); | |
| 1049 | + | |
| 1050 | + if (rA(ctx->opcode) == 0) { | |
| 1051 | + /* lis case */ | |
| 1052 | + tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16); | |
| 1053 | + } else { | |
| 1054 | + tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], simm << 16); | |
| 1055 | + } | |
| 1010 | 1056 | } |
| 1011 | -static always_inline void gen_op_addco (void) | |
| 1057 | + | |
| 1058 | +static always_inline void gen_op_arith_divw (DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, | |
| 1059 | + int sign, int compute_ov) | |
| 1012 | 1060 | { |
| 1013 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1014 | - tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
| 1015 | - gen_op_check_addc(); | |
| 1016 | - gen_op_check_addo(); | |
| 1017 | -} | |
| 1061 | + int l1, l2, l3; | |
| 1062 | + TCGv t0, t1, t2; | |
| 1063 | + | |
| 1018 | 1064 | #if defined(TARGET_PPC64) |
| 1019 | -static always_inline void gen_op_addc_64 (void) | |
| 1020 | -{ | |
| 1021 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1022 | - tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
| 1023 | - gen_op_check_addc_64(); | |
| 1024 | -} | |
| 1025 | -static always_inline void gen_op_addco_64 (void) | |
| 1026 | -{ | |
| 1027 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1028 | - tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
| 1029 | - gen_op_check_addc_64(); | |
| 1030 | - gen_op_check_addo_64(); | |
| 1031 | -} | |
| 1065 | + t0 = tcg_temp_local_new(TCG_TYPE_I32); | |
| 1066 | + t1 = t0; | |
| 1067 | + t2 = tcg_temp_local_new(TCG_TYPE_I32); | |
| 1068 | + tcg_gen_trunc_i64_i32(t1, arg1); | |
| 1069 | + tcg_gen_trunc_i64_i32(t2, arg2); | |
| 1070 | +#else | |
| 1071 | + t0 = ret; | |
| 1072 | + t1 = arg1; | |
| 1073 | + t2 = arg2; | |
| 1032 | 1074 | #endif |
| 1033 | -GEN_INT_ARITH2_64 (addc, 0x1F, 0x0A, 0x00, PPC_INTEGER); | |
| 1034 | -/* adde adde. addeo addeo. */ | |
| 1035 | -static always_inline void gen_op_addeo (void) | |
| 1036 | -{ | |
| 1037 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1038 | - gen_op_adde(); | |
| 1039 | - gen_op_check_addo(); | |
| 1040 | -} | |
| 1075 | + l1 = gen_new_label(); | |
| 1076 | + l2 = gen_new_label(); | |
| 1077 | + tcg_gen_brcondi_i32(TCG_COND_EQ, t2, 0, l1); | |
| 1078 | + if (sign) { | |
| 1079 | + l3 = gen_new_label(); | |
| 1080 | + tcg_gen_brcondi_i32(TCG_COND_NE, t2, -1, l3); | |
| 1081 | + tcg_gen_brcondi_i32(TCG_COND_EQ, t1, INT32_MIN, l1); | |
| 1082 | + gen_set_label(l3); | |
| 1083 | + } | |
| 1084 | + if (sign) { | |
| 1085 | + tcg_gen_div_i32(t0, t1, t2); | |
| 1086 | + } else { | |
| 1087 | + tcg_gen_divu_i32(t0, t1, t2); | |
| 1088 | + } | |
| 1089 | + if (compute_ov) { | |
| 1090 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV)); | |
| 1091 | + } | |
| 1092 | + tcg_gen_br(l2); | |
| 1093 | + gen_set_label(l1); | |
| 1094 | + if (sign) { | |
| 1095 | + tcg_gen_sari_i32(t0, t1, 31); | |
| 1096 | + } else { | |
| 1097 | + tcg_gen_movi_i32(t0, 0); | |
| 1098 | + } | |
| 1099 | + if (compute_ov) { | |
| 1100 | + tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO)); | |
| 1101 | + } | |
| 1102 | + gen_set_label(l2); | |
| 1041 | 1103 | #if defined(TARGET_PPC64) |
| 1042 | -static always_inline void gen_op_addeo_64 (void) | |
| 1043 | -{ | |
| 1044 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1045 | - gen_op_adde_64(); | |
| 1046 | - gen_op_check_addo_64(); | |
| 1047 | -} | |
| 1104 | + tcg_gen_extu_i32_i64(ret, t0); | |
| 1105 | + tcg_temp_free(t0); | |
| 1048 | 1106 | #endif |
| 1049 | -GEN_INT_ARITH2_64 (adde, 0x1F, 0x0A, 0x04, PPC_INTEGER); | |
| 1050 | -/* addme addme. addmeo addmeo. */ | |
| 1051 | -static always_inline void gen_op_addme (void) | |
| 1052 | -{ | |
| 1053 | - tcg_gen_mov_tl(cpu_T[1], cpu_T[0]); | |
| 1054 | - gen_op_add_me(); | |
| 1107 | + if (unlikely(Rc(ctx->opcode) != 0)) | |
| 1108 | + gen_set_Rc0(ctx, ret); | |
| 1055 | 1109 | } |
| 1110 | +/* Div functions */ | |
| 1111 | +#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ | |
| 1112 | +GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER) \ | |
| 1113 | +{ \ | |
| 1114 | + gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \ | |
| 1115 | + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ | |
| 1116 | + sign, compute_ov); \ | |
| 1117 | +} | |
| 1118 | +/* divwu divwu. divwuo divwuo. */ | |
| 1119 | +GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0); | |
| 1120 | +GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1); | |
| 1121 | +/* divw divw. divwo divwo. */ | |
| 1122 | +GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0); | |
| 1123 | +GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1); | |
| 1056 | 1124 | #if defined(TARGET_PPC64) |
| 1057 | -static always_inline void gen_op_addme_64 (void) | |
| 1125 | +static always_inline void gen_op_divd (DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, | |
| 1126 | + int sign, int compute_ov) | |
| 1058 | 1127 | { |
| 1059 | - tcg_gen_mov_tl(cpu_T[1], cpu_T[0]); | |
| 1060 | - gen_op_add_me_64(); | |
| 1128 | + int l1, l2, l3; | |
| 1129 | + | |
| 1130 | + l1 = gen_new_label(); | |
| 1131 | + l2 = gen_new_label(); | |
| 1132 | + | |
| 1133 | + tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1); | |
| 1134 | + if (sign) { | |
| 1135 | + l3 = gen_new_label(); | |
| 1136 | + tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3); | |
| 1137 | + tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1); | |
| 1138 | + gen_set_label(l3); | |
| 1139 | + } | |
| 1140 | + if (sign) { | |
| 1141 | + tcg_gen_div_i64(ret, arg1, arg2); | |
| 1142 | + } else { | |
| 1143 | + tcg_gen_divu_i64(ret, arg1, arg2); | |
| 1144 | + } | |
| 1145 | + if (compute_ov) { | |
| 1146 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV)); | |
| 1147 | + } | |
| 1148 | + tcg_gen_br(l2); | |
| 1149 | + gen_set_label(l1); | |
| 1150 | + if (sign) { | |
| 1151 | + tcg_gen_sari_i64(ret, arg1, 63); | |
| 1152 | + } else { | |
| 1153 | + tcg_gen_movi_i64(ret, 0); | |
| 1154 | + } | |
| 1155 | + if (compute_ov) { | |
| 1156 | + tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO)); | |
| 1157 | + } | |
| 1158 | + gen_set_label(l2); | |
| 1159 | + if (unlikely(Rc(ctx->opcode) != 0)) | |
| 1160 | + gen_set_Rc0(ctx, ret); | |
| 1061 | 1161 | } |
| 1162 | +#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ | |
| 1163 | +GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) \ | |
| 1164 | +{ \ | |
| 1165 | + gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \ | |
| 1166 | + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ | |
| 1167 | + sign, compute_ov); \ | |
| 1168 | +} | |
| 1169 | +/* divwu divwu. divwuo divwuo. */ | |
| 1170 | +GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0); | |
| 1171 | +GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1); | |
| 1172 | +/* divw divw. divwo divwo. */ | |
| 1173 | +GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0); | |
| 1174 | +GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1); | |
| 1062 | 1175 | #endif |
| 1063 | -GEN_INT_ARITH1_64 (addme, 0x1F, 0x0A, 0x07, PPC_INTEGER); | |
| 1064 | -/* addze addze. addzeo addzeo. */ | |
| 1065 | -static always_inline void gen_op_addze (void) | |
| 1176 | + | |
| 1177 | +/* mulhw mulhw. */ | |
| 1178 | +GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER) | |
| 1066 | 1179 | { |
| 1067 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1068 | - gen_op_add_ze(); | |
| 1069 | - gen_op_check_addc(); | |
| 1180 | + TCGv t0, t1; | |
| 1181 | + | |
| 1182 | + t0 = tcg_temp_new(TCG_TYPE_I64); | |
| 1183 | + t1 = tcg_temp_new(TCG_TYPE_I64); | |
| 1184 | +#if defined(TARGET_PPC64) | |
| 1185 | + tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]); | |
| 1186 | + tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]); | |
| 1187 | + tcg_gen_mul_i64(t0, t0, t1); | |
| 1188 | + tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32); | |
| 1189 | +#else | |
| 1190 | + tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]); | |
| 1191 | + tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]); | |
| 1192 | + tcg_gen_mul_i64(t0, t0, t1); | |
| 1193 | + tcg_gen_shri_i64(t0, t0, 32); | |
| 1194 | + tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0); | |
| 1195 | +#endif | |
| 1196 | + tcg_temp_free(t0); | |
| 1197 | + tcg_temp_free(t1); | |
| 1198 | + if (unlikely(Rc(ctx->opcode) != 0)) | |
| 1199 | + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); | |
| 1070 | 1200 | } |
| 1071 | -static always_inline void gen_op_addzeo (void) | |
| 1201 | +/* mulhwu mulhwu. */ | |
| 1202 | +GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER) | |
| 1072 | 1203 | { |
| 1073 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1074 | - gen_op_add_ze(); | |
| 1075 | - gen_op_check_addc(); | |
| 1076 | - gen_op_check_addo(); | |
| 1077 | -} | |
| 1204 | + TCGv t0, t1; | |
| 1205 | + | |
| 1206 | + t0 = tcg_temp_new(TCG_TYPE_I64); | |
| 1207 | + t1 = tcg_temp_new(TCG_TYPE_I64); | |
| 1078 | 1208 | #if defined(TARGET_PPC64) |
| 1079 | -static always_inline void gen_op_addze_64 (void) | |
| 1080 | -{ | |
| 1081 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1082 | - gen_op_add_ze(); | |
| 1083 | - gen_op_check_addc_64(); | |
| 1209 | + tcg_gen_ext32u_i64(t0, cpu_gpr[rA(ctx->opcode)]); | |
| 1210 | + tcg_gen_ext32u_i64(t1, cpu_gpr[rB(ctx->opcode)]); | |
| 1211 | + tcg_gen_mul_i64(t0, t0, t1); | |
| 1212 | + tcg_gen_shri_i64(cpu_gpr[rD(ctx->opcode)], t0, 32); | |
| 1213 | +#else | |
| 1214 | + tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]); | |
| 1215 | + tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]); | |
| 1216 | + tcg_gen_mul_i64(t0, t0, t1); | |
| 1217 | + tcg_gen_shri_i64(t0, t0, 32); | |
| 1218 | + tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0); | |
| 1219 | +#endif | |
| 1220 | + tcg_temp_free(t0); | |
| 1221 | + tcg_temp_free(t1); | |
| 1222 | + if (unlikely(Rc(ctx->opcode) != 0)) | |
| 1223 | + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); | |
| 1084 | 1224 | } |
| 1085 | -static always_inline void gen_op_addzeo_64 (void) | |
| 1225 | +/* mullw mullw. */ | |
| 1226 | +GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER) | |
| 1086 | 1227 | { |
| 1087 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1088 | - gen_op_add_ze(); | |
| 1089 | - gen_op_check_addc_64(); | |
| 1090 | - gen_op_check_addo_64(); | |
| 1091 | -} | |
| 1228 | +#if defined(TARGET_PPC64) | |
| 1229 | + TCGv t0, t1; | |
| 1230 | + t0 = tcg_temp_new(TCG_TYPE_TL); | |
| 1231 | + t1 = tcg_temp_new(TCG_TYPE_TL); | |
| 1232 | + tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]); | |
| 1233 | + tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]); | |
| 1234 | + tcg_gen_mul_tl(t0, t0, t1); | |
| 1235 | + tcg_temp_free(t0); | |
| 1236 | + tcg_gen_ext32s_tl(cpu_gpr[rD(ctx->opcode)], t0); | |
| 1237 | + tcg_temp_free(t1); | |
| 1238 | +#else | |
| 1239 | + tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], | |
| 1240 | + cpu_gpr[rB(ctx->opcode)]); | |
| 1092 | 1241 | #endif |
| 1093 | -GEN_INT_ARITH1_64 (addze, 0x1F, 0x0A, 0x06, PPC_INTEGER); | |
| 1094 | -/* divw divw. divwo divwo. */ | |
| 1095 | -GEN_INT_ARITH2 (divw, 0x1F, 0x0B, 0x0F, PPC_INTEGER); | |
| 1096 | -/* divwu divwu. divwuo divwuo. */ | |
| 1097 | -GEN_INT_ARITH2 (divwu, 0x1F, 0x0B, 0x0E, PPC_INTEGER); | |
| 1098 | -/* mulhw mulhw. */ | |
| 1099 | -GEN_INT_ARITHN (mulhw, 0x1F, 0x0B, 0x02, PPC_INTEGER); | |
| 1100 | -/* mulhwu mulhwu. */ | |
| 1101 | -GEN_INT_ARITHN (mulhwu, 0x1F, 0x0B, 0x00, PPC_INTEGER); | |
| 1102 | -/* mullw mullw. mullwo mullwo. */ | |
| 1103 | -GEN_INT_ARITH2 (mullw, 0x1F, 0x0B, 0x07, PPC_INTEGER); | |
| 1104 | -/* neg neg. nego nego. */ | |
| 1105 | -GEN_INT_ARITH1_64 (neg, 0x1F, 0x08, 0x03, PPC_INTEGER); | |
| 1106 | -/* subf subf. subfo subfo. */ | |
| 1107 | -static always_inline void gen_op_subf (void) | |
| 1108 | -{ | |
| 1109 | - tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]); | |
| 1242 | + if (unlikely(Rc(ctx->opcode) != 0)) | |
| 1243 | + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); | |
| 1110 | 1244 | } |
| 1111 | -static always_inline void gen_op_subfo (void) | |
| 1245 | +/* mullwo mullwo. */ | |
| 1246 | +GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER) | |
| 1112 | 1247 | { |
| 1113 | - tcg_gen_not_tl(cpu_T[2], cpu_T[0]); | |
| 1114 | - tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]); | |
| 1115 | - gen_op_check_addo(); | |
| 1116 | -} | |
| 1248 | + int l1; | |
| 1249 | + TCGv t0, t1; | |
| 1250 | + | |
| 1251 | + t0 = tcg_temp_local_new(TCG_TYPE_I64); | |
| 1252 | + t1 = tcg_temp_local_new(TCG_TYPE_I64); | |
| 1253 | + l1 = gen_new_label(); | |
| 1254 | + /* Start with XER OV disabled, the most likely case */ | |
| 1255 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV)); | |
| 1117 | 1256 | #if defined(TARGET_PPC64) |
| 1118 | -#define gen_op_subf_64 gen_op_subf | |
| 1119 | -static always_inline void gen_op_subfo_64 (void) | |
| 1120 | -{ | |
| 1121 | - tcg_gen_not_i64(cpu_T[2], cpu_T[0]); | |
| 1122 | - tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]); | |
| 1123 | - gen_op_check_addo_64(); | |
| 1124 | -} | |
| 1257 | + tcg_gen_ext32s_i64(t0, cpu_gpr[rA(ctx->opcode)]); | |
| 1258 | + tcg_gen_ext32s_i64(t1, cpu_gpr[rB(ctx->opcode)]); | |
| 1259 | +#else | |
| 1260 | + tcg_gen_ext_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]); | |
| 1261 | + tcg_gen_ext_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]); | |
| 1125 | 1262 | #endif |
| 1126 | -GEN_INT_ARITH2_64 (subf, 0x1F, 0x08, 0x01, PPC_INTEGER); | |
| 1127 | -/* subfc subfc. subfco subfco. */ | |
| 1128 | -static always_inline void gen_op_subfc (void) | |
| 1129 | -{ | |
| 1130 | - tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]); | |
| 1131 | - gen_op_check_subfc(); | |
| 1263 | + tcg_gen_mul_i64(t0, t0, t1); | |
| 1264 | +#if defined(TARGET_PPC64) | |
| 1265 | + tcg_gen_ext32s_i64(cpu_gpr[rD(ctx->opcode)], t0); | |
| 1266 | + tcg_gen_brcond_i64(TCG_COND_EQ, t0, cpu_gpr[rD(ctx->opcode)], l1); | |
| 1267 | +#else | |
| 1268 | + tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t0); | |
| 1269 | + tcg_gen_ext32s_i64(t1, t0); | |
| 1270 | + tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1); | |
| 1271 | +#endif | |
| 1272 | + tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO)); | |
| 1273 | + gen_set_label(l1); | |
| 1274 | + if (unlikely(Rc(ctx->opcode) != 0)) | |
| 1275 | + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); | |
| 1132 | 1276 | } |
| 1133 | -static always_inline void gen_op_subfco (void) | |
| 1277 | +/* mulli */ | |
| 1278 | +GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) | |
| 1134 | 1279 | { |
| 1135 | - tcg_gen_not_tl(cpu_T[2], cpu_T[0]); | |
| 1136 | - tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]); | |
| 1137 | - gen_op_check_subfc(); | |
| 1138 | - gen_op_check_addo(); | |
| 1280 | + tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], | |
| 1281 | + SIMM(ctx->opcode)); | |
| 1139 | 1282 | } |
| 1140 | 1283 | #if defined(TARGET_PPC64) |
| 1141 | -static always_inline void gen_op_subfc_64 (void) | |
| 1142 | -{ | |
| 1143 | - tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]); | |
| 1144 | - gen_op_check_subfc_64(); | |
| 1284 | +#define GEN_INT_ARITH_MUL_HELPER(name, opc3) \ | |
| 1285 | +GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) \ | |
| 1286 | +{ \ | |
| 1287 | + tcg_gen_helper_1_2(helper_##name, cpu_gpr[rD(ctx->opcode)], \ | |
| 1288 | + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \ | |
| 1289 | + if (unlikely(Rc(ctx->opcode) != 0)) \ | |
| 1290 | + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \ | |
| 1145 | 1291 | } |
| 1146 | -static always_inline void gen_op_subfco_64 (void) | |
| 1292 | +/* mulhd mulhd. */ | |
| 1293 | +GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00); | |
| 1294 | +/* mulhdu mulhdu. */ | |
| 1295 | +GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02); | |
| 1296 | +/* mulld mulld. */ | |
| 1297 | +GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B) | |
| 1147 | 1298 | { |
| 1148 | - tcg_gen_not_i64(cpu_T[2], cpu_T[0]); | |
| 1149 | - tcg_gen_sub_tl(cpu_T[0], cpu_T[1], cpu_T[0]); | |
| 1150 | - gen_op_check_subfc_64(); | |
| 1151 | - gen_op_check_addo_64(); | |
| 1299 | + tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], | |
| 1300 | + cpu_gpr[rB(ctx->opcode)]); | |
| 1301 | + if (unlikely(Rc(ctx->opcode) != 0)) | |
| 1302 | + gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); | |
| 1152 | 1303 | } |
| 1304 | +/* mulldo mulldo. */ | |
| 1305 | +GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17); | |
| 1153 | 1306 | #endif |
| 1154 | -GEN_INT_ARITH2_64 (subfc, 0x1F, 0x08, 0x00, PPC_INTEGER); | |
| 1155 | -/* subfe subfe. subfeo subfeo. */ | |
| 1156 | -static always_inline void gen_op_subfeo (void) | |
| 1307 | + | |
| 1308 | +/* neg neg. nego nego. */ | |
| 1309 | +static always_inline void gen_op_neg (DisasContext *ctx, TCGv ret, TCGv arg1, int ov_check) | |
| 1157 | 1310 | { |
| 1158 | - tcg_gen_not_tl(cpu_T[2], cpu_T[0]); | |
| 1159 | - gen_op_subfe(); | |
| 1160 | - gen_op_check_addo(); | |
| 1161 | -} | |
| 1311 | + int l1, l2; | |
| 1312 | + | |
| 1313 | + l1 = gen_new_label(); | |
| 1314 | + l2 = gen_new_label(); | |
| 1162 | 1315 | #if defined(TARGET_PPC64) |
| 1163 | -#define gen_op_subfe_64 gen_op_subfe | |
| 1164 | -static always_inline void gen_op_subfeo_64 (void) | |
| 1316 | + if (ctx->sf_mode) { | |
| 1317 | + tcg_gen_brcondi_tl(TCG_COND_EQ, arg1, INT64_MIN, l1); | |
| 1318 | + } else { | |
| 1319 | + TCGv t0 = tcg_temp_new(TCG_TYPE_TL); | |
| 1320 | + tcg_gen_ext32s_tl(t0, arg1); | |
| 1321 | + tcg_gen_brcondi_tl(TCG_COND_EQ, t0, INT32_MIN, l1); | |
| 1322 | + } | |
| 1323 | +#else | |
| 1324 | + tcg_gen_brcondi_tl(TCG_COND_EQ, arg1, INT32_MIN, l1); | |
| 1325 | +#endif | |
| 1326 | + tcg_gen_neg_tl(ret, arg1); | |
| 1327 | + if (ov_check) { | |
| 1328 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV)); | |
| 1329 | + } | |
| 1330 | + tcg_gen_br(l2); | |
| 1331 | + gen_set_label(l1); | |
| 1332 | + tcg_gen_mov_tl(ret, arg1); | |
| 1333 | + if (ov_check) { | |
| 1334 | + tcg_gen_ori_tl(cpu_xer, cpu_xer, (1 << XER_OV) | (1 << XER_SO)); | |
| 1335 | + } | |
| 1336 | + gen_set_label(l2); | |
| 1337 | + if (unlikely(Rc(ctx->opcode) != 0)) | |
| 1338 | + gen_set_Rc0(ctx, ret); | |
| 1339 | +} | |
| 1340 | +GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER) | |
| 1165 | 1341 | { |
| 1166 | - tcg_gen_not_i64(cpu_T[2], cpu_T[0]); | |
| 1167 | - gen_op_subfe_64(); | |
| 1168 | - gen_op_check_addo_64(); | |
| 1342 | + gen_op_neg(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0); | |
| 1169 | 1343 | } |
| 1170 | -#endif | |
| 1171 | -GEN_INT_ARITH2_64 (subfe, 0x1F, 0x08, 0x04, PPC_INTEGER); | |
| 1172 | -/* subfme subfme. subfmeo subfmeo. */ | |
| 1173 | -GEN_INT_ARITH1_64 (subfme, 0x1F, 0x08, 0x07, PPC_INTEGER); | |
| 1174 | -/* subfze subfze. subfzeo subfzeo. */ | |
| 1175 | -GEN_INT_ARITH1_64 (subfze, 0x1F, 0x08, 0x06, PPC_INTEGER); | |
| 1176 | -/* addi */ | |
| 1177 | -GEN_HANDLER(addi, 0x0E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) | |
| 1344 | +GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER) | |
| 1178 | 1345 | { |
| 1179 | - target_long simm = SIMM(ctx->opcode); | |
| 1180 | - | |
| 1181 | - if (rA(ctx->opcode) == 0) { | |
| 1182 | - /* li case */ | |
| 1183 | - tcg_gen_movi_tl(cpu_T[0], simm); | |
| 1184 | - } else { | |
| 1185 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); | |
| 1186 | - if (likely(simm != 0)) | |
| 1187 | - tcg_gen_addi_tl(cpu_T[0], cpu_T[0], simm); | |
| 1188 | - } | |
| 1189 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); | |
| 1346 | + gen_op_neg(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1); | |
| 1190 | 1347 | } |
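For clarity, here is an illustrative C model of the 32-bit behaviour that gen_op_neg implements above (invented names, not QEMU code): only the most negative input overflows, and in that case the operand is passed through unchanged while the ov_check variant sets OV and the sticky SO bit.

#include <stdint.h>

static int32_t neg_model(int32_t ra, int *xer_ov, int *xer_so, int ov_check)
{
    if (ra == INT32_MIN) {              /* the branch to l1 above */
        if (ov_check) {
            *xer_ov = 1;                /* nego: set OV ...       */
            *xer_so = 1;                /* ... and the sticky SO  */
        }
        return ra;                      /* result is the unchanged operand */
    }
    if (ov_check) {
        *xer_ov = 0;                    /* nego clears OV on the normal path */
    }
    return -ra;                         /* ordinary two's-complement negation */
}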
| 1191 | -/* addic */ | |
| 1192 | -GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) | |
| 1348 | + | |
| 1349 | +/* Common subf function */ | |
| 1350 | +static always_inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, | |
| 1351 | + int add_ca, int compute_ca, int compute_ov) | |
| 1193 | 1352 | { |
| 1194 | - target_long simm = SIMM(ctx->opcode); | |
| 1353 | + TCGv t0, t1; | |
| 1195 | 1354 | |
| 1196 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); | |
| 1197 | - if (likely(simm != 0)) { | |
| 1198 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1199 | - tcg_gen_addi_tl(cpu_T[0], cpu_T[0], simm); | |
| 1200 | -#if defined(TARGET_PPC64) | |
| 1201 | - if (ctx->sf_mode) | |
| 1202 | - gen_op_check_addc_64(); | |
| 1203 | - else | |
| 1204 | -#endif | |
| 1205 | - gen_op_check_addc(); | |
| 1355 | + if ((!compute_ca && !compute_ov) || | |
| 1356 | + (GET_TCGV(ret) != GET_TCGV(arg1) && GET_TCGV(ret) != GET_TCGV(arg2))) { | |
| 1357 | + t0 = ret; | |
| 1206 | 1358 | } else { |
| 1207 | - tcg_gen_andi_i32(cpu_xer, cpu_xer, ~(1 << XER_CA)); | |
| 1359 | + t0 = tcg_temp_local_new(TCG_TYPE_TL); | |
| 1208 | 1360 | } |
| 1209 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); | |
| 1210 | -} | |
| 1211 | -/* addic. */ | |
| 1212 | -GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) | |
| 1213 | -{ | |
| 1214 | - target_long simm = SIMM(ctx->opcode); | |
| 1215 | 1361 | |
| 1216 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); | |
| 1217 | - if (likely(simm != 0)) { | |
| 1218 | - tcg_gen_mov_tl(cpu_T[2], cpu_T[0]); | |
| 1219 | - tcg_gen_addi_tl(cpu_T[0], cpu_T[0], simm); | |
| 1220 | -#if defined(TARGET_PPC64) | |
| 1221 | - if (ctx->sf_mode) | |
| 1222 | - gen_op_check_addc_64(); | |
| 1223 | - else | |
| 1224 | -#endif | |
| 1225 | - gen_op_check_addc(); | |
| 1226 | - } else { | |
| 1227 | - tcg_gen_andi_i32(cpu_xer, cpu_xer, ~(1 << XER_CA)); | |
| 1362 | + if (add_ca) { | |
| 1363 | + t1 = tcg_temp_local_new(TCG_TYPE_TL); | |
| 1364 | + tcg_gen_andi_tl(t1, cpu_xer, (1 << XER_CA)); | |
| 1365 | + tcg_gen_shri_tl(t1, t1, XER_CA); | |
| 1228 | 1366 | } |
| 1229 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); | |
| 1230 | - gen_set_Rc0(ctx, cpu_T[0]); | |
| 1231 | -} | |
| 1232 | -/* addis */ | |
| 1233 | -GEN_HANDLER(addis, 0x0F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) | |
| 1234 | -{ | |
| 1235 | - target_long simm = SIMM(ctx->opcode); | |
| 1236 | 1367 | |
| 1237 | - if (rA(ctx->opcode) == 0) { | |
| 1238 | - /* lis case */ | |
| 1239 | - tcg_gen_movi_tl(cpu_T[0], simm << 16); | |
| 1368 | + if (compute_ca && compute_ov) { | |
| 1369 | + /* Start with XER CA and OV disabled, the most likely case */ | |
| 1370 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~((1 << XER_CA) | (1 << XER_OV))); | |
| 1371 | + } else if (compute_ca) { | |
| 1372 | + /* Start with XER CA disabled, the most likely case */ | |
| 1373 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA)); | |
| 1374 | + } else if (compute_ov) { | |
| 1375 | + /* Start with XER OV disabled, the most likely case */ | |
| 1376 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_OV)); | |
| 1377 | + } | |
| 1378 | + | |
| 1379 | + if (add_ca) { | |
| 1380 | + tcg_gen_not_tl(t0, arg1); | |
| 1381 | + tcg_gen_add_tl(t0, t0, arg2); | |
| 1382 | + gen_op_arith_compute_ca(ctx, t0, arg2, 0); | |
| 1383 | + tcg_gen_add_tl(t0, t0, t1); | |
| 1384 | + gen_op_arith_compute_ca(ctx, t0, t1, 0); | |
| 1385 | + tcg_temp_free(t1); | |
| 1240 | 1386 | } else { |
| 1241 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); | |
| 1242 | - if (likely(simm != 0)) | |
| 1243 | - tcg_gen_addi_tl(cpu_T[0], cpu_T[0], simm << 16); | |
| 1387 | + tcg_gen_sub_tl(t0, arg2, arg1); | |
| 1388 | + if (compute_ca) { | |
| 1389 | + gen_op_arith_compute_ca(ctx, t0, arg2, 1); | |
| 1390 | + } | |
| 1391 | + } | |
| 1392 | + if (compute_ov) { | |
| 1393 | + gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1); | |
| 1394 | + } | |
| 1395 | + | |
| 1396 | + if (unlikely(Rc(ctx->opcode) != 0)) | |
| 1397 | + gen_set_Rc0(ctx, t0); | |
| 1398 | + | |
| 1399 | + if (GET_TCGV(t0) != GET_TCGV(ret)) { | |
| 1400 | + tcg_gen_mov_tl(ret, t0); | |
| 1401 | + tcg_temp_free(t0); | |
| 1244 | 1402 | } |
| 1245 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); | |
| 1246 | 1403 | } |
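A rough 32-bit C model of the carry handling in the common subf path above (illustration only; subf_model and the ca parameter are invented names): with add_ca the result is ~rA + rB + CA and CA is set if either addition carries out, matching the two gen_op_arith_compute_ca() calls, while the plain subtract sets CA when no borrow occurs.

#include <stdint.h>

static uint32_t subf_model(uint32_t ra, uint32_t rb, int add_ca, int *ca)
{
    if (add_ca) {
        uint32_t t = ~ra + rb;          /* first addition          */
        int carry = t < rb;             /* did it carry out?       */
        uint32_t r = t + (uint32_t)*ca; /* add the incoming XER.CA */
        carry |= r < (uint32_t)*ca;     /* did that carry out?     */
        *ca = carry;
        return r;
    } else {
        *ca = rb >= ra;                 /* CA means "no borrow"    */
        return rb - ra;
    }
}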
| 1247 | -/* mulli */ | |
| 1248 | -GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) | |
| 1249 | -{ | |
| 1250 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); | |
| 1251 | - gen_op_mulli(SIMM(ctx->opcode)); | |
| 1252 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); | |
| 1404 | +/* Subtract functions with two register operands */ | |
| 1405 | +#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \ | |
| 1406 | +GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER) \ | |
| 1407 | +{ \ | |
| 1408 | + gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ | |
| 1409 | + cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ | |
| 1410 | + add_ca, compute_ca, compute_ov); \ | |
| 1411 | +} | |
| 1412 | +/* Subtract functions with one register operand and one immediate */ | |
| 1413 | +#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \ | |
| 1414 | + add_ca, compute_ca, compute_ov) \ | |
| 1415 | +GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER) \ | |
| 1416 | +{ \ | |
| 1417 | + TCGv t0 = tcg_const_local_tl(const_val); \ | |
| 1418 | + gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ | |
| 1419 | + cpu_gpr[rA(ctx->opcode)], t0, \ | |
| 1420 | + add_ca, compute_ca, compute_ov); \ | |
| 1421 | + tcg_temp_free(t0); \ | |
| 1253 | 1422 | } |
| 1423 | +/* subf subf. subfo subfo. */ | |
| 1424 | +GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0) | |
| 1425 | +GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1) | |
| 1426 | +/* subfc subfc. subfco subfco. */ | |
| 1427 | +GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0) | |
| 1428 | +GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1) | |
| 1429 | +/* subfe subfe. subfeo subfeo. */ | |
| 1430 | +GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0) | |
| 1431 | +GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1) | |
| 1432 | +/* subfme subfme. subfmeo subfmeo. */ | |
| 1433 | +GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0) | |
| 1434 | +GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1) | |
| 1435 | +/* subfze subfze. subfzeo subfzeo. */ | |
| 1436 | +GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0) | |
| 1437 | +GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1) | |
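For reference, expanding GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0) by hand (whitespace approximate) shows that each of these one-liners just forwards to the common helper with the appropriate flags — here add_ca and compute_ca enabled:

GEN_HANDLER(subfe, 0x1F, 0x08, 0x04, 0x00000000, PPC_INTEGER)
{
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
                      1, 1, 0);
}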
| 1254 | 1438 | /* subfic */ |
| 1255 | 1439 | GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) |
| 1256 | 1440 | { |
| 1257 | - tcg_gen_mov_tl(cpu_T[0], cpu_gpr[rA(ctx->opcode)]); | |
| 1258 | -#if defined(TARGET_PPC64) | |
| 1259 | - if (ctx->sf_mode) | |
| 1260 | - gen_op_subfic_64(SIMM(ctx->opcode)); | |
| 1261 | - else | |
| 1262 | -#endif | |
| 1263 | - gen_op_subfic(SIMM(ctx->opcode)); | |
| 1264 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); | |
| 1441 | + /* Start with XER CA disabled, the most likely case */ | |
| 1442 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA)); | |
| 1443 | + TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL); | |
| 1444 | + TCGv t1 = tcg_const_local_tl(SIMM(ctx->opcode)); | |
| 1445 | + tcg_gen_sub_tl(t0, t1, cpu_gpr[rA(ctx->opcode)]); | |
| 1446 | + gen_op_arith_compute_ca(ctx, t0, t1, 1); | |
| 1447 | + tcg_temp_free(t1); | |
| 1448 | + tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0); | |
| 1449 | + tcg_temp_free(t0); | |
| 1265 | 1450 | } |
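subfic follows the same pattern with an immediate minuend; an illustrative 32-bit model (invented name, not QEMU code):

#include <stdint.h>

static uint32_t subfic_model(uint32_t ra, int32_t simm, int *ca)
{
    *ca = (uint32_t)simm >= ra;   /* CA set when the subtraction does not borrow */
    return (uint32_t)simm - ra;
}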
| 1266 | 1451 | |
| 1267 | -#if defined(TARGET_PPC64) | |
| 1268 | -/* mulhd mulhd. */ | |
| 1269 | -GEN_INT_ARITHN (mulhd, 0x1F, 0x09, 0x02, PPC_64B); | |
| 1270 | -/* mulhdu mulhdu. */ | |
| 1271 | -GEN_INT_ARITHN (mulhdu, 0x1F, 0x09, 0x00, PPC_64B); | |
| 1272 | -/* mulld mulld. mulldo mulldo. */ | |
| 1273 | -GEN_INT_ARITH2 (mulld, 0x1F, 0x09, 0x07, PPC_64B); | |
| 1274 | -/* divd divd. divdo divdo. */ | |
| 1275 | -GEN_INT_ARITH2 (divd, 0x1F, 0x09, 0x0F, PPC_64B); | |
| 1276 | -/* divdu divdu. divduo divduo. */ | |
| 1277 | -GEN_INT_ARITH2 (divdu, 0x1F, 0x09, 0x0E, PPC_64B); | |
| 1278 | -#endif | |
| 1279 | - | |
| 1280 | 1452 | /*** Integer logical ***/ |
| 1281 | 1453 | #define GEN_LOGICAL2(name, tcg_op, opc, type) \ |
| 1282 | 1454 | GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type) \ |
| ... | ... | @@ -5090,7 +5262,7 @@ static always_inline void gen_405_mulladd_insn (DisasContext *ctx, |
| 5090 | 5262 | } |
| 5091 | 5263 | if (opc2 & 0x02) { |
| 5092 | 5264 | /* nmultiply-and-accumulate (0x0E) */ |
| 5093 | - gen_op_neg(); | |
| 5265 | + tcg_gen_neg_tl(cpu_T[0], cpu_T[0]); | |
| 5094 | 5266 | } |
| 5095 | 5267 | if (opc2 & 0x04) { |
| 5096 | 5268 | /* (n)multiply-and-accumulate (0x0C - 0x0E) */ | ... | ... |