Commit 3d7b417e13152587df587fe58789740c3ef7abb9
1 parent: d75a0b97
target-ppc: Convert XER accesses to TCG
Define XER bits as a single register and access them individually to avoid defining 5 32-bit registers (TCG doesn't permit mapping 8-bit registers).

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5500 c046a42c-6fe2-441c-8c8c-71466251a162
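To make the representation change concrete, here is a minimal standalone sketch in plain C (not the QEMU sources; `struct xer_old` and the local names are invented for illustration). The old code kept one byte per XER field, in the array slots shown by the removed cpu.h macros, while the new code packs everything into a single word and derives each field with a shift and a mask, which is what the new `xer_so`/`xer_ov`/`xer_ca`/`xer_cmp`/`xer_bc` macros do.

```c
#include <stdint.h>
#include <stdio.h>

/* Old layout: one byte per field inside uint8_t xer[8]
 * (bc = xer[0], cmp = xer[1], ca = xer[2], so = xer[4], ov = xer[6]). */
struct xer_old {
    uint8_t bc, cmp, ca, pad3, so, pad5, ov, pad7;
};

/* New layout: a single word, using the bit positions from cpu.h. */
#define XER_SO  31
#define XER_OV  30
#define XER_CA  29
#define XER_CMP 8
#define XER_BC  0

int main(void)
{
    struct xer_old old = { .bc = 5, .ca = 1, .so = 1 };

    /* The value the removed hreg_load_xer() used to assemble from the bytes. */
    uint32_t xer = ((uint32_t)old.so << XER_SO) | ((uint32_t)old.ov << XER_OV) |
                   ((uint32_t)old.ca << XER_CA) | ((uint32_t)old.cmp << XER_CMP) |
                   ((uint32_t)old.bc << XER_BC);

    /* The new xer_so/xer_ov/xer_ca/xer_bc macros reduce to reads like these. */
    printf("SO=%u OV=%u CA=%u BC=%u\n",
           (unsigned)((xer >> XER_SO) & 1), (unsigned)((xer >> XER_OV) & 1),
           (unsigned)((xer >> XER_CA) & 1), (unsigned)((xer >> XER_BC) & 0x7F));
    return 0;
}
```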
Showing 9 changed files with 174 additions and 216 deletions
gdbstub.c
| ... | ... | @@ -445,7 +445,7 @@ static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n) |
| 445 | 445 | } |
| 446 | 446 | case 67: GET_REGL(env->lr); |
| 447 | 447 | case 68: GET_REGL(env->ctr); |
| 448 | - case 69: GET_REG32(ppc_load_xer(env)); | |
| 448 | + case 69: GET_REGL(env->xer); | |
| 449 | 449 | case 70: GET_REG32(0); /* fpscr */ |
| 450 | 450 | } |
| 451 | 451 | } |
| ... | ... | @@ -485,8 +485,8 @@ static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n) |
| 485 | 485 | env->ctr = ldtul_p(mem_buf); |
| 486 | 486 | return sizeof(target_ulong); |
| 487 | 487 | case 69: |
| 488 | - ppc_store_xer(env, ldl_p(mem_buf)); | |
| 489 | - return 4; | |
| 488 | + env->xer = ldtul_p(mem_buf); | |
| 489 | + return sizeof(target_ulong); | |
| 490 | 490 | case 70: |
| 491 | 491 | /* fpscr */ |
| 492 | 492 | return 4; |
monitor.c
| ... | ... | @@ -1582,7 +1582,7 @@ static target_long monitor_get_xer (const struct MonitorDef *md, int val) |
| 1582 | 1582 | CPUState *env = mon_get_cpu(); |
| 1583 | 1583 | if (!env) |
| 1584 | 1584 | return 0; |
| 1585 | - return ppc_load_xer(env); | |
| 1585 | + return env->xer; | |
| 1586 | 1586 | } |
| 1587 | 1587 | |
| 1588 | 1588 | static target_long monitor_get_decr (const struct MonitorDef *md, int val) |
target-ppc/cpu.h
| ... | ... | @@ -553,8 +553,7 @@ struct CPUPPCState { |
| 553 | 553 | /* condition register */ |
| 554 | 554 | uint32_t crf[8]; |
| 555 | 555 | /* XER */ |
| 556 | - /* XXX: We use only 5 fields, but we want to keep the structure aligned */ | |
| 557 | - uint8_t xer[8]; | |
| 556 | + target_ulong xer; | |
| 558 | 557 | /* Reservation address */ |
| 559 | 558 | target_ulong reserve; |
| 560 | 559 | |
| ... | ... | @@ -831,16 +830,16 @@ static inline void cpu_clone_regs(CPUState *env, target_ulong newsp) |
| 831 | 830 | |
| 832 | 831 | /*****************************************************************************/ |
| 833 | 832 | /* Registers definitions */ |
| 834 | -#define XER_SO 31 | |
| 835 | -#define XER_OV 30 | |
| 836 | -#define XER_CA 29 | |
| 837 | -#define XER_CMP 8 | |
| 838 | -#define XER_BC 0 | |
| 839 | -#define xer_so env->xer[4] | |
| 840 | -#define xer_ov env->xer[6] | |
| 841 | -#define xer_ca env->xer[2] | |
| 842 | -#define xer_cmp env->xer[1] | |
| 843 | -#define xer_bc env->xer[0] | |
| 833 | +#define XER_SO 31 | |
| 834 | +#define XER_OV 30 | |
| 835 | +#define XER_CA 29 | |
| 836 | +#define XER_CMP 8 | |
| 837 | +#define XER_BC 0 | |
| 838 | +#define xer_so ((env->xer >> XER_SO) & 1) | |
| 839 | +#define xer_ov ((env->xer >> XER_OV) & 1) | |
| 840 | +#define xer_ca ((env->xer >> XER_CA) & 1) | |
| 841 | +#define xer_cmp ((env->xer >> XER_CMP) & 0xFF) | |
| 842 | +#define xer_bc ((env->xer >> XER_BC) & 0x7F) | |
| 844 | 843 | |
| 845 | 844 | /* SPR definitions */ |
| 846 | 845 | #define SPR_MQ (0x000) |
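Before the helper and op files below, note why every write site changed: the old `xer_ca`/`xer_ov`/`xer_so` macros were assignable bytes, whereas the new ones are read-only shift-and-mask expressions, so a former `xer_ca = 1` becomes an explicit OR on `env->xer` and `xer_ca = 0` becomes an AND with the complement. A minimal sketch of the two write idioms repeated throughout the rest of the diff (plain C with invented helper names, not the QEMU code):

```c
#include <stdint.h>
#include <stdio.h>

#define XER_SO 31
#define XER_OV 30
#define XER_CA 29

/* Carry is simply set or cleared for each operation. */
static void update_ca(uint32_t *xer, int ca)
{
    if (ca)
        *xer |= 1u << XER_CA;
    else
        *xer &= ~(1u << XER_CA);
}

/* OV reflects the last operation; SO is sticky, so it is set together
 * with OV but never cleared by these updates. */
static void update_ov(uint32_t *xer, int ov)
{
    if (ov)
        *xer |= (1u << XER_OV) | (1u << XER_SO);
    else
        *xer &= ~(1u << XER_OV);
}

int main(void)
{
    uint32_t xer = 0;

    update_ov(&xer, 1);          /* overflow: OV and SO set */
    update_ov(&xer, 0);          /* no overflow: OV cleared, SO stays */
    update_ca(&xer, 1);          /* carry out */
    printf("xer=%08x\n", (unsigned)xer);   /* prints a0000000: SO and CA set */
    return 0;
}
```

The asymmetry in the overflow case mirrors the PowerPC rule that SO is sticky: the converted helpers set SO whenever they set OV, but only OV is ever cleared.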
target-ppc/helper.c
| ... | ... | @@ -2124,16 +2124,6 @@ void do_store_sr (CPUPPCState *env, int srnum, target_ulong value) |
| 2124 | 2124 | } |
| 2125 | 2125 | #endif /* !defined (CONFIG_USER_ONLY) */ |
| 2126 | 2126 | |
| 2127 | -target_ulong ppc_load_xer (CPUPPCState *env) | |
| 2128 | -{ | |
| 2129 | - return hreg_load_xer(env); | |
| 2130 | -} | |
| 2131 | - | |
| 2132 | -void ppc_store_xer (CPUPPCState *env, target_ulong value) | |
| 2133 | -{ | |
| 2134 | - hreg_store_xer(env, value); | |
| 2135 | -} | |
| 2136 | - | |
| 2137 | 2127 | /* GDBstub can read and write MSR... */ |
| 2138 | 2128 | void ppc_store_msr (CPUPPCState *env, target_ulong value) |
| 2139 | 2129 | { |
target-ppc/helper_regs.h
| ... | ... | @@ -21,24 +21,6 @@ |
| 21 | 21 | #if !defined(__HELPER_REGS_H__) |
| 22 | 22 | #define __HELPER_REGS_H__ |
| 23 | 23 | |
| 24 | -static always_inline target_ulong hreg_load_xer (CPUPPCState *env) | |
| 25 | -{ | |
| 26 | - return (xer_so << XER_SO) | | |
| 27 | - (xer_ov << XER_OV) | | |
| 28 | - (xer_ca << XER_CA) | | |
| 29 | - (xer_bc << XER_BC) | | |
| 30 | - (xer_cmp << XER_CMP); | |
| 31 | -} | |
| 32 | - | |
| 33 | -static always_inline void hreg_store_xer (CPUPPCState *env, target_ulong value) | |
| 34 | -{ | |
| 35 | - xer_so = (value >> XER_SO) & 0x01; | |
| 36 | - xer_ov = (value >> XER_OV) & 0x01; | |
| 37 | - xer_ca = (value >> XER_CA) & 0x01; | |
| 38 | - xer_cmp = (value >> XER_CMP) & 0xFF; | |
| 39 | - xer_bc = (value >> XER_BC) & 0x7F; | |
| 40 | -} | |
| 41 | - | |
| 42 | 24 | /* Swap temporary saved registers with GPRs */ |
| 43 | 25 | static always_inline void hreg_swap_gpr_tgpr (CPUPPCState *env) |
| 44 | 26 | { |
target-ppc/op.c
| ... | ... | @@ -58,49 +58,6 @@ void OPPROTO op_store_cr (void) |
| 58 | 58 | RETURN(); |
| 59 | 59 | } |
| 60 | 60 | |
| 61 | -void OPPROTO op_load_xer_cr (void) | |
| 62 | -{ | |
| 63 | - T0 = (xer_so << 3) | (xer_ov << 2) | (xer_ca << 1); | |
| 64 | - RETURN(); | |
| 65 | -} | |
| 66 | - | |
| 67 | -void OPPROTO op_clear_xer_ov (void) | |
| 68 | -{ | |
| 69 | - xer_so = 0; | |
| 70 | - xer_ov = 0; | |
| 71 | - RETURN(); | |
| 72 | -} | |
| 73 | - | |
| 74 | -void OPPROTO op_clear_xer_ca (void) | |
| 75 | -{ | |
| 76 | - xer_ca = 0; | |
| 77 | - RETURN(); | |
| 78 | -} | |
| 79 | - | |
| 80 | -void OPPROTO op_load_xer_bc (void) | |
| 81 | -{ | |
| 82 | - T1 = xer_bc; | |
| 83 | - RETURN(); | |
| 84 | -} | |
| 85 | - | |
| 86 | -void OPPROTO op_store_xer_bc (void) | |
| 87 | -{ | |
| 88 | - xer_bc = T0; | |
| 89 | - RETURN(); | |
| 90 | -} | |
| 91 | - | |
| 92 | -void OPPROTO op_load_xer (void) | |
| 93 | -{ | |
| 94 | - T0 = hreg_load_xer(env); | |
| 95 | - RETURN(); | |
| 96 | -} | |
| 97 | - | |
| 98 | -void OPPROTO op_store_xer (void) | |
| 99 | -{ | |
| 100 | - hreg_store_xer(env, T0); | |
| 101 | - RETURN(); | |
| 102 | -} | |
| 103 | - | |
| 104 | 61 | #if defined(TARGET_PPC64) |
| 105 | 62 | void OPPROTO op_store_pri (void) |
| 106 | 63 | { |
| ... | ... | @@ -574,18 +531,26 @@ void OPPROTO op_dec_ctr (void) |
| 574 | 531 | /* add */ |
| 575 | 532 | void OPPROTO op_check_addo (void) |
| 576 | 533 | { |
| 577 | - xer_ov = (((uint32_t)T2 ^ (uint32_t)T1 ^ UINT32_MAX) & | |
| 534 | + int ov = (((uint32_t)T2 ^ (uint32_t)T1 ^ UINT32_MAX) & | |
| 578 | 535 | ((uint32_t)T2 ^ (uint32_t)T0)) >> 31; |
| 579 | - xer_so |= xer_ov; | |
| 536 | + if (ov) { | |
| 537 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 538 | + } else { | |
| 539 | + env->xer &= ~(1 << XER_OV); | |
| 540 | + } | |
| 580 | 541 | RETURN(); |
| 581 | 542 | } |
| 582 | 543 | |
| 583 | 544 | #if defined(TARGET_PPC64) |
| 584 | 545 | void OPPROTO op_check_addo_64 (void) |
| 585 | 546 | { |
| 586 | - xer_ov = (((uint64_t)T2 ^ (uint64_t)T1 ^ UINT64_MAX) & | |
| 547 | + int ov = (((uint64_t)T2 ^ (uint64_t)T1 ^ UINT64_MAX) & | |
| 587 | 548 | ((uint64_t)T2 ^ (uint64_t)T0)) >> 63; |
| 588 | - xer_so |= xer_ov; | |
| 549 | + if (ov) { | |
| 550 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 551 | + } else { | |
| 552 | + env->xer &= ~(1 << XER_OV); | |
| 553 | + } | |
| 589 | 554 | RETURN(); |
| 590 | 555 | } |
| 591 | 556 | #endif |
| ... | ... | @@ -594,9 +559,9 @@ void OPPROTO op_check_addo_64 (void) |
| 594 | 559 | void OPPROTO op_check_addc (void) |
| 595 | 560 | { |
| 596 | 561 | if (likely((uint32_t)T0 >= (uint32_t)T2)) { |
| 597 | - xer_ca = 0; | |
| 562 | + env->xer &= ~(1 << XER_CA); | |
| 598 | 563 | } else { |
| 599 | - xer_ca = 1; | |
| 564 | + env->xer |= (1 << XER_CA); | |
| 600 | 565 | } |
| 601 | 566 | RETURN(); |
| 602 | 567 | } |
| ... | ... | @@ -605,9 +570,9 @@ void OPPROTO op_check_addc (void) |
| 605 | 570 | void OPPROTO op_check_addc_64 (void) |
| 606 | 571 | { |
| 607 | 572 | if (likely((uint64_t)T0 >= (uint64_t)T2)) { |
| 608 | - xer_ca = 0; | |
| 573 | + env->xer &= ~(1 << XER_CA); | |
| 609 | 574 | } else { |
| 610 | - xer_ca = 1; | |
| 575 | + env->xer |= (1 << XER_CA); | |
| 611 | 576 | } |
| 612 | 577 | RETURN(); |
| 613 | 578 | } |
| ... | ... | @@ -633,7 +598,7 @@ void OPPROTO op_add_me (void) |
| 633 | 598 | { |
| 634 | 599 | T0 += xer_ca + (-1); |
| 635 | 600 | if (likely((uint32_t)T1 != 0)) |
| 636 | - xer_ca = 1; | |
| 601 | + env->xer |= (1 << XER_CA); | |
| 637 | 602 | RETURN(); |
| 638 | 603 | } |
| 639 | 604 | |
| ... | ... | @@ -642,7 +607,7 @@ void OPPROTO op_add_me_64 (void) |
| 642 | 607 | { |
| 643 | 608 | T0 += xer_ca + (-1); |
| 644 | 609 | if (likely((uint64_t)T1 != 0)) |
| 645 | - xer_ca = 1; | |
| 610 | + env->xer |= (1 << XER_CA); | |
| 646 | 611 | RETURN(); |
| 647 | 612 | } |
| 648 | 613 | #endif |
| ... | ... | @@ -855,9 +820,9 @@ void OPPROTO op_nego_64 (void) |
| 855 | 820 | void OPPROTO op_check_subfc (void) |
| 856 | 821 | { |
| 857 | 822 | if (likely((uint32_t)T0 > (uint32_t)T1)) { |
| 858 | - xer_ca = 0; | |
| 823 | + env->xer &= ~(1 << XER_CA); | |
| 859 | 824 | } else { |
| 860 | - xer_ca = 1; | |
| 825 | + env->xer |= (1 << XER_CA); | |
| 861 | 826 | } |
| 862 | 827 | RETURN(); |
| 863 | 828 | } |
| ... | ... | @@ -866,9 +831,9 @@ void OPPROTO op_check_subfc (void) |
| 866 | 831 | void OPPROTO op_check_subfc_64 (void) |
| 867 | 832 | { |
| 868 | 833 | if (likely((uint64_t)T0 > (uint64_t)T1)) { |
| 869 | - xer_ca = 0; | |
| 834 | + env->xer &= ~(1 << XER_CA); | |
| 870 | 835 | } else { |
| 871 | - xer_ca = 1; | |
| 836 | + env->xer |= (1 << XER_CA); | |
| 872 | 837 | } |
| 873 | 838 | RETURN(); |
| 874 | 839 | } |
| ... | ... | @@ -894,9 +859,9 @@ void OPPROTO op_subfic (void) |
| 894 | 859 | { |
| 895 | 860 | T0 = (int32_t)PARAM1 + ~T0 + 1; |
| 896 | 861 | if ((uint32_t)T0 <= (uint32_t)PARAM1) { |
| 897 | - xer_ca = 1; | |
| 862 | + env->xer |= (1 << XER_CA); | |
| 898 | 863 | } else { |
| 899 | - xer_ca = 0; | |
| 864 | + env->xer &= ~(1 << XER_CA); | |
| 900 | 865 | } |
| 901 | 866 | RETURN(); |
| 902 | 867 | } |
| ... | ... | @@ -906,9 +871,9 @@ void OPPROTO op_subfic_64 (void) |
| 906 | 871 | { |
| 907 | 872 | T0 = (int64_t)PARAM1 + ~T0 + 1; |
| 908 | 873 | if ((uint64_t)T0 <= (uint64_t)PARAM1) { |
| 909 | - xer_ca = 1; | |
| 874 | + env->xer |= (1 << XER_CA); | |
| 910 | 875 | } else { |
| 911 | - xer_ca = 0; | |
| 876 | + env->xer &= ~(1 << XER_CA); | |
| 912 | 877 | } |
| 913 | 878 | RETURN(); |
| 914 | 879 | } |
| ... | ... | @@ -919,7 +884,7 @@ void OPPROTO op_subfme (void) |
| 919 | 884 | { |
| 920 | 885 | T0 = ~T0 + xer_ca - 1; |
| 921 | 886 | if (likely((uint32_t)T0 != UINT32_MAX)) |
| 922 | - xer_ca = 1; | |
| 887 | + env->xer |= (1 << XER_CA); | |
| 923 | 888 | RETURN(); |
| 924 | 889 | } |
| 925 | 890 | |
| ... | ... | @@ -928,7 +893,7 @@ void OPPROTO op_subfme_64 (void) |
| 928 | 893 | { |
| 929 | 894 | T0 = ~T0 + xer_ca - 1; |
| 930 | 895 | if (likely((uint64_t)T0 != UINT64_MAX)) |
| 931 | - xer_ca = 1; | |
| 896 | + env->xer |= (1 << XER_CA); | |
| 932 | 897 | RETURN(); |
| 933 | 898 | } |
| 934 | 899 | #endif |
| ... | ... | @@ -953,9 +918,9 @@ void OPPROTO op_subfze (void) |
| 953 | 918 | T1 = ~T0; |
| 954 | 919 | T0 = T1 + xer_ca; |
| 955 | 920 | if ((uint32_t)T0 < (uint32_t)T1) { |
| 956 | - xer_ca = 1; | |
| 921 | + env->xer |= (1 << XER_CA); | |
| 957 | 922 | } else { |
| 958 | - xer_ca = 0; | |
| 923 | + env->xer &= ~(1 << XER_CA); | |
| 959 | 924 | } |
| 960 | 925 | RETURN(); |
| 961 | 926 | } |
| ... | ... | @@ -966,9 +931,9 @@ void OPPROTO op_subfze_64 (void) |
| 966 | 931 | T1 = ~T0; |
| 967 | 932 | T0 = T1 + xer_ca; |
| 968 | 933 | if ((uint64_t)T0 < (uint64_t)T1) { |
| 969 | - xer_ca = 1; | |
| 934 | + env->xer |= (1 << XER_CA); | |
| 970 | 935 | } else { |
| 971 | - xer_ca = 0; | |
| 936 | + env->xer &= ~(1 << XER_CA); | |
| 972 | 937 | } |
| 973 | 938 | RETURN(); |
| 974 | 939 | } |
| ... | ... | @@ -1317,9 +1282,9 @@ void OPPROTO op_srawi (void) |
| 1317 | 1282 | |
| 1318 | 1283 | T0 = (int32_t)T0 >> PARAM1; |
| 1319 | 1284 | if ((int32_t)T1 < 0 && (T1 & mask) != 0) { |
| 1320 | - xer_ca = 1; | |
| 1285 | + env->xer |= (1 << XER_CA); | |
| 1321 | 1286 | } else { |
| 1322 | - xer_ca = 0; | |
| 1287 | + env->xer &= ~(1 << XER_CA); | |
| 1323 | 1288 | } |
| 1324 | 1289 | RETURN(); |
| 1325 | 1290 | } |
| ... | ... | @@ -1331,9 +1296,9 @@ void OPPROTO op_sradi (void) |
| 1331 | 1296 | |
| 1332 | 1297 | T0 = (int64_t)T0 >> PARAM1; |
| 1333 | 1298 | if ((int64_t)T1 < 0 && ((uint64_t)T1 & mask) != 0) { |
| 1334 | - xer_ca = 1; | |
| 1299 | + env->xer |= (1 << XER_CA); | |
| 1335 | 1300 | } else { |
| 1336 | - xer_ca = 0; | |
| 1301 | + env->xer &= ~(1 << XER_CA); | |
| 1337 | 1302 | } |
| 1338 | 1303 | RETURN(); |
| 1339 | 1304 | } |
| ... | ... | @@ -1975,7 +1940,7 @@ void OPPROTO op_POWER_nabso (void) |
| 1975 | 1940 | /* nabs never overflows */ |
| 1976 | 1941 | if (T0 > 0) |
| 1977 | 1942 | T0 = -T0; |
| 1978 | - xer_ov = 0; | |
| 1943 | + env->xer &= ~(1 << XER_OV); | |
| 1979 | 1944 | RETURN(); |
| 1980 | 1945 | } |
| 1981 | 1946 | |
| ... | ... | @@ -2189,10 +2154,9 @@ void OPPROTO op_405_check_sat (void) |
| 2189 | 2154 | void OPPROTO op_405_check_ovu (void) |
| 2190 | 2155 | { |
| 2191 | 2156 | if (likely(T0 >= T2)) { |
| 2192 | - xer_ov = 0; | |
| 2157 | + env->xer &= ~(1 << XER_OV); | |
| 2193 | 2158 | } else { |
| 2194 | - xer_ov = 1; | |
| 2195 | - xer_so = 1; | |
| 2159 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 2196 | 2160 | } |
| 2197 | 2161 | RETURN(); |
| 2198 | 2162 | } | ... | ... |
target-ppc/op_helper.c
| ... | ... | @@ -119,9 +119,9 @@ void do_adde (void) |
| 119 | 119 | T0 += T1 + xer_ca; |
| 120 | 120 | if (likely(!((uint32_t)T0 < (uint32_t)T2 || |
| 121 | 121 | (xer_ca == 1 && (uint32_t)T0 == (uint32_t)T2)))) { |
| 122 | - xer_ca = 0; | |
| 122 | + env->xer &= ~(1 << XER_CA); | |
| 123 | 123 | } else { |
| 124 | - xer_ca = 1; | |
| 124 | + env->xer |= (1 << XER_CA); | |
| 125 | 125 | } |
| 126 | 126 | } |
| 127 | 127 | |
| ... | ... | @@ -132,32 +132,42 @@ void do_adde_64 (void) |
| 132 | 132 | T0 += T1 + xer_ca; |
| 133 | 133 | if (likely(!((uint64_t)T0 < (uint64_t)T2 || |
| 134 | 134 | (xer_ca == 1 && (uint64_t)T0 == (uint64_t)T2)))) { |
| 135 | - xer_ca = 0; | |
| 135 | + env->xer &= ~(1 << XER_CA); | |
| 136 | 136 | } else { |
| 137 | - xer_ca = 1; | |
| 137 | + env->xer |= (1 << XER_CA); | |
| 138 | 138 | } |
| 139 | 139 | } |
| 140 | 140 | #endif |
| 141 | 141 | |
| 142 | 142 | void do_addmeo (void) |
| 143 | 143 | { |
| 144 | + int ov; | |
| 144 | 145 | T1 = T0; |
| 145 | 146 | T0 += xer_ca + (-1); |
| 146 | - xer_ov = ((uint32_t)T1 & ((uint32_t)T1 ^ (uint32_t)T0)) >> 31; | |
| 147 | - xer_so |= xer_ov; | |
| 147 | + ov = ((uint32_t)T1 & ((uint32_t)T1 ^ (uint32_t)T0)) >> 31; | |
| 148 | + if (ov) { | |
| 149 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 150 | + } else { | |
| 151 | + env->xer &= ~(1 << XER_OV); | |
| 152 | + } | |
| 148 | 153 | if (likely((uint32_t)T1 != 0)) |
| 149 | - xer_ca = 1; | |
| 154 | + env->xer |= (1 << XER_CA); | |
| 150 | 155 | } |
| 151 | 156 | |
| 152 | 157 | #if defined(TARGET_PPC64) |
| 153 | 158 | void do_addmeo_64 (void) |
| 154 | 159 | { |
| 160 | + int ov; | |
| 155 | 161 | T1 = T0; |
| 156 | 162 | T0 += xer_ca + (-1); |
| 157 | - xer_ov = ((uint64_t)T1 & ((uint64_t)T1 ^ (uint64_t)T0)) >> 63; | |
| 158 | - xer_so |= xer_ov; | |
| 163 | + ov = ((uint64_t)T1 & ((uint64_t)T1 ^ (uint64_t)T0)) >> 63; | |
| 164 | + if (ov) { | |
| 165 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 166 | + } else { | |
| 167 | + env->xer &= ~(1 << XER_OV); | |
| 168 | + } | |
| 159 | 169 | if (likely((uint64_t)T1 != 0)) |
| 160 | - xer_ca = 1; | |
| 170 | + env->xer |= (1 << XER_CA); | |
| 161 | 171 | } |
| 162 | 172 | #endif |
| 163 | 173 | |
| ... | ... | @@ -165,13 +175,12 @@ void do_divwo (void) |
| 165 | 175 | { |
| 166 | 176 | if (likely(!(((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) || |
| 167 | 177 | (int32_t)T1 == 0))) { |
| 168 | - xer_ov = 0; | |
| 178 | + env->xer &= ~(1 << XER_OV); | |
| 169 | 179 | T0 = (int32_t)T0 / (int32_t)T1; |
| 170 | 180 | } else { |
| 171 | - xer_ov = 1; | |
| 181 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 172 | 182 | T0 = UINT32_MAX * ((uint32_t)T0 >> 31); |
| 173 | 183 | } |
| 174 | - xer_so |= xer_ov; | |
| 175 | 184 | } |
| 176 | 185 | |
| 177 | 186 | #if defined(TARGET_PPC64) |
| ... | ... | @@ -179,24 +188,22 @@ void do_divdo (void) |
| 179 | 188 | { |
| 180 | 189 | if (likely(!(((int64_t)T0 == INT64_MIN && (int64_t)T1 == (int64_t)-1LL) || |
| 181 | 190 | (int64_t)T1 == 0))) { |
| 182 | - xer_ov = 0; | |
| 191 | + env->xer &= ~(1 << XER_OV); | |
| 183 | 192 | T0 = (int64_t)T0 / (int64_t)T1; |
| 184 | 193 | } else { |
| 185 | - xer_ov = 1; | |
| 194 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 186 | 195 | T0 = UINT64_MAX * ((uint64_t)T0 >> 63); |
| 187 | 196 | } |
| 188 | - xer_so |= xer_ov; | |
| 189 | 197 | } |
| 190 | 198 | #endif |
| 191 | 199 | |
| 192 | 200 | void do_divwuo (void) |
| 193 | 201 | { |
| 194 | 202 | if (likely((uint32_t)T1 != 0)) { |
| 195 | - xer_ov = 0; | |
| 203 | + env->xer &= ~(1 << XER_OV); | |
| 196 | 204 | T0 = (uint32_t)T0 / (uint32_t)T1; |
| 197 | 205 | } else { |
| 198 | - xer_ov = 1; | |
| 199 | - xer_so = 1; | |
| 206 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 200 | 207 | T0 = 0; |
| 201 | 208 | } |
| 202 | 209 | } |
| ... | ... | @@ -205,11 +212,10 @@ void do_divwuo (void) |
| 205 | 212 | void do_divduo (void) |
| 206 | 213 | { |
| 207 | 214 | if (likely((uint64_t)T1 != 0)) { |
| 208 | - xer_ov = 0; | |
| 215 | + env->xer &= ~(1 << XER_OV); | |
| 209 | 216 | T0 = (uint64_t)T0 / (uint64_t)T1; |
| 210 | 217 | } else { |
| 211 | - xer_ov = 1; | |
| 212 | - xer_so = 1; | |
| 218 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 213 | 219 | T0 = 0; |
| 214 | 220 | } |
| 215 | 221 | } |
| ... | ... | @@ -220,10 +226,9 @@ void do_mullwo (void) |
| 220 | 226 | int64_t res = (int64_t)(int32_t)T0 * (int64_t)(int32_t)T1; |
| 221 | 227 | |
| 222 | 228 | if (likely((int32_t)res == res)) { |
| 223 | - xer_ov = 0; | |
| 229 | + env->xer &= ~(1 << XER_OV); | |
| 224 | 230 | } else { |
| 225 | - xer_ov = 1; | |
| 226 | - xer_so = 1; | |
| 231 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 227 | 232 | } |
| 228 | 233 | T0 = (int32_t)res; |
| 229 | 234 | } |
| ... | ... | @@ -238,22 +243,20 @@ void do_mulldo (void) |
| 238 | 243 | T0 = (int64_t)tl; |
| 239 | 244 | /* If th != 0 && th != -1, then we had an overflow */ |
| 240 | 245 | if (likely((uint64_t)(th + 1) <= 1)) { |
| 241 | - xer_ov = 0; | |
| 246 | + env->xer &= ~(1 << XER_OV); | |
| 242 | 247 | } else { |
| 243 | - xer_ov = 1; | |
| 248 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 244 | 249 | } |
| 245 | - xer_so |= xer_ov; | |
| 246 | 250 | } |
| 247 | 251 | #endif |
| 248 | 252 | |
| 249 | 253 | void do_nego (void) |
| 250 | 254 | { |
| 251 | 255 | if (likely((int32_t)T0 != INT32_MIN)) { |
| 252 | - xer_ov = 0; | |
| 256 | + env->xer &= ~(1 << XER_OV); | |
| 253 | 257 | T0 = -(int32_t)T0; |
| 254 | 258 | } else { |
| 255 | - xer_ov = 1; | |
| 256 | - xer_so = 1; | |
| 259 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 257 | 260 | } |
| 258 | 261 | } |
| 259 | 262 | |
| ... | ... | @@ -261,11 +264,10 @@ void do_nego (void) |
| 261 | 264 | void do_nego_64 (void) |
| 262 | 265 | { |
| 263 | 266 | if (likely((int64_t)T0 != INT64_MIN)) { |
| 264 | - xer_ov = 0; | |
| 267 | + env->xer &= ~(1 << XER_OV); | |
| 265 | 268 | T0 = -(int64_t)T0; |
| 266 | 269 | } else { |
| 267 | - xer_ov = 1; | |
| 268 | - xer_so = 1; | |
| 270 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 269 | 271 | } |
| 270 | 272 | } |
| 271 | 273 | #endif |
| ... | ... | @@ -275,9 +277,9 @@ void do_subfe (void) |
| 275 | 277 | T0 = T1 + ~T0 + xer_ca; |
| 276 | 278 | if (likely((uint32_t)T0 >= (uint32_t)T1 && |
| 277 | 279 | (xer_ca == 0 || (uint32_t)T0 != (uint32_t)T1))) { |
| 278 | - xer_ca = 0; | |
| 280 | + env->xer &= ~(1 << XER_CA); | |
| 279 | 281 | } else { |
| 280 | - xer_ca = 1; | |
| 282 | + env->xer |= (1 << XER_CA); | |
| 281 | 283 | } |
| 282 | 284 | } |
| 283 | 285 | |
| ... | ... | @@ -287,61 +289,81 @@ void do_subfe_64 (void) |
| 287 | 289 | T0 = T1 + ~T0 + xer_ca; |
| 288 | 290 | if (likely((uint64_t)T0 >= (uint64_t)T1 && |
| 289 | 291 | (xer_ca == 0 || (uint64_t)T0 != (uint64_t)T1))) { |
| 290 | - xer_ca = 0; | |
| 292 | + env->xer &= ~(1 << XER_CA); | |
| 291 | 293 | } else { |
| 292 | - xer_ca = 1; | |
| 294 | + env->xer |= (1 << XER_CA); | |
| 293 | 295 | } |
| 294 | 296 | } |
| 295 | 297 | #endif |
| 296 | 298 | |
| 297 | 299 | void do_subfmeo (void) |
| 298 | 300 | { |
| 301 | + int ov; | |
| 299 | 302 | T1 = T0; |
| 300 | 303 | T0 = ~T0 + xer_ca - 1; |
| 301 | - xer_ov = ((uint32_t)~T1 & ((uint32_t)~T1 ^ (uint32_t)T0)) >> 31; | |
| 302 | - xer_so |= xer_ov; | |
| 304 | + ov = ((uint32_t)~T1 & ((uint32_t)~T1 ^ (uint32_t)T0)) >> 31; | |
| 305 | + if (ov) { | |
| 306 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 307 | + } else { | |
| 308 | + env->xer &= ~(1 << XER_OV); | |
| 309 | + } | |
| 303 | 310 | if (likely((uint32_t)T1 != UINT32_MAX)) |
| 304 | - xer_ca = 1; | |
| 311 | + env->xer |= (1 << XER_CA); | |
| 305 | 312 | } |
| 306 | 313 | |
| 307 | 314 | #if defined(TARGET_PPC64) |
| 308 | 315 | void do_subfmeo_64 (void) |
| 309 | 316 | { |
| 317 | + int ov; | |
| 310 | 318 | T1 = T0; |
| 311 | 319 | T0 = ~T0 + xer_ca - 1; |
| 312 | - xer_ov = ((uint64_t)~T1 & ((uint64_t)~T1 ^ (uint64_t)T0)) >> 63; | |
| 313 | - xer_so |= xer_ov; | |
| 320 | + ov = ((uint64_t)~T1 & ((uint64_t)~T1 ^ (uint64_t)T0)) >> 63; | |
| 321 | + if (ov) { | |
| 322 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 323 | + } else { | |
| 324 | + env->xer &= ~(1 << XER_OV); | |
| 325 | + } | |
| 314 | 326 | if (likely((uint64_t)T1 != UINT64_MAX)) |
| 315 | - xer_ca = 1; | |
| 327 | + env->xer |= (1 << XER_CA); | |
| 316 | 328 | } |
| 317 | 329 | #endif |
| 318 | 330 | |
| 319 | 331 | void do_subfzeo (void) |
| 320 | 332 | { |
| 333 | + int ov; | |
| 321 | 334 | T1 = T0; |
| 322 | 335 | T0 = ~T0 + xer_ca; |
| 323 | - xer_ov = (((uint32_t)~T1 ^ UINT32_MAX) & | |
| 324 | - ((uint32_t)(~T1) ^ (uint32_t)T0)) >> 31; | |
| 325 | - xer_so |= xer_ov; | |
| 336 | + ov = (((uint32_t)~T1 ^ UINT32_MAX) & | |
| 337 | + ((uint32_t)(~T1) ^ (uint32_t)T0)) >> 31; | |
| 338 | + if (ov) { | |
| 339 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 340 | + } else { | |
| 341 | + env->xer &= ~(1 << XER_OV); | |
| 342 | + } | |
| 326 | 343 | if (likely((uint32_t)T0 >= (uint32_t)~T1)) { |
| 327 | - xer_ca = 0; | |
| 344 | + env->xer &= ~(1 << XER_CA); | |
| 328 | 345 | } else { |
| 329 | - xer_ca = 1; | |
| 346 | + env->xer |= (1 << XER_CA); | |
| 330 | 347 | } |
| 331 | 348 | } |
| 332 | 349 | |
| 333 | 350 | #if defined(TARGET_PPC64) |
| 334 | 351 | void do_subfzeo_64 (void) |
| 335 | 352 | { |
| 353 | + int ov; | |
| 336 | 354 | T1 = T0; |
| 337 | 355 | T0 = ~T0 + xer_ca; |
| 338 | - xer_ov = (((uint64_t)~T1 ^ UINT64_MAX) & | |
| 339 | - ((uint64_t)(~T1) ^ (uint64_t)T0)) >> 63; | |
| 340 | - xer_so |= xer_ov; | |
| 356 | + ov = (((uint64_t)~T1 ^ UINT64_MAX) & | |
| 357 | + ((uint64_t)(~T1) ^ (uint64_t)T0)) >> 63; | |
| 358 | + if (ov) { | |
| 359 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 360 | + } else { | |
| 361 | + env->xer &= ~(1 << XER_OV); | |
| 362 | + } | |
| 341 | 363 | if (likely((uint64_t)T0 >= (uint64_t)~T1)) { |
| 342 | - xer_ca = 0; | |
| 364 | + env->xer &= ~(1 << XER_CA); | |
| 343 | 365 | } else { |
| 344 | - xer_ca = 1; | |
| 366 | + env->xer |= (1 << XER_CA); | |
| 345 | 367 | } |
| 346 | 368 | } |
| 347 | 369 | #endif |
| ... | ... | @@ -367,20 +389,20 @@ void do_sraw (void) |
| 367 | 389 | if (likely((uint32_t)T1 != 0)) { |
| 368 | 390 | ret = (int32_t)T0 >> (T1 & 0x1fUL); |
| 369 | 391 | if (likely(ret >= 0 || ((int32_t)T0 & ((1 << T1) - 1)) == 0)) { |
| 370 | - xer_ca = 0; | |
| 392 | + env->xer &= ~(1 << XER_CA); | |
| 371 | 393 | } else { |
| 372 | - xer_ca = 1; | |
| 394 | + env->xer |= (1 << XER_CA); | |
| 373 | 395 | } |
| 374 | 396 | } else { |
| 375 | 397 | ret = T0; |
| 376 | - xer_ca = 0; | |
| 398 | + env->xer &= ~(1 << XER_CA); | |
| 377 | 399 | } |
| 378 | 400 | } else { |
| 379 | 401 | ret = UINT32_MAX * ((uint32_t)T0 >> 31); |
| 380 | 402 | if (likely(ret >= 0 || ((uint32_t)T0 & ~0x80000000UL) == 0)) { |
| 381 | - xer_ca = 0; | |
| 403 | + env->xer &= ~(1 << XER_CA); | |
| 382 | 404 | } else { |
| 383 | - xer_ca = 1; | |
| 405 | + env->xer |= (1 << XER_CA); | |
| 384 | 406 | } |
| 385 | 407 | } |
| 386 | 408 | T0 = ret; |
| ... | ... | @@ -395,20 +417,20 @@ void do_srad (void) |
| 395 | 417 | if (likely((uint64_t)T1 != 0)) { |
| 396 | 418 | ret = (int64_t)T0 >> (T1 & 0x3FUL); |
| 397 | 419 | if (likely(ret >= 0 || ((int64_t)T0 & ((1 << T1) - 1)) == 0)) { |
| 398 | - xer_ca = 0; | |
| 420 | + env->xer &= ~(1 << XER_CA); | |
| 399 | 421 | } else { |
| 400 | - xer_ca = 1; | |
| 422 | + env->xer |= (1 << XER_CA); | |
| 401 | 423 | } |
| 402 | 424 | } else { |
| 403 | 425 | ret = T0; |
| 404 | - xer_ca = 0; | |
| 426 | + env->xer &= ~(1 << XER_CA); | |
| 405 | 427 | } |
| 406 | 428 | } else { |
| 407 | 429 | ret = UINT64_MAX * ((uint64_t)T0 >> 63); |
| 408 | 430 | if (likely(ret >= 0 || ((uint64_t)T0 & ~0x8000000000000000ULL) == 0)) { |
| 409 | - xer_ca = 0; | |
| 431 | + env->xer &= ~(1 << XER_CA); | |
| 410 | 432 | } else { |
| 411 | - xer_ca = 1; | |
| 433 | + env->xer |= (1 << XER_CA); | |
| 412 | 434 | } |
| 413 | 435 | } |
| 414 | 436 | T0 = ret; |
| ... | ... | @@ -1478,14 +1500,13 @@ void do_POWER_abso (void) |
| 1478 | 1500 | { |
| 1479 | 1501 | if ((int32_t)T0 == INT32_MIN) { |
| 1480 | 1502 | T0 = INT32_MAX; |
| 1481 | - xer_ov = 1; | |
| 1503 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 1482 | 1504 | } else if ((int32_t)T0 < 0) { |
| 1483 | 1505 | T0 = -T0; |
| 1484 | - xer_ov = 0; | |
| 1506 | + env->xer &= ~(1 << XER_OV); | |
| 1485 | 1507 | } else { |
| 1486 | - xer_ov = 0; | |
| 1508 | + env->xer &= ~(1 << XER_OV); | |
| 1487 | 1509 | } |
| 1488 | - xer_so |= xer_ov; | |
| 1489 | 1510 | } |
| 1490 | 1511 | |
| 1491 | 1512 | void do_POWER_clcs (void) |
| ... | ... | @@ -1538,19 +1559,18 @@ void do_POWER_divo (void) |
| 1538 | 1559 | (int32_t)T1 == 0) { |
| 1539 | 1560 | T0 = UINT32_MAX * ((uint32_t)T0 >> 31); |
| 1540 | 1561 | env->spr[SPR_MQ] = 0; |
| 1541 | - xer_ov = 1; | |
| 1562 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 1542 | 1563 | } else { |
| 1543 | 1564 | tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ]; |
| 1544 | 1565 | env->spr[SPR_MQ] = tmp % T1; |
| 1545 | 1566 | tmp /= (int32_t)T1; |
| 1546 | 1567 | if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) { |
| 1547 | - xer_ov = 1; | |
| 1568 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 1548 | 1569 | } else { |
| 1549 | - xer_ov = 0; | |
| 1570 | + env->xer &= ~(1 << XER_OV); | |
| 1550 | 1571 | } |
| 1551 | 1572 | T0 = tmp; |
| 1552 | 1573 | } |
| 1553 | - xer_so |= xer_ov; | |
| 1554 | 1574 | } |
| 1555 | 1575 | |
| 1556 | 1576 | void do_POWER_divs (void) |
| ... | ... | @@ -1571,13 +1591,12 @@ void do_POWER_divso (void) |
| 1571 | 1591 | (int32_t)T1 == 0) { |
| 1572 | 1592 | T0 = UINT32_MAX * ((uint32_t)T0 >> 31); |
| 1573 | 1593 | env->spr[SPR_MQ] = 0; |
| 1574 | - xer_ov = 1; | |
| 1594 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 1575 | 1595 | } else { |
| 1576 | 1596 | T0 = (int32_t)T0 / (int32_t)T1; |
| 1577 | 1597 | env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1; |
| 1578 | - xer_ov = 0; | |
| 1598 | + env->xer &= ~(1 << XER_OV); | |
| 1579 | 1599 | } |
| 1580 | - xer_so |= xer_ov; | |
| 1581 | 1600 | } |
| 1582 | 1601 | |
| 1583 | 1602 | void do_POWER_dozo (void) |
| ... | ... | @@ -1587,14 +1606,13 @@ void do_POWER_dozo (void) |
| 1587 | 1606 | T0 = T1 - T0; |
| 1588 | 1607 | if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) & |
| 1589 | 1608 | ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) { |
| 1590 | - xer_ov = 1; | |
| 1591 | - xer_so = 1; | |
| 1609 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 1592 | 1610 | } else { |
| 1593 | - xer_ov = 0; | |
| 1611 | + env->xer &= ~(1 << XER_OV); | |
| 1594 | 1612 | } |
| 1595 | 1613 | } else { |
| 1596 | 1614 | T0 = 0; |
| 1597 | - xer_ov = 0; | |
| 1615 | + env->xer &= ~(1 << XER_OV); | |
| 1598 | 1616 | } |
| 1599 | 1617 | } |
| 1600 | 1618 | |
| ... | ... | @@ -1621,10 +1639,9 @@ void do_POWER_mulo (void) |
| 1621 | 1639 | env->spr[SPR_MQ] = tmp >> 32; |
| 1622 | 1640 | T0 = tmp; |
| 1623 | 1641 | if (tmp >> 32 != ((uint64_t)T0 >> 16) * ((uint64_t)T1 >> 16)) { |
| 1624 | - xer_ov = 1; | |
| 1625 | - xer_so = 1; | |
| 1642 | + env->xer |= (1 << XER_OV) | (1 << XER_SO); | |
| 1626 | 1643 | } else { |
| 1627 | - xer_ov = 0; | |
| 1644 | + env->xer &= ~(1 << XER_OV); | |
| 1628 | 1645 | } |
| 1629 | 1646 | } |
| 1630 | 1647 | |
target-ppc/translate.c
| ... | ... | @@ -62,6 +62,7 @@ static TCGv cpu_crf[8]; |
| 62 | 62 | static TCGv cpu_nip; |
| 63 | 63 | static TCGv cpu_ctr; |
| 64 | 64 | static TCGv cpu_lr; |
| 65 | +static TCGv cpu_xer; | |
| 65 | 66 | |
| 66 | 67 | /* dyngen register indexes */ |
| 67 | 68 | static TCGv cpu_T[3]; |
| ... | ... | @@ -175,6 +176,9 @@ void ppc_translate_init(void) |
| 175 | 176 | cpu_lr = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, |
| 176 | 177 | offsetof(CPUState, lr), "lr"); |
| 177 | 178 | |
| 179 | + cpu_xer = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, | |
| 180 | + offsetof(CPUState, xer), "xer"); | |
| 181 | + | |
| 178 | 182 | /* register helpers */ |
| 179 | 183 | #undef DEF_HELPER |
| 180 | 184 | #define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name); |
| ... | ... | @@ -1057,7 +1061,7 @@ GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) |
| 1057 | 1061 | #endif |
| 1058 | 1062 | gen_op_check_addc(); |
| 1059 | 1063 | } else { |
| 1060 | - gen_op_clear_xer_ca(); | |
| 1064 | + tcg_gen_andi_i32(cpu_xer, cpu_xer, ~(1 << XER_CA)); | |
| 1061 | 1065 | } |
| 1062 | 1066 | tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); |
| 1063 | 1067 | } |
| ... | ... | @@ -1077,7 +1081,7 @@ GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER) |
| 1077 | 1081 | #endif |
| 1078 | 1082 | gen_op_check_addc(); |
| 1079 | 1083 | } else { |
| 1080 | - gen_op_clear_xer_ca(); | |
| 1084 | + tcg_gen_andi_i32(cpu_xer, cpu_xer, ~(1 << XER_CA)); | |
| 1081 | 1085 | } |
| 1082 | 1086 | tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); |
| 1083 | 1087 | gen_set_Rc0(ctx); |
| ... | ... | @@ -2852,7 +2856,7 @@ GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING) |
| 2852 | 2856 | if (ra == 0) { |
| 2853 | 2857 | ra = rb; |
| 2854 | 2858 | } |
| 2855 | - gen_op_load_xer_bc(); | |
| 2859 | + tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F); | |
| 2856 | 2860 | op_ldstsx(lswx, rD(ctx->opcode), ra, rb); |
| 2857 | 2861 | } |
| 2858 | 2862 | |
| ... | ... | @@ -2876,7 +2880,7 @@ GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING) |
| 2876 | 2880 | /* NIP cannot be restored if the memory exception comes from an helper */ |
| 2877 | 2881 | gen_update_nip(ctx, ctx->nip - 4); |
| 2878 | 2882 | gen_addr_reg_index(cpu_T[0], ctx); |
| 2879 | - gen_op_load_xer_bc(); | |
| 2883 | + tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F); | |
| 2880 | 2884 | op_ldsts(stsw, rS(ctx->opcode)); |
| 2881 | 2885 | } |
| 2882 | 2886 | |
| ... | ... | @@ -3509,10 +3513,9 @@ GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B) |
| 3509 | 3513 | /* mcrxr */ |
| 3510 | 3514 | GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC) |
| 3511 | 3515 | { |
| 3512 | - gen_op_load_xer_cr(); | |
| 3513 | - tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_T[0], 0xf); | |
| 3514 | - gen_op_clear_xer_ov(); | |
| 3515 | - gen_op_clear_xer_ca(); | |
| 3516 | + tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], cpu_xer); | |
| 3517 | + tcg_gen_shri_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], XER_CA); | |
| 3518 | + tcg_gen_andi_i32(cpu_xer, cpu_xer, ~(1 << XER_SO | 1 << XER_OV | 1 << XER_CA)); | |
| 3516 | 3519 | } |
| 3517 | 3520 | |
| 3518 | 3521 | /* mfcr */ |
| ... | ... | @@ -4310,10 +4313,12 @@ GEN_HANDLER(lscbx, 0x1F, 0x15, 0x08, 0x00000000, PPC_POWER_BR) |
| 4310 | 4313 | } |
| 4311 | 4314 | /* NIP cannot be restored if the memory exception comes from an helper */ |
| 4312 | 4315 | gen_update_nip(ctx, ctx->nip - 4); |
| 4313 | - gen_op_load_xer_bc(); | |
| 4314 | - gen_op_load_xer_cmp(); | |
| 4316 | + tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F); | |
| 4317 | + tcg_gen_shri_tl(cpu_T[2], cpu_xer, XER_CMP); | |
| 4318 | + tcg_gen_andi_tl(cpu_T[2], cpu_T[2], 0xFF); | |
| 4315 | 4319 | op_POWER_lscbx(rD(ctx->opcode), ra, rb); |
| 4316 | - gen_op_store_xer_bc(); | |
| 4320 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F); | |
| 4321 | + tcg_gen_or_tl(cpu_xer, cpu_xer, cpu_T[0]); | |
| 4317 | 4322 | if (unlikely(Rc(ctx->opcode) != 0)) |
| 4318 | 4323 | gen_set_Rc0(ctx); |
| 4319 | 4324 | } |
| ... | ... | @@ -5500,7 +5505,8 @@ GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC) |
| 5500 | 5505 | tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rB(ctx->opcode)]); |
| 5501 | 5506 | gen_op_440_dlmzb(); |
| 5502 | 5507 | tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); |
| 5503 | - gen_op_store_xer_bc(); | |
| 5508 | + tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F); | |
| 5509 | + tcg_gen_or_tl(cpu_xer, cpu_xer, cpu_T[0]); | |
| 5504 | 5510 | if (Rc(ctx->opcode)) { |
| 5505 | 5511 | gen_op_440_dlmzb_update_Rc(); |
| 5506 | 5512 | tcg_gen_andi_i32(cpu_crf[0], cpu_T[0], 0xf); |
| ... | ... | @@ -6391,7 +6397,7 @@ void cpu_dump_state (CPUState *env, FILE *f, |
| 6391 | 6397 | int i; |
| 6392 | 6398 | |
| 6393 | 6399 | cpu_fprintf(f, "NIP " ADDRX " LR " ADDRX " CTR " ADDRX " XER %08x\n", |
| 6394 | - env->nip, env->lr, env->ctr, hreg_load_xer(env)); | |
| 6400 | + env->nip, env->lr, env->ctr, env->xer); | |
| 6395 | 6401 | cpu_fprintf(f, "MSR " ADDRX " HID0 " ADDRX " HF " ADDRX " idx %d\n", |
| 6396 | 6402 | env->msr, env->spr[SPR_HID0], env->hflags, env->mmu_idx); |
| 6397 | 6403 | #if !defined(NO_TIMER_DUMP) |
target-ppc/translate_init.c
| ... | ... | @@ -99,12 +99,12 @@ static void spr_write_clear (void *opaque, int sprn) |
| 99 | 99 | /* XER */ |
| 100 | 100 | static void spr_read_xer (void *opaque, int sprn) |
| 101 | 101 | { |
| 102 | - gen_op_load_xer(); | |
| 102 | + tcg_gen_mov_tl(cpu_T[0], cpu_xer); | |
| 103 | 103 | } |
| 104 | 104 | |
| 105 | 105 | static void spr_write_xer (void *opaque, int sprn) |
| 106 | 106 | { |
| 107 | - gen_op_store_xer(); | |
| 107 | + tcg_gen_mov_tl(cpu_xer, cpu_T[0]); | |
| 108 | 108 | } |
| 109 | 109 | |
| 110 | 110 | /* LR */ |