Commit b61f2753a7239f784b5c2f1d3a0249371cd2e164
1 parent 19f98ff6

ppc: convert integer load/store to TCG

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5493 c046a42c-6fe2-441c-8c8c-71466251a162
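
The patch removes the cpu_T[]-based integer load/store micro-ops from op_mem.h and instead emits TCG memory operations directly from translate.c through new gen_qemu_ld*/gen_qemu_st* helpers: the PPC64 wrappers truncate the effective address to 32 bits when the corresponding flag bit is clear, and both variants byte-swap the value when the low flag bit (little-endian mode) is set. As a rough sketch (assumed handler name, not part of the patch), this is roughly what an expanded load handler looks like after the change; the effective address lives in a local TCG temporary instead of cpu_T[0], and the result is loaded straight into the destination GPR instead of going through cpu_T[1] and a per-MMU-mode gen_op_* dispatch table:

    /* Hypothetical expansion of GEN_LD(32u, ...), i.e. roughly what lwz
     * translates to after this change. */
    static void gen_lwz_sketch(DisasContext *ctx)
    {
        TCGv EA = tcg_temp_new(TCG_TYPE_TL);          /* effective address temporary */
        gen_addr_imm_index(EA, ctx, 0);               /* EA = (rA|0) + SIMM */
        gen_qemu_ld32u(cpu_gpr[rD(ctx->opcode)],      /* load straight into rD */
                       EA, ctx->mem_idx);
        tcg_temp_free(EA);
    }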
Showing 2 changed files with 413 additions and 269 deletions
target-ppc/op_mem.h
| ... | ... | @@ -20,111 +20,6 @@ |
| 20 | 20 | |
| 21 | 21 | #include "op_mem_access.h" |
| 22 | 22 | |
| 23 | -/*** Integer load ***/ | |
| 24 | -#define PPC_LD_OP(name, op) \ | |
| 25 | -void OPPROTO glue(glue(op_l, name), MEMSUFFIX) (void) \ | |
| 26 | -{ \ | |
| 27 | - T1 = glue(op, MEMSUFFIX)((uint32_t)T0); \ | |
| 28 | - RETURN(); \ | |
| 29 | -} | |
| 30 | - | |
| 31 | -#if defined(TARGET_PPC64) | |
| 32 | -#define PPC_LD_OP_64(name, op) \ | |
| 33 | -void OPPROTO glue(glue(glue(op_l, name), _64), MEMSUFFIX) (void) \ | |
| 34 | -{ \ | |
| 35 | - T1 = glue(op, MEMSUFFIX)((uint64_t)T0); \ | |
| 36 | - RETURN(); \ | |
| 37 | -} | |
| 38 | -#endif | |
| 39 | - | |
| 40 | -#define PPC_ST_OP(name, op) \ | |
| 41 | -void OPPROTO glue(glue(op_st, name), MEMSUFFIX) (void) \ | |
| 42 | -{ \ | |
| 43 | - glue(op, MEMSUFFIX)((uint32_t)T0, T1); \ | |
| 44 | - RETURN(); \ | |
| 45 | -} | |
| 46 | - | |
| 47 | -#if defined(TARGET_PPC64) | |
| 48 | -#define PPC_ST_OP_64(name, op) \ | |
| 49 | -void OPPROTO glue(glue(glue(op_st, name), _64), MEMSUFFIX) (void) \ | |
| 50 | -{ \ | |
| 51 | - glue(op, MEMSUFFIX)((uint64_t)T0, T1); \ | |
| 52 | - RETURN(); \ | |
| 53 | -} | |
| 54 | -#endif | |
| 55 | - | |
| 56 | -PPC_LD_OP(bz, ldu8); | |
| 57 | -PPC_LD_OP(ha, lds16); | |
| 58 | -PPC_LD_OP(hz, ldu16); | |
| 59 | -PPC_LD_OP(wz, ldu32); | |
| 60 | -#if defined(TARGET_PPC64) | |
| 61 | -PPC_LD_OP(wa, lds32); | |
| 62 | -PPC_LD_OP(d, ldu64); | |
| 63 | -PPC_LD_OP_64(bz, ldu8); | |
| 64 | -PPC_LD_OP_64(ha, lds16); | |
| 65 | -PPC_LD_OP_64(hz, ldu16); | |
| 66 | -PPC_LD_OP_64(wz, ldu32); | |
| 67 | -PPC_LD_OP_64(wa, lds32); | |
| 68 | -PPC_LD_OP_64(d, ldu64); | |
| 69 | -#endif | |
| 70 | - | |
| 71 | -PPC_LD_OP(ha_le, lds16r); | |
| 72 | -PPC_LD_OP(hz_le, ldu16r); | |
| 73 | -PPC_LD_OP(wz_le, ldu32r); | |
| 74 | -#if defined(TARGET_PPC64) | |
| 75 | -PPC_LD_OP(wa_le, lds32r); | |
| 76 | -PPC_LD_OP(d_le, ldu64r); | |
| 77 | -PPC_LD_OP_64(ha_le, lds16r); | |
| 78 | -PPC_LD_OP_64(hz_le, ldu16r); | |
| 79 | -PPC_LD_OP_64(wz_le, ldu32r); | |
| 80 | -PPC_LD_OP_64(wa_le, lds32r); | |
| 81 | -PPC_LD_OP_64(d_le, ldu64r); | |
| 82 | -#endif | |
| 83 | - | |
| 84 | -/*** Integer store ***/ | |
| 85 | -PPC_ST_OP(b, st8); | |
| 86 | -PPC_ST_OP(h, st16); | |
| 87 | -PPC_ST_OP(w, st32); | |
| 88 | -#if defined(TARGET_PPC64) | |
| 89 | -PPC_ST_OP(d, st64); | |
| 90 | -PPC_ST_OP_64(b, st8); | |
| 91 | -PPC_ST_OP_64(h, st16); | |
| 92 | -PPC_ST_OP_64(w, st32); | |
| 93 | -PPC_ST_OP_64(d, st64); | |
| 94 | -#endif | |
| 95 | - | |
| 96 | -PPC_ST_OP(h_le, st16r); | |
| 97 | -PPC_ST_OP(w_le, st32r); | |
| 98 | -#if defined(TARGET_PPC64) | |
| 99 | -PPC_ST_OP(d_le, st64r); | |
| 100 | -PPC_ST_OP_64(h_le, st16r); | |
| 101 | -PPC_ST_OP_64(w_le, st32r); | |
| 102 | -PPC_ST_OP_64(d_le, st64r); | |
| 103 | -#endif | |
| 104 | - | |
| 105 | -/*** Integer load and store with byte reverse ***/ | |
| 106 | -PPC_LD_OP(hbr, ldu16r); | |
| 107 | -PPC_LD_OP(wbr, ldu32r); | |
| 108 | -PPC_ST_OP(hbr, st16r); | |
| 109 | -PPC_ST_OP(wbr, st32r); | |
| 110 | -#if defined(TARGET_PPC64) | |
| 111 | -PPC_LD_OP_64(hbr, ldu16r); | |
| 112 | -PPC_LD_OP_64(wbr, ldu32r); | |
| 113 | -PPC_ST_OP_64(hbr, st16r); | |
| 114 | -PPC_ST_OP_64(wbr, st32r); | |
| 115 | -#endif | |
| 116 | - | |
| 117 | -PPC_LD_OP(hbr_le, ldu16); | |
| 118 | -PPC_LD_OP(wbr_le, ldu32); | |
| 119 | -PPC_ST_OP(hbr_le, st16); | |
| 120 | -PPC_ST_OP(wbr_le, st32); | |
| 121 | -#if defined(TARGET_PPC64) | |
| 122 | -PPC_LD_OP_64(hbr_le, ldu16); | |
| 123 | -PPC_LD_OP_64(wbr_le, ldu32); | |
| 124 | -PPC_ST_OP_64(hbr_le, st16); | |
| 125 | -PPC_ST_OP_64(wbr_le, st32); | |
| 126 | -#endif | |
| 127 | - | |
| 128 | 23 | /*** Integer load and store multiple ***/ |
| 129 | 24 | void OPPROTO glue(op_lmw, MEMSUFFIX) (void) |
| 130 | 25 | { |
| ... | ... | @@ -985,12 +880,10 @@ _PPC_SPE_ST_OP_64(name, op) |
| 985 | 880 | _PPC_SPE_ST_OP(name, op) |
| 986 | 881 | #endif |
| 987 | 882 | |
| 988 | -#if !defined(TARGET_PPC64) | |
| 989 | 883 | PPC_SPE_LD_OP(dd, ldu64); |
| 990 | 884 | PPC_SPE_ST_OP(dd, st64); |
| 991 | 885 | PPC_SPE_LD_OP(dd_le, ldu64r); |
| 992 | 886 | PPC_SPE_ST_OP(dd_le, st64r); |
| 993 | -#endif | |
| 994 | 887 | static always_inline uint64_t glue(spe_ldw, MEMSUFFIX) (target_ulong EA) |
| 995 | 888 | { |
| 996 | 889 | uint64_t ret; |
| ... | ... | @@ -1135,7 +1028,6 @@ static always_inline void glue(spe_stwho_le, MEMSUFFIX) (target_ulong EA, |
| 1135 | 1028 | glue(st16r, MEMSUFFIX)(EA + 2, data); |
| 1136 | 1029 | } |
| 1137 | 1030 | PPC_SPE_ST_OP(who_le, spe_stwho_le); |
| 1138 | -#if !defined(TARGET_PPC64) | |
| 1139 | 1031 | static always_inline void glue(spe_stwwo, MEMSUFFIX) (target_ulong EA, |
| 1140 | 1032 | uint64_t data) |
| 1141 | 1033 | { |
| ... | ... | @@ -1148,7 +1040,6 @@ static always_inline void glue(spe_stwwo_le, MEMSUFFIX) (target_ulong EA, |
| 1148 | 1040 | glue(st32r, MEMSUFFIX)(EA, data); |
| 1149 | 1041 | } |
| 1150 | 1042 | PPC_SPE_ST_OP(wwo_le, spe_stwwo_le); |
| 1151 | -#endif | |
| 1152 | 1043 | static always_inline uint64_t glue(spe_lh, MEMSUFFIX) (target_ulong EA) |
| 1153 | 1044 | { |
| 1154 | 1045 | uint16_t tmp; |
target-ppc/translate.c
| ... | ... | @@ -2172,23 +2172,6 @@ static always_inline void gen_addr_register (TCGv EA, |
| 2172 | 2172 | |
| 2173 | 2173 | /*** Integer load ***/ |
| 2174 | 2174 | #define op_ldst(name) (*gen_op_##name[ctx->mem_idx])() |
| 2175 | -/* Byte access routine are endian safe */ | |
| 2176 | -#define gen_op_lbz_le_raw gen_op_lbz_raw | |
| 2177 | -#define gen_op_lbz_le_user gen_op_lbz_user | |
| 2178 | -#define gen_op_lbz_le_kernel gen_op_lbz_kernel | |
| 2179 | -#define gen_op_lbz_le_hypv gen_op_lbz_hypv | |
| 2180 | -#define gen_op_lbz_le_64_raw gen_op_lbz_64_raw | |
| 2181 | -#define gen_op_lbz_le_64_user gen_op_lbz_64_user | |
| 2182 | -#define gen_op_lbz_le_64_kernel gen_op_lbz_64_kernel | |
| 2183 | -#define gen_op_lbz_le_64_hypv gen_op_lbz_64_hypv | |
| 2184 | -#define gen_op_stb_le_raw gen_op_stb_raw | |
| 2185 | -#define gen_op_stb_le_user gen_op_stb_user | |
| 2186 | -#define gen_op_stb_le_kernel gen_op_stb_kernel | |
| 2187 | -#define gen_op_stb_le_hypv gen_op_stb_hypv | |
| 2188 | -#define gen_op_stb_le_64_raw gen_op_stb_64_raw | |
| 2189 | -#define gen_op_stb_le_64_user gen_op_stb_64_user | |
| 2190 | -#define gen_op_stb_le_64_kernel gen_op_stb_64_kernel | |
| 2191 | -#define gen_op_stb_le_64_hypv gen_op_stb_64_hypv | |
| 2192 | 2175 | #define OP_LD_TABLE(width) \ |
| 2193 | 2176 | static GenOpFunc *gen_op_l##width[NB_MEM_FUNCS] = { \ |
| 2194 | 2177 | GEN_MEM_FUNCS(l##width), \ |
| ... | ... | @@ -2198,81 +2181,354 @@ static GenOpFunc *gen_op_st##width[NB_MEM_FUNCS] = { \ |
| 2198 | 2181 | GEN_MEM_FUNCS(st##width), \ |
| 2199 | 2182 | }; |
| 2200 | 2183 | |
| 2184 | + | |
| 2185 | +#if defined(TARGET_PPC64) | |
| 2186 | +#define GEN_QEMU_LD_PPC64(width) \ | |
| 2187 | +static always_inline void gen_qemu_ld##width##_ppc64(TCGv t0, TCGv t1, int flags)\ | |
| 2188 | +{ \ | |
| 2189 | + if (likely(flags & 2)) \ | |
| 2190 | + tcg_gen_qemu_ld##width(t0, t1, flags >> 2); \ | |
| 2191 | + else { \ | |
| 2192 | + TCGv addr = tcg_temp_new(TCG_TYPE_TL); \ | |
| 2193 | + tcg_gen_ext32u_tl(addr, t1); \ | |
| 2194 | + tcg_gen_qemu_ld##width(t0, addr, flags >> 2); \ | |
| 2195 | + tcg_temp_free(addr); \ | |
| 2196 | + } \ | |
| 2197 | +} | |
| 2198 | +GEN_QEMU_LD_PPC64(8u) | |
| 2199 | +GEN_QEMU_LD_PPC64(8s) | |
| 2200 | +GEN_QEMU_LD_PPC64(16u) | |
| 2201 | +GEN_QEMU_LD_PPC64(16s) | |
| 2202 | +GEN_QEMU_LD_PPC64(32u) | |
| 2203 | +GEN_QEMU_LD_PPC64(32s) | |
| 2204 | +GEN_QEMU_LD_PPC64(64) | |
| 2205 | + | |
| 2206 | +#define GEN_QEMU_ST_PPC64(width) \ | |
| 2207 | +static always_inline void gen_qemu_st##width##_ppc64(TCGv t0, TCGv t1, int flags)\ | |
| 2208 | +{ \ | |
| 2209 | + if (likely(flags & 2)) \ | |
| 2210 | + tcg_gen_qemu_st##width(t0, t1, flags >> 2); \ | |
| 2211 | + else { \ | |
| 2212 | + TCGv addr = tcg_temp_new(TCG_TYPE_TL); \ | |
| 2213 | + tcg_gen_ext32u_tl(addr, t1); \ | |
| 2214 | + tcg_gen_qemu_st##width(t0, addr, flags >> 2); \ | |
| 2215 | + tcg_temp_free(addr); \ | |
| 2216 | + } \ | |
| 2217 | +} | |
| 2218 | +GEN_QEMU_ST_PPC64(8) | |
| 2219 | +GEN_QEMU_ST_PPC64(16) | |
| 2220 | +GEN_QEMU_ST_PPC64(32) | |
| 2221 | +GEN_QEMU_ST_PPC64(64) | |
| 2222 | + | |
| 2223 | +static always_inline void gen_qemu_ld8u(TCGv t0, TCGv t1, int flags) | |
| 2224 | +{ | |
| 2225 | + gen_qemu_ld8u_ppc64(t0, t1, flags); | |
| 2226 | +} | |
| 2227 | + | |
| 2228 | +static always_inline void gen_qemu_ld8s(TCGv t0, TCGv t1, int flags) | |
| 2229 | +{ | |
| 2230 | + gen_qemu_ld8s_ppc64(t0, t1, flags); | |
| 2231 | +} | |
| 2232 | + | |
| 2233 | +static always_inline void gen_qemu_ld16u(TCGv t0, TCGv t1, int flags) | |
| 2234 | +{ | |
| 2235 | + if (unlikely(flags & 1)) { | |
| 2236 | + TCGv t0_32; | |
| 2237 | + gen_qemu_ld16u_ppc64(t0, t1, flags); | |
| 2238 | + t0_32 = tcg_temp_new(TCG_TYPE_I32); | |
| 2239 | + tcg_gen_trunc_tl_i32(t0_32, t0); | |
| 2240 | + tcg_gen_bswap16_i32(t0_32, t0_32); | |
| 2241 | + tcg_gen_extu_i32_tl(t0, t0_32); | |
| 2242 | + tcg_temp_free(t0_32); | |
| 2243 | + } else | |
| 2244 | + gen_qemu_ld16u_ppc64(t0, t1, flags); | |
| 2245 | +} | |
| 2246 | + | |
| 2247 | +static always_inline void gen_qemu_ld16s(TCGv t0, TCGv t1, int flags) | |
| 2248 | +{ | |
| 2249 | + if (unlikely(flags & 1)) { | |
| 2250 | + TCGv t0_32; | |
| 2251 | + gen_qemu_ld16u_ppc64(t0, t1, flags); | |
| 2252 | + t0_32 = tcg_temp_new(TCG_TYPE_I32); | |
| 2253 | + tcg_gen_trunc_tl_i32(t0_32, t0); | |
| 2254 | + tcg_gen_bswap16_i32(t0_32, t0_32); | |
| 2255 | + tcg_gen_extu_i32_tl(t0, t0_32); | |
| 2256 | + tcg_gen_ext16s_tl(t0, t0); | |
| 2257 | + tcg_temp_free(t0_32); | |
| 2258 | + } else | |
| 2259 | + gen_qemu_ld16s_ppc64(t0, t1, flags); | |
| 2260 | +} | |
| 2261 | + | |
| 2262 | +static always_inline void gen_qemu_ld32u(TCGv t0, TCGv t1, int flags) | |
| 2263 | +{ | |
| 2264 | + if (unlikely(flags & 1)) { | |
| 2265 | + TCGv t0_32; | |
| 2266 | + gen_qemu_ld32u_ppc64(t0, t1, flags); | |
| 2267 | + t0_32 = tcg_temp_new(TCG_TYPE_I32); | |
| 2268 | + tcg_gen_trunc_tl_i32(t0_32, t0); | |
| 2269 | + tcg_gen_bswap_i32(t0_32, t0_32); | |
| 2270 | + tcg_gen_extu_i32_tl(t0, t0_32); | |
| 2271 | + tcg_temp_free(t0_32); | |
| 2272 | + } else | |
| 2273 | + gen_qemu_ld32u_ppc64(t0, t1, flags); | |
| 2274 | +} | |
| 2275 | + | |
| 2276 | +static always_inline void gen_qemu_ld32s(TCGv t0, TCGv t1, int flags) | |
| 2277 | +{ | |
| 2278 | + if (unlikely(flags & 1)) { | |
| 2279 | + TCGv t0_32; | |
| 2280 | + gen_qemu_ld32u_ppc64(t0, t1, flags); | |
| 2281 | + t0_32 = tcg_temp_new(TCG_TYPE_I32); | |
| 2282 | + tcg_gen_trunc_tl_i32(t0_32, t0); | |
| 2283 | + tcg_gen_bswap_i32(t0_32, t0_32); | |
| 2284 | + tcg_gen_ext_i32_tl(t0, t0_32); | |
| 2285 | + tcg_temp_free(t0_32); | |
| 2286 | + } else | |
| 2287 | + gen_qemu_ld32s_ppc64(t0, t1, flags); | |
| 2288 | +} | |
| 2289 | + | |
| 2290 | +static always_inline void gen_qemu_ld64(TCGv t0, TCGv t1, int flags) | |
| 2291 | +{ | |
| 2292 | + gen_qemu_ld64_ppc64(t0, t1, flags); | |
| 2293 | + if (unlikely(flags & 1)) | |
| 2294 | + tcg_gen_bswap_i64(t0, t0); | |
| 2295 | +} | |
| 2296 | + | |
| 2297 | +static always_inline void gen_qemu_st8(TCGv t0, TCGv t1, int flags) | |
| 2298 | +{ | |
| 2299 | + gen_qemu_st8_ppc64(t0, t1, flags); | |
| 2300 | +} | |
| 2301 | + | |
| 2302 | +static always_inline void gen_qemu_st16(TCGv t0, TCGv t1, int flags) | |
| 2303 | +{ | |
| 2304 | + if (unlikely(flags & 1)) { | |
| 2305 | + TCGv temp1, temp2; | |
| 2306 | + temp1 = tcg_temp_new(TCG_TYPE_I32); | |
| 2307 | + tcg_gen_trunc_tl_i32(temp1, t0); | |
| 2308 | + tcg_gen_ext16u_i32(temp1, temp1); | |
| 2309 | + tcg_gen_bswap16_i32(temp1, temp1); | |
| 2310 | + temp2 = tcg_temp_new(TCG_TYPE_I64); | |
| 2311 | + tcg_gen_extu_i32_tl(temp2, temp1); | |
| 2312 | + tcg_temp_free(temp1); | |
| 2313 | + gen_qemu_st16_ppc64(temp2, t1, flags); | |
| 2314 | + tcg_temp_free(temp2); | |
| 2315 | + } else | |
| 2316 | + gen_qemu_st16_ppc64(t0, t1, flags); | |
| 2317 | +} | |
| 2318 | + | |
| 2319 | +static always_inline void gen_qemu_st32(TCGv t0, TCGv t1, int flags) | |
| 2320 | +{ | |
| 2321 | + if (unlikely(flags & 1)) { | |
| 2322 | + TCGv temp1, temp2; | |
| 2323 | + temp1 = tcg_temp_new(TCG_TYPE_I32); | |
| 2324 | + tcg_gen_trunc_tl_i32(temp1, t0); | |
| 2325 | + tcg_gen_bswap_i32(temp1, temp1); | |
| 2326 | + temp2 = tcg_temp_new(TCG_TYPE_I64); | |
| 2327 | + tcg_gen_extu_i32_tl(temp2, temp1); | |
| 2328 | + tcg_temp_free(temp1); | |
| 2329 | + gen_qemu_st32_ppc64(temp2, t1, flags); | |
| 2330 | + tcg_temp_free(temp2); | |
| 2331 | + } else | |
| 2332 | + gen_qemu_st32_ppc64(t0, t1, flags); | |
| 2333 | +} | |
| 2334 | + | |
| 2335 | +static always_inline void gen_qemu_st64(TCGv t0, TCGv t1, int flags) | |
| 2336 | +{ | |
| 2337 | + if (unlikely(flags & 1)) { | |
| 2338 | + TCGv temp = tcg_temp_new(TCG_TYPE_I64); | |
| 2339 | + tcg_gen_bswap_i64(temp, t0); | |
| 2340 | + gen_qemu_st64_ppc64(temp, t1, flags); | |
| 2341 | + tcg_temp_free(temp); | |
| 2342 | + } else | |
| 2343 | + gen_qemu_st64_ppc64(t0, t1, flags); | |
| 2344 | +} | |
| 2345 | + | |
| 2346 | + | |
| 2347 | +#else /* defined(TARGET_PPC64) */ | |
| 2348 | +#define GEN_QEMU_LD_PPC32(width) \ | |
| 2349 | +static always_inline void gen_qemu_ld##width##_ppc32(TCGv t0, TCGv t1, int flags)\ | |
| 2350 | +{ \ | |
| 2351 | + tcg_gen_qemu_ld##width(t0, t1, flags >> 1); \ | |
| 2352 | +} | |
| 2353 | +GEN_QEMU_LD_PPC32(8u) | |
| 2354 | +GEN_QEMU_LD_PPC32(8s) | |
| 2355 | +GEN_QEMU_LD_PPC32(16u) | |
| 2356 | +GEN_QEMU_LD_PPC32(16s) | |
| 2357 | +GEN_QEMU_LD_PPC32(32u) | |
| 2358 | +GEN_QEMU_LD_PPC32(32s) | |
| 2359 | +GEN_QEMU_LD_PPC32(64) | |
| 2360 | + | |
| 2361 | +#define GEN_QEMU_ST_PPC32(width) \ | |
| 2362 | +static always_inline void gen_qemu_st##width##_ppc32(TCGv t0, TCGv t1, int flags)\ | |
| 2363 | +{ \ | |
| 2364 | + tcg_gen_qemu_st##width(t0, t1, flags >> 1); \ | |
| 2365 | +} | |
| 2366 | +GEN_QEMU_ST_PPC32(8) | |
| 2367 | +GEN_QEMU_ST_PPC32(16) | |
| 2368 | +GEN_QEMU_ST_PPC32(32) | |
| 2369 | +GEN_QEMU_ST_PPC32(64) | |
| 2370 | + | |
| 2371 | +static always_inline void gen_qemu_ld8u(TCGv t0, TCGv t1, int flags) | |
| 2372 | +{ | |
| 2373 | + gen_qemu_ld8u_ppc32(t0, t1, flags >> 1); | |
| 2374 | +} | |
| 2375 | + | |
| 2376 | +static always_inline void gen_qemu_ld8s(TCGv t0, TCGv t1, int flags) | |
| 2377 | +{ | |
| 2378 | + gen_qemu_ld8s_ppc32(t0, t1, flags >> 1); | |
| 2379 | +} | |
| 2380 | + | |
| 2381 | +static always_inline void gen_qemu_ld16u(TCGv t0, TCGv t1, int flags) | |
| 2382 | +{ | |
| 2383 | + gen_qemu_ld16u_ppc32(t0, t1, flags >> 1); | |
| 2384 | + if (unlikely(flags & 1)) | |
| 2385 | + tcg_gen_bswap16_i32(t0, t0); | |
| 2386 | +} | |
| 2387 | + | |
| 2388 | +static always_inline void gen_qemu_ld16s(TCGv t0, TCGv t1, int flags) | |
| 2389 | +{ | |
| 2390 | + if (unlikely(flags & 1)) { | |
| 2391 | + gen_qemu_ld16u_ppc32(t0, t1, flags); | |
| 2392 | + tcg_gen_bswap16_i32(t0, t0); | |
| 2393 | + tcg_gen_ext16s_i32(t0, t0); | |
| 2394 | + } else | |
| 2395 | + gen_qemu_ld16s_ppc32(t0, t1, flags); | |
| 2396 | +} | |
| 2397 | + | |
| 2398 | +static always_inline void gen_qemu_ld32u(TCGv t0, TCGv t1, int flags) | |
| 2399 | +{ | |
| 2400 | + gen_qemu_ld32u_ppc32(t0, t1, flags); | |
| 2401 | + if (unlikely(flags & 1)) | |
| 2402 | + tcg_gen_bswap_i32(t0, t0); | |
| 2403 | +} | |
| 2404 | + | |
| 2405 | +static always_inline void gen_qemu_ld64(TCGv t0, TCGv t1, int flags) | |
| 2406 | +{ | |
| 2407 | + gen_qemu_ld64_ppc32(t0, t1, flags); | |
| 2408 | + if (unlikely(flags & 1)) | |
| 2409 | + tcg_gen_bswap_i64(t0, t0); | |
| 2410 | +} | |
| 2411 | + | |
| 2412 | +static always_inline void gen_qemu_st8(TCGv t0, TCGv t1, int flags) | |
| 2413 | +{ | |
| 2414 | + gen_qemu_st8_ppc32(t0, t1, flags >> 1); | |
| 2415 | +} | |
| 2416 | + | |
| 2417 | +static always_inline void gen_qemu_st16(TCGv t0, TCGv t1, int flags) | |
| 2418 | +{ | |
| 2419 | + if (unlikely(flags & 1)) { | |
| 2420 | + TCGv temp = tcg_temp_new(TCG_TYPE_I32); | |
| 2421 | + tcg_gen_ext16u_i32(temp, t0); | |
| 2422 | + tcg_gen_bswap16_i32(temp, temp); | |
| 2423 | + gen_qemu_st16_ppc32(temp, t1, flags >> 1); | |
| 2424 | + tcg_temp_free(temp); | |
| 2425 | + } else | |
| 2426 | + gen_qemu_st16_ppc32(t0, t1, flags >> 1); | |
| 2427 | +} | |
| 2428 | + | |
| 2429 | +static always_inline void gen_qemu_st32(TCGv t0, TCGv t1, int flags) | |
| 2430 | +{ | |
| 2431 | + if (unlikely(flags & 1)) { | |
| 2432 | + TCGv temp = tcg_temp_new(TCG_TYPE_I32); | |
| 2433 | + tcg_gen_bswap_i32(temp, t0); | |
| 2434 | + gen_qemu_st32_ppc32(temp, t1, flags >> 1); | |
| 2435 | + tcg_temp_free(temp); | |
| 2436 | + } else | |
| 2437 | + gen_qemu_st32_ppc32(t0, t1, flags >> 1); | |
| 2438 | +} | |
| 2439 | + | |
| 2440 | +static always_inline void gen_qemu_st64(TCGv t0, TCGv t1, int flags) | |
| 2441 | +{ | |
| 2442 | + if (unlikely(flags & 1)) { | |
| 2443 | + TCGv temp = tcg_temp_new(TCG_TYPE_I64); | |
| 2444 | + tcg_gen_bswap_i64(temp, t0); | |
| 2445 | + gen_qemu_st64_ppc32(temp, t1, flags >> 1); | |
| 2446 | + tcg_temp_free(temp); | |
| 2447 | + } else | |
| 2448 | + gen_qemu_st64_ppc32(t0, t1, flags >> 1); | |
| 2449 | +} | |
| 2450 | + | |
| 2451 | +#endif | |
| 2452 | + | |
| 2201 | 2453 | #define GEN_LD(width, opc, type) \ |
| 2202 | 2454 | GEN_HANDLER(l##width, opc, 0xFF, 0xFF, 0x00000000, type) \ |
| 2203 | 2455 | { \ |
| 2204 | - gen_addr_imm_index(cpu_T[0], ctx, 0); \ | |
| 2205 | - op_ldst(l##width); \ | |
| 2206 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[1]); \ | |
| 2456 | + TCGv EA = tcg_temp_new(TCG_TYPE_TL); \ | |
| 2457 | + gen_addr_imm_index(EA, ctx, 0); \ | |
| 2458 | + gen_qemu_ld##width(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \ | |
| 2459 | + tcg_temp_free(EA); \ | |
| 2207 | 2460 | } |
| 2208 | 2461 | |
| 2209 | 2462 | #define GEN_LDU(width, opc, type) \ |
| 2210 | 2463 | GEN_HANDLER(l##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \ |
| 2211 | 2464 | { \ |
| 2465 | + TCGv EA; \ | |
| 2212 | 2466 | if (unlikely(rA(ctx->opcode) == 0 || \ |
| 2213 | 2467 | rA(ctx->opcode) == rD(ctx->opcode))) { \ |
| 2214 | 2468 | GEN_EXCP_INVAL(ctx); \ |
| 2215 | 2469 | return; \ |
| 2216 | 2470 | } \ |
| 2471 | + EA = tcg_temp_new(TCG_TYPE_TL); \ | |
| 2217 | 2472 | if (type == PPC_64B) \ |
| 2218 | - gen_addr_imm_index(cpu_T[0], ctx, 0x03); \ | |
| 2473 | + gen_addr_imm_index(EA, ctx, 0x03); \ | |
| 2219 | 2474 | else \ |
| 2220 | - gen_addr_imm_index(cpu_T[0], ctx, 0); \ | |
| 2221 | - op_ldst(l##width); \ | |
| 2222 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[1]); \ | |
| 2223 | - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \ | |
| 2475 | + gen_addr_imm_index(EA, ctx, 0); \ | |
| 2476 | + gen_qemu_ld##width(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \ | |
| 2477 | + tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ | |
| 2478 | + tcg_temp_free(EA); \ | |
| 2224 | 2479 | } |
| 2225 | 2480 | |
| 2226 | 2481 | #define GEN_LDUX(width, opc2, opc3, type) \ |
| 2227 | 2482 | GEN_HANDLER(l##width##ux, 0x1F, opc2, opc3, 0x00000001, type) \ |
| 2228 | 2483 | { \ |
| 2484 | + TCGv EA; \ | |
| 2229 | 2485 | if (unlikely(rA(ctx->opcode) == 0 || \ |
| 2230 | 2486 | rA(ctx->opcode) == rD(ctx->opcode))) { \ |
| 2231 | 2487 | GEN_EXCP_INVAL(ctx); \ |
| 2232 | 2488 | return; \ |
| 2233 | 2489 | } \ |
| 2234 | - gen_addr_reg_index(cpu_T[0], ctx); \ | |
| 2235 | - op_ldst(l##width); \ | |
| 2236 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[1]); \ | |
| 2237 | - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \ | |
| 2490 | + EA = tcg_temp_new(TCG_TYPE_TL); \ | |
| 2491 | + gen_addr_reg_index(EA, ctx); \ | |
| 2492 | + gen_qemu_ld##width(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \ | |
| 2493 | + tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ | |
| 2494 | + tcg_temp_free(EA); \ | |
| 2238 | 2495 | } |
| 2239 | 2496 | |
| 2240 | 2497 | #define GEN_LDX(width, opc2, opc3, type) \ |
| 2241 | 2498 | GEN_HANDLER(l##width##x, 0x1F, opc2, opc3, 0x00000001, type) \ |
| 2242 | 2499 | { \ |
| 2243 | - gen_addr_reg_index(cpu_T[0], ctx); \ | |
| 2244 | - op_ldst(l##width); \ | |
| 2245 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[1]); \ | |
| 2500 | + TCGv EA = tcg_temp_new(TCG_TYPE_TL); \ | |
| 2501 | + gen_addr_reg_index(EA, ctx); \ | |
| 2502 | + gen_qemu_ld##width(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \ | |
| 2503 | + tcg_temp_free(EA); \ | |
| 2246 | 2504 | } |
| 2247 | 2505 | |
| 2248 | 2506 | #define GEN_LDS(width, op, type) \ |
| 2249 | -OP_LD_TABLE(width); \ | |
| 2250 | 2507 | GEN_LD(width, op | 0x20, type); \ |
| 2251 | 2508 | GEN_LDU(width, op | 0x21, type); \ |
| 2252 | 2509 | GEN_LDUX(width, 0x17, op | 0x01, type); \ |
| 2253 | 2510 | GEN_LDX(width, 0x17, op | 0x00, type) |
| 2254 | 2511 | |
| 2255 | 2512 | /* lbz lbzu lbzux lbzx */ |
| 2256 | -GEN_LDS(bz, 0x02, PPC_INTEGER); | |
| 2513 | +GEN_LDS(8u, 0x02, PPC_INTEGER); | |
| 2257 | 2514 | /* lha lhau lhaux lhax */ |
| 2258 | -GEN_LDS(ha, 0x0A, PPC_INTEGER); | |
| 2515 | +GEN_LDS(16s, 0x0A, PPC_INTEGER); | |
| 2259 | 2516 | /* lhz lhzu lhzux lhzx */ |
| 2260 | -GEN_LDS(hz, 0x08, PPC_INTEGER); | |
| 2517 | +GEN_LDS(16u, 0x08, PPC_INTEGER); | |
| 2261 | 2518 | /* lwz lwzu lwzux lwzx */ |
| 2262 | -GEN_LDS(wz, 0x00, PPC_INTEGER); | |
| 2519 | +GEN_LDS(32u, 0x00, PPC_INTEGER); | |
| 2263 | 2520 | #if defined(TARGET_PPC64) |
| 2264 | -OP_LD_TABLE(wa); | |
| 2265 | -OP_LD_TABLE(d); | |
| 2266 | 2521 | /* lwaux */ |
| 2267 | -GEN_LDUX(wa, 0x15, 0x0B, PPC_64B); | |
| 2522 | +GEN_LDUX(32s, 0x15, 0x0B, PPC_64B); | |
| 2268 | 2523 | /* lwax */ |
| 2269 | -GEN_LDX(wa, 0x15, 0x0A, PPC_64B); | |
| 2524 | +GEN_LDX(32s, 0x15, 0x0A, PPC_64B); | |
| 2270 | 2525 | /* ldux */ |
| 2271 | -GEN_LDUX(d, 0x15, 0x01, PPC_64B); | |
| 2526 | +GEN_LDUX(64, 0x15, 0x01, PPC_64B); | |
| 2272 | 2527 | /* ldx */ |
| 2273 | -GEN_LDX(d, 0x15, 0x00, PPC_64B); | |
| 2528 | +GEN_LDX(64, 0x15, 0x00, PPC_64B); | |
| 2274 | 2529 | GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B) |
| 2275 | 2530 | { |
| 2531 | + TCGv EA; | |
| 2276 | 2532 | if (Rc(ctx->opcode)) { |
| 2277 | 2533 | if (unlikely(rA(ctx->opcode) == 0 || |
| 2278 | 2534 | rA(ctx->opcode) == rD(ctx->opcode))) { |
| ... | ... | @@ -2280,17 +2536,18 @@ GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B) |
| 2280 | 2536 | return; |
| 2281 | 2537 | } |
| 2282 | 2538 | } |
| 2283 | - gen_addr_imm_index(cpu_T[0], ctx, 0x03); | |
| 2539 | + EA = tcg_temp_new(TCG_TYPE_TL); | |
| 2540 | + gen_addr_imm_index(EA, ctx, 0x03); | |
| 2284 | 2541 | if (ctx->opcode & 0x02) { |
| 2285 | 2542 | /* lwa (lwau is undefined) */ |
| 2286 | - op_ldst(lwa); | |
| 2543 | + gen_qemu_ld32s(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); | |
| 2287 | 2544 | } else { |
| 2288 | 2545 | /* ld - ldu */ |
| 2289 | - op_ldst(ld); | |
| 2546 | + gen_qemu_ld64(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); | |
| 2290 | 2547 | } |
| 2291 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[1]); | |
| 2292 | 2548 | if (Rc(ctx->opcode)) |
| 2293 | - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); | |
| 2549 | + tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); | |
| 2550 | + tcg_temp_free(EA); | |
| 2294 | 2551 | } |
| 2295 | 2552 | /* lq */ |
| 2296 | 2553 | GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX) |
| ... | ... | @@ -2299,6 +2556,7 @@ GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX) |
| 2299 | 2556 | GEN_EXCP_PRIVOPC(ctx); |
| 2300 | 2557 | #else |
| 2301 | 2558 | int ra, rd; |
| 2559 | + TCGv EA; | |
| 2302 | 2560 | |
| 2303 | 2561 | /* Restore CPU state */ |
| 2304 | 2562 | if (unlikely(ctx->supervisor == 0)) { |
| ... | ... | @@ -2316,12 +2574,12 @@ GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX) |
| 2316 | 2574 | GEN_EXCP(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE); |
| 2317 | 2575 | return; |
| 2318 | 2576 | } |
| 2319 | - gen_addr_imm_index(cpu_T[0], ctx, 0x0F); | |
| 2320 | - op_ldst(ld); | |
| 2321 | - tcg_gen_mov_tl(cpu_gpr[rd], cpu_T[1]); | |
| 2322 | - tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 8); | |
| 2323 | - op_ldst(ld); | |
| 2324 | - tcg_gen_mov_tl(cpu_gpr[rd + 1], cpu_T[1]); | |
| 2577 | + EA = tcg_temp_new(TCG_TYPE_TL); | |
| 2578 | + gen_addr_imm_index(EA, ctx, 0x0F); | |
| 2579 | + gen_qemu_ld64(cpu_gpr[rd], EA, ctx->mem_idx); | |
| 2580 | + tcg_gen_addi_tl(EA, EA, 8); | |
| 2581 | + gen_qemu_ld64(cpu_gpr[rd+1], EA, ctx->mem_idx); | |
| 2582 | + tcg_temp_free(EA); | |
| 2325 | 2583 | #endif |
| 2326 | 2584 | } |
| 2327 | 2585 | #endif |
| ... | ... | @@ -2330,68 +2588,73 @@ GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX) |
| 2330 | 2588 | #define GEN_ST(width, opc, type) \ |
| 2331 | 2589 | GEN_HANDLER(st##width, opc, 0xFF, 0xFF, 0x00000000, type) \ |
| 2332 | 2590 | { \ |
| 2333 | - gen_addr_imm_index(cpu_T[0], ctx, 0); \ | |
| 2334 | - tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]); \ | |
| 2335 | - op_ldst(st##width); \ | |
| 2591 | + TCGv EA = tcg_temp_new(TCG_TYPE_TL); \ | |
| 2592 | + gen_addr_imm_index(EA, ctx, 0); \ | |
| 2593 | + gen_qemu_st##width(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \ | |
| 2594 | + tcg_temp_free(EA); \ | |
| 2336 | 2595 | } |
| 2337 | 2596 | |
| 2338 | 2597 | #define GEN_STU(width, opc, type) \ |
| 2339 | 2598 | GEN_HANDLER(st##width##u, opc, 0xFF, 0xFF, 0x00000000, type) \ |
| 2340 | 2599 | { \ |
| 2600 | + TCGv EA; \ | |
| 2341 | 2601 | if (unlikely(rA(ctx->opcode) == 0)) { \ |
| 2342 | 2602 | GEN_EXCP_INVAL(ctx); \ |
| 2343 | 2603 | return; \ |
| 2344 | 2604 | } \ |
| 2605 | + EA = tcg_temp_new(TCG_TYPE_TL); \ | |
| 2345 | 2606 | if (type == PPC_64B) \ |
| 2346 | - gen_addr_imm_index(cpu_T[0], ctx, 0x03); \ | |
| 2607 | + gen_addr_imm_index(EA, ctx, 0x03); \ | |
| 2347 | 2608 | else \ |
| 2348 | - gen_addr_imm_index(cpu_T[0], ctx, 0); \ | |
| 2349 | - tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]); \ | |
| 2350 | - op_ldst(st##width); \ | |
| 2351 | - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \ | |
| 2609 | + gen_addr_imm_index(EA, ctx, 0); \ | |
| 2610 | + gen_qemu_st##width(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \ | |
| 2611 | + tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ | |
| 2612 | + tcg_temp_free(EA); \ | |
| 2352 | 2613 | } |
| 2353 | 2614 | |
| 2354 | 2615 | #define GEN_STUX(width, opc2, opc3, type) \ |
| 2355 | 2616 | GEN_HANDLER(st##width##ux, 0x1F, opc2, opc3, 0x00000001, type) \ |
| 2356 | 2617 | { \ |
| 2618 | + TCGv EA; \ | |
| 2357 | 2619 | if (unlikely(rA(ctx->opcode) == 0)) { \ |
| 2358 | 2620 | GEN_EXCP_INVAL(ctx); \ |
| 2359 | 2621 | return; \ |
| 2360 | 2622 | } \ |
| 2361 | - gen_addr_reg_index(cpu_T[0], ctx); \ | |
| 2362 | - tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]); \ | |
| 2363 | - op_ldst(st##width); \ | |
| 2364 | - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); \ | |
| 2623 | + EA = tcg_temp_new(TCG_TYPE_TL); \ | |
| 2624 | + gen_addr_reg_index(EA, ctx); \ | |
| 2625 | + gen_qemu_st##width(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \ | |
| 2626 | + tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ | |
| 2627 | + tcg_temp_free(EA); \ | |
| 2365 | 2628 | } |
| 2366 | 2629 | |
| 2367 | 2630 | #define GEN_STX(width, opc2, opc3, type) \ |
| 2368 | 2631 | GEN_HANDLER(st##width##x, 0x1F, opc2, opc3, 0x00000001, type) \ |
| 2369 | 2632 | { \ |
| 2370 | - gen_addr_reg_index(cpu_T[0], ctx); \ | |
| 2371 | - tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rS(ctx->opcode)]); \ | |
| 2372 | - op_ldst(st##width); \ | |
| 2633 | + TCGv EA = tcg_temp_new(TCG_TYPE_TL); \ | |
| 2634 | + gen_addr_reg_index(EA, ctx); \ | |
| 2635 | + gen_qemu_st##width(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \ | |
| 2636 | + tcg_temp_free(EA); \ | |
| 2373 | 2637 | } |
| 2374 | 2638 | |
| 2375 | 2639 | #define GEN_STS(width, op, type) \ |
| 2376 | -OP_ST_TABLE(width); \ | |
| 2377 | 2640 | GEN_ST(width, op | 0x20, type); \ |
| 2378 | 2641 | GEN_STU(width, op | 0x21, type); \ |
| 2379 | 2642 | GEN_STUX(width, 0x17, op | 0x01, type); \ |
| 2380 | 2643 | GEN_STX(width, 0x17, op | 0x00, type) |
| 2381 | 2644 | |
| 2382 | 2645 | /* stb stbu stbux stbx */ |
| 2383 | -GEN_STS(b, 0x06, PPC_INTEGER); | |
| 2646 | +GEN_STS(8, 0x06, PPC_INTEGER); | |
| 2384 | 2647 | /* sth sthu sthux sthx */ |
| 2385 | -GEN_STS(h, 0x0C, PPC_INTEGER); | |
| 2648 | +GEN_STS(16, 0x0C, PPC_INTEGER); | |
| 2386 | 2649 | /* stw stwu stwux stwx */ |
| 2387 | -GEN_STS(w, 0x04, PPC_INTEGER); | |
| 2650 | +GEN_STS(32, 0x04, PPC_INTEGER); | |
| 2388 | 2651 | #if defined(TARGET_PPC64) |
| 2389 | -OP_ST_TABLE(d); | |
| 2390 | -GEN_STUX(d, 0x15, 0x05, PPC_64B); | |
| 2391 | -GEN_STX(d, 0x15, 0x04, PPC_64B); | |
| 2652 | +GEN_STUX(64, 0x15, 0x05, PPC_64B); | |
| 2653 | +GEN_STX(64, 0x15, 0x04, PPC_64B); | |
| 2392 | 2654 | GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B) |
| 2393 | 2655 | { |
| 2394 | 2656 | int rs; |
| 2657 | + TCGv EA; | |
| 2395 | 2658 | |
| 2396 | 2659 | rs = rS(ctx->opcode); |
| 2397 | 2660 | if ((ctx->opcode & 0x3) == 0x2) { |
| ... | ... | @@ -2412,12 +2675,12 @@ GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B) |
| 2412 | 2675 | GEN_EXCP(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE); |
| 2413 | 2676 | return; |
| 2414 | 2677 | } |
| 2415 | - gen_addr_imm_index(cpu_T[0], ctx, 0x03); | |
| 2416 | - tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rs]); | |
| 2417 | - op_ldst(std); | |
| 2418 | - tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 8); | |
| 2419 | - tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rs + 1]); | |
| 2420 | - op_ldst(std); | |
| 2678 | + EA = tcg_temp_new(TCG_TYPE_TL); | |
| 2679 | + gen_addr_imm_index(EA, ctx, 0x03); | |
| 2680 | + gen_qemu_st64(cpu_gpr[rs], EA, ctx->mem_idx); | |
| 2681 | + tcg_gen_addi_tl(EA, EA, 8); | |
| 2682 | + gen_qemu_st64(cpu_gpr[rs+1], EA, ctx->mem_idx); | |
| 2683 | + tcg_temp_free(EA); | |
| 2421 | 2684 | #endif |
| 2422 | 2685 | } else { |
| 2423 | 2686 | /* std / stdu */ |
| ... | ... | @@ -2427,27 +2690,60 @@ GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B) |
| 2427 | 2690 | return; |
| 2428 | 2691 | } |
| 2429 | 2692 | } |
| 2430 | - gen_addr_imm_index(cpu_T[0], ctx, 0x03); | |
| 2431 | - tcg_gen_mov_tl(cpu_T[1], cpu_gpr[rs]); | |
| 2432 | - op_ldst(std); | |
| 2693 | + EA = tcg_temp_new(TCG_TYPE_TL); | |
| 2694 | + gen_addr_imm_index(EA, ctx, 0x03); | |
| 2695 | + gen_qemu_st64(cpu_gpr[rs], EA, ctx->mem_idx); | |
| 2433 | 2696 | if (Rc(ctx->opcode)) |
| 2434 | - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_T[0]); | |
| 2697 | + tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); | |
| 2698 | + tcg_temp_free(EA); | |
| 2435 | 2699 | } |
| 2436 | 2700 | } |
| 2437 | 2701 | #endif |
| 2438 | 2702 | /*** Integer load and store with byte reverse ***/ |
| 2439 | 2703 | /* lhbrx */ |
| 2440 | -OP_LD_TABLE(hbr); | |
| 2441 | -GEN_LDX(hbr, 0x16, 0x18, PPC_INTEGER); | |
| 2704 | +void always_inline gen_qemu_ld16ur(TCGv t0, TCGv t1, int flags) | |
| 2705 | +{ | |
| 2706 | + TCGv temp = tcg_temp_new(TCG_TYPE_I32); | |
| 2707 | + gen_qemu_ld16u(temp, t1, flags); | |
| 2708 | + tcg_gen_bswap16_i32(temp, temp); | |
| 2709 | + tcg_gen_extu_i32_tl(t0, temp); | |
| 2710 | + tcg_temp_free(temp); | |
| 2711 | +} | |
| 2712 | +GEN_LDX(16ur, 0x16, 0x18, PPC_INTEGER); | |
| 2713 | + | |
| 2442 | 2714 | /* lwbrx */ |
| 2443 | -OP_LD_TABLE(wbr); | |
| 2444 | -GEN_LDX(wbr, 0x16, 0x10, PPC_INTEGER); | |
| 2715 | +void always_inline gen_qemu_ld32ur(TCGv t0, TCGv t1, int flags) | |
| 2716 | +{ | |
| 2717 | + TCGv temp = tcg_temp_new(TCG_TYPE_I32); | |
| 2718 | + gen_qemu_ld32u(temp, t1, flags); | |
| 2719 | + tcg_gen_bswap_i32(temp, temp); | |
| 2720 | + tcg_gen_extu_i32_tl(t0, temp); | |
| 2721 | + tcg_temp_free(temp); | |
| 2722 | +} | |
| 2723 | +GEN_LDX(32ur, 0x16, 0x10, PPC_INTEGER); | |
| 2724 | + | |
| 2445 | 2725 | /* sthbrx */ |
| 2446 | -OP_ST_TABLE(hbr); | |
| 2447 | -GEN_STX(hbr, 0x16, 0x1C, PPC_INTEGER); | |
| 2726 | +void always_inline gen_qemu_st16r(TCGv t0, TCGv t1, int flags) | |
| 2727 | +{ | |
| 2728 | + TCGv temp = tcg_temp_new(TCG_TYPE_I32); | |
| 2729 | + tcg_gen_trunc_tl_i32(temp, t0); | |
| 2730 | + tcg_gen_ext16u_i32(temp, temp); | |
| 2731 | + tcg_gen_bswap16_i32(temp, temp); | |
| 2732 | + gen_qemu_st16(temp, t1, flags); | |
| 2733 | + tcg_temp_free(temp); | |
| 2734 | +} | |
| 2735 | +GEN_STX(16r, 0x16, 0x1C, PPC_INTEGER); | |
| 2736 | + | |
| 2448 | 2737 | /* stwbrx */ |
| 2449 | -OP_ST_TABLE(wbr); | |
| 2450 | -GEN_STX(wbr, 0x16, 0x14, PPC_INTEGER); | |
| 2738 | +void always_inline gen_qemu_st32r(TCGv t0, TCGv t1, int flags) | |
| 2739 | +{ | |
| 2740 | + TCGv temp = tcg_temp_new(TCG_TYPE_I32); | |
| 2741 | + tcg_gen_trunc_tl_i32(temp, t0); | |
| 2742 | + tcg_gen_bswap_i32(temp, temp); | |
| 2743 | + gen_qemu_st32(temp, t1, flags); | |
| 2744 | + tcg_temp_free(temp); | |
| 2745 | +} | |
| 2746 | +GEN_STX(32r, 0x16, 0x14, PPC_INTEGER); | |
| 2451 | 2747 | |
| 2452 | 2748 | /*** Integer load and store multiple ***/ |
| 2453 | 2749 | #define op_ldstm(name, reg) (*gen_op_##name[ctx->mem_idx])(reg) |
| ... | ... | @@ -3444,8 +3740,10 @@ GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000001, PPC_MISC) |
| 3444 | 3740 | GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE) |
| 3445 | 3741 | { |
| 3446 | 3742 | /* XXX: specification says this is treated as a load by the MMU */ |
| 3447 | - gen_addr_reg_index(cpu_T[0], ctx); | |
| 3448 | - op_ldst(lbz); | |
| 3743 | + TCGv temp = tcg_temp_new(TCG_TYPE_TL); | |
| 3744 | + gen_addr_reg_index(temp, ctx); | |
| 3745 | + gen_qemu_ld8u(temp, temp, ctx->mem_idx); | |
| 3746 | + tcg_temp_free(temp); | |
| 3449 | 3747 | } |
| 3450 | 3748 | |
| 3451 | 3749 | /* dcbi (Supervisor only) */ |
| ... | ... | @@ -3454,14 +3752,18 @@ GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE) |
| 3454 | 3752 | #if defined(CONFIG_USER_ONLY) |
| 3455 | 3753 | GEN_EXCP_PRIVOPC(ctx); |
| 3456 | 3754 | #else |
| 3755 | + TCGv EA, val; | |
| 3457 | 3756 | if (unlikely(!ctx->supervisor)) { |
| 3458 | 3757 | GEN_EXCP_PRIVOPC(ctx); |
| 3459 | 3758 | return; |
| 3460 | 3759 | } |
| 3461 | - gen_addr_reg_index(cpu_T[0], ctx); | |
| 3760 | + EA = tcg_temp_new(TCG_TYPE_TL); | |
| 3761 | + gen_addr_reg_index(EA, ctx); | |
| 3462 | 3762 | /* XXX: specification says this should be treated as a store by the MMU */ |
| 3463 | - op_ldst(lbz); | |
| 3464 | - op_ldst(stb); | |
| 3763 | + val = tcg_temp_new(TCG_TYPE_TL); gen_qemu_ld8u(val, EA, ctx->mem_idx); | |
| 3764 | + gen_qemu_st8(val, EA, ctx->mem_idx); | |
| 3765 | + tcg_temp_free(val); | |
| 3766 | + tcg_temp_free(EA); | |
| 3465 | 3767 | #endif |
| 3466 | 3768 | } |
| 3467 | 3769 | |
| ... | ... | @@ -3469,8 +3771,10 @@ GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE) |
| 3469 | 3771 | GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE) |
| 3470 | 3772 | { |
| 3471 | 3773 | /* XXX: specification say this is treated as a load by the MMU */ |
| 3472 | - gen_addr_reg_index(cpu_T[0], ctx); | |
| 3473 | - op_ldst(lbz); | |
| 3774 | + TCGv temp = tcg_temp_new(TCG_TYPE_TL); | |
| 3775 | + gen_addr_reg_index(temp, ctx); | |
| 3776 | + gen_qemu_ld8u(temp, temp, ctx->mem_idx); | |
| 3777 | + tcg_temp_free(temp); | |
| 3474 | 3778 | } |
| 3475 | 3779 | |
| 3476 | 3780 | /* dcbt */ |
| ... | ... | @@ -4889,13 +5193,18 @@ GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON) |
| 4889 | 5193 | #if defined(CONFIG_USER_ONLY) |
| 4890 | 5194 | GEN_EXCP_PRIVOPC(ctx); |
| 4891 | 5195 | #else |
| 5196 | + TCGv EA, val; | |
| 4892 | 5197 | if (unlikely(!ctx->supervisor)) { |
| 4893 | 5198 | GEN_EXCP_PRIVOPC(ctx); |
| 4894 | 5199 | return; |
| 4895 | 5200 | } |
| 4896 | - gen_addr_reg_index(cpu_T[0], ctx); | |
| 4897 | - op_ldst(lwz); | |
| 4898 | - tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_T[0]); | |
| 5201 | + EA = tcg_temp_new(TCG_TYPE_TL); | |
| 5202 | + gen_addr_reg_index(EA, ctx); | |
| 5203 | + val = tcg_temp_new(TCG_TYPE_TL); | |
| 5204 | + gen_qemu_ld32u(val, EA, ctx->mem_idx); | |
| 5205 | + tcg_temp_free(val); | |
| 5206 | + tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA); | |
| 5207 | + tcg_temp_free(EA); | |
| 4899 | 5208 | #endif |
| 4900 | 5209 | } |
| 4901 | 5210 | |
| ... | ... | @@ -5593,43 +5902,6 @@ GEN_HANDLER2(evsel3, "evsel", 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE) |
| 5593 | 5902 | } |
| 5594 | 5903 | |
| 5595 | 5904 | /* Load and stores */ |
| 5596 | -#if defined(TARGET_PPC64) | |
| 5597 | -/* In that case, we already have 64 bits load & stores | |
| 5598 | - * so, spe_ldd is equivalent to ld and spe_std is equivalent to std | |
| 5599 | - */ | |
| 5600 | -#define gen_op_spe_ldd_raw gen_op_ld_raw | |
| 5601 | -#define gen_op_spe_ldd_user gen_op_ld_user | |
| 5602 | -#define gen_op_spe_ldd_kernel gen_op_ld_kernel | |
| 5603 | -#define gen_op_spe_ldd_hypv gen_op_ld_hypv | |
| 5604 | -#define gen_op_spe_ldd_64_raw gen_op_ld_64_raw | |
| 5605 | -#define gen_op_spe_ldd_64_user gen_op_ld_64_user | |
| 5606 | -#define gen_op_spe_ldd_64_kernel gen_op_ld_64_kernel | |
| 5607 | -#define gen_op_spe_ldd_64_hypv gen_op_ld_64_hypv | |
| 5608 | -#define gen_op_spe_ldd_le_raw gen_op_ld_le_raw | |
| 5609 | -#define gen_op_spe_ldd_le_user gen_op_ld_le_user | |
| 5610 | -#define gen_op_spe_ldd_le_kernel gen_op_ld_le_kernel | |
| 5611 | -#define gen_op_spe_ldd_le_hypv gen_op_ld_le_hypv | |
| 5612 | -#define gen_op_spe_ldd_le_64_raw gen_op_ld_le_64_raw | |
| 5613 | -#define gen_op_spe_ldd_le_64_user gen_op_ld_le_64_user | |
| 5614 | -#define gen_op_spe_ldd_le_64_kernel gen_op_ld_le_64_kernel | |
| 5615 | -#define gen_op_spe_ldd_le_64_hypv gen_op_ld_le_64_hypv | |
| 5616 | -#define gen_op_spe_stdd_raw gen_op_std_raw | |
| 5617 | -#define gen_op_spe_stdd_user gen_op_std_user | |
| 5618 | -#define gen_op_spe_stdd_kernel gen_op_std_kernel | |
| 5619 | -#define gen_op_spe_stdd_hypv gen_op_std_hypv | |
| 5620 | -#define gen_op_spe_stdd_64_raw gen_op_std_64_raw | |
| 5621 | -#define gen_op_spe_stdd_64_user gen_op_std_64_user | |
| 5622 | -#define gen_op_spe_stdd_64_kernel gen_op_std_64_kernel | |
| 5623 | -#define gen_op_spe_stdd_64_hypv gen_op_std_64_hypv | |
| 5624 | -#define gen_op_spe_stdd_le_raw gen_op_std_le_raw | |
| 5625 | -#define gen_op_spe_stdd_le_user gen_op_std_le_user | |
| 5626 | -#define gen_op_spe_stdd_le_kernel gen_op_std_le_kernel | |
| 5627 | -#define gen_op_spe_stdd_le_hypv gen_op_std_le_hypv | |
| 5628 | -#define gen_op_spe_stdd_le_64_raw gen_op_std_le_64_raw | |
| 5629 | -#define gen_op_spe_stdd_le_64_user gen_op_std_le_64_user | |
| 5630 | -#define gen_op_spe_stdd_le_64_kernel gen_op_std_le_64_kernel | |
| 5631 | -#define gen_op_spe_stdd_le_64_hypv gen_op_std_le_64_hypv | |
| 5632 | -#endif /* defined(TARGET_PPC64) */ | |
| 5633 | 5905 | GEN_SPEOP_LDST(dd, 3); |
| 5634 | 5906 | GEN_SPEOP_LDST(dw, 3); |
| 5635 | 5907 | GEN_SPEOP_LDST(dh, 3); |
| ... | ... | @@ -5638,25 +5910,6 @@ GEN_SPEOP_LD(whou, 2); |
| 5638 | 5910 | GEN_SPEOP_LD(whos, 2); |
| 5639 | 5911 | GEN_SPEOP_ST(who, 2); |
| 5640 | 5912 | |
| 5641 | -#if defined(TARGET_PPC64) | |
| 5642 | -/* In that case, spe_stwwo is equivalent to stw */ | |
| 5643 | -#define gen_op_spe_stwwo_raw gen_op_stw_raw | |
| 5644 | -#define gen_op_spe_stwwo_user gen_op_stw_user | |
| 5645 | -#define gen_op_spe_stwwo_kernel gen_op_stw_kernel | |
| 5646 | -#define gen_op_spe_stwwo_hypv gen_op_stw_hypv | |
| 5647 | -#define gen_op_spe_stwwo_le_raw gen_op_stw_le_raw | |
| 5648 | -#define gen_op_spe_stwwo_le_user gen_op_stw_le_user | |
| 5649 | -#define gen_op_spe_stwwo_le_kernel gen_op_stw_le_kernel | |
| 5650 | -#define gen_op_spe_stwwo_le_hypv gen_op_stw_le_hypv | |
| 5651 | -#define gen_op_spe_stwwo_64_raw gen_op_stw_64_raw | |
| 5652 | -#define gen_op_spe_stwwo_64_user gen_op_stw_64_user | |
| 5653 | -#define gen_op_spe_stwwo_64_kernel gen_op_stw_64_kernel | |
| 5654 | -#define gen_op_spe_stwwo_64_hypv gen_op_stw_64_hypv | |
| 5655 | -#define gen_op_spe_stwwo_le_64_raw gen_op_stw_le_64_raw | |
| 5656 | -#define gen_op_spe_stwwo_le_64_user gen_op_stw_le_64_user | |
| 5657 | -#define gen_op_spe_stwwo_le_64_kernel gen_op_stw_le_64_kernel | |
| 5658 | -#define gen_op_spe_stwwo_le_64_hypv gen_op_stw_le_64_hypv | |
| 5659 | -#endif | |
| 5660 | 5913 | #define _GEN_OP_SPE_STWWE(suffix) \ |
| 5661 | 5914 | static always_inline void gen_op_spe_stwwe_##suffix (void) \ |
| 5662 | 5915 | { \ |