Commit dfbc799d8e94d26ab2e6ad4a65dc97fd8fb6ece6
1 parent: 37d269df
target-ppc: convert load/store string instructions to TCG
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5828 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 6 changed files with 135 additions and 203 deletions.
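For context (not part of the commit itself): the change follows the usual dyngen-to-TCG conversion pattern. The per-MMU-mode gen_op_* dispatch tables and the fixed cpu_T[] registers are dropped in favour of C helpers declared in helper.h; the translator calls them with explicit TCG temporaries, and the helper picks the user/kernel/hypv memory-access function from env->mmu_idx at run time. A minimal sketch of that pattern, using a hypothetical helper named "foo" (the names gen_foo and helper_foo below are illustrative, not part of QEMU; only the DEF_HELPER/gen_helper/tcg_* constructs are taken from the diff):

/* helper.h -- declares the helper; the helper machinery also generates a
 * matching gen_helper_foo() wrapper for the translator to call. */
DEF_HELPER_2(foo, void, tl, i32)

/* op_helper.c -- runs at execution time with full access to CPU state. */
void helper_foo(target_ulong addr, uint32_t reg)
{
    /* ... perform the access, using env->mmu_idx to select the
     * user/kernel/hypv load or store function ... */
}

/* translate.c -- runs at translation time; operands are TCG values. */
static void gen_foo(DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();                      /* effective address */
    TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));  /* register number   */
    /* NIP cannot be restored if the memory exception comes from a helper */
    gen_update_nip(ctx, ctx->nip - 4);
    gen_addr_register(t0, ctx);
    gen_helper_foo(t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
}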
target-ppc/helper.h
@@ -9,6 +9,9 @@ DEF_HELPER_3(td, void, tl, tl, i32)
 
 DEF_HELPER_2(lmw, void, tl, i32)
 DEF_HELPER_2(stmw, void, tl, i32)
+DEF_HELPER_3(lsw, void, tl, i32, i32)
+DEF_HELPER_4(lswx, void, tl, i32, i32, i32)
+DEF_HELPER_3(stsw, void, tl, i32, i32)
 DEF_HELPER_1(dcbz, void, tl)
 DEF_HELPER_1(dcbz_970, void, tl)
 DEF_HELPER_1(icbi, void, tl)
target-ppc/op_helper.c
@@ -170,6 +170,99 @@ void helper_stmw (target_ulong addr, uint32_t reg)
     }
 }
 
+void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
+{
+    int sh;
+#ifdef CONFIG_USER_ONLY
+#define ldfunl ldl_raw
+#define ldfunb ldub_raw
+#else
+    int (*ldfunl)(target_ulong);
+    int (*ldfunb)(target_ulong);
+
+    switch (env->mmu_idx) {
+    default:
+    case 0:
+        ldfunl = ldl_user;
+        ldfunb = ldub_user;
+        break;
+    case 1:
+        ldfunl = ldl_kernel;
+        ldfunb = ldub_kernel;
+        break;
+    case 2:
+        ldfunl = ldl_hypv;
+        ldfunb = ldub_hypv;
+        break;
+    }
+#endif
+    for (; nb > 3; nb -= 4, addr += 4) {
+        env->gpr[reg] = ldfunl(get_addr(addr));
+        reg = (reg + 1) % 32;
+    }
+    if (unlikely(nb > 0)) {
+        env->gpr[reg] = 0;
+        for (sh = 24; nb > 0; nb--, addr++, sh -= 8) {
+            env->gpr[reg] |= ldfunb(get_addr(addr)) << sh;
+        }
+    }
+}
+/* PPC32 specification says we must generate an exception if
+ * rA is in the range of registers to be loaded.
+ * In an other hand, IBM says this is valid, but rA won't be loaded.
+ * For now, I'll follow the spec...
+ */
+void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
+{
+    if (likely(xer_bc != 0)) {
+        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
+                     (reg < rb && (reg + xer_bc) > rb))) {
+            raise_exception_err(env, POWERPC_EXCP_PROGRAM,
+                                POWERPC_EXCP_INVAL |
+                                POWERPC_EXCP_INVAL_LSWX);
+        } else {
+            helper_lsw(addr, xer_bc, reg);
+        }
+    }
+}
+
+void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
+{
+    int sh;
+#ifdef CONFIG_USER_ONLY
+#define stfunl stl_raw
+#define stfunb stb_raw
+#else
+    void (*stfunl)(target_ulong, int);
+    void (*stfunb)(target_ulong, int);
+
+    switch (env->mmu_idx) {
+    default:
+    case 0:
+        stfunl = stl_user;
+        stfunb = stb_user;
+        break;
+    case 1:
+        stfunl = stl_kernel;
+        stfunb = stb_kernel;
+        break;
+    case 2:
+        stfunl = stl_hypv;
+        stfunb = stb_hypv;
+        break;
+    }
+#endif
+
+    for (; nb > 3; nb -= 4, addr += 4) {
+        stfunl(get_addr(addr), env->gpr[reg]);
+        reg = (reg + 1) % 32;
+    }
+    if (unlikely(nb > 0)) {
+        for (sh = 24; nb > 0; nb--, addr++, sh -= 8)
+            stfunb(get_addr(addr), (env->gpr[reg] >> sh) & 0xFF);
+    }
+}
+
 static void do_dcbz(target_ulong addr, int dcache_line_size)
 {
     target_long mask = get_addr(~(dcache_line_size - 1));
target-ppc/op_helper.h
@@ -21,19 +21,12 @@
 #if defined(MEMSUFFIX)
 
 /* Memory load/store helpers */
-void glue(do_lsw, MEMSUFFIX) (int dst);
-void glue(do_stsw, MEMSUFFIX) (int src);
 void glue(do_POWER_lscbx, MEMSUFFIX) (int dest, int ra, int rb);
 void glue(do_POWER2_lfq, MEMSUFFIX) (void);
 void glue(do_POWER2_lfq_le, MEMSUFFIX) (void);
 void glue(do_POWER2_stfq, MEMSUFFIX) (void);
 void glue(do_POWER2_stfq_le, MEMSUFFIX) (void);
 
-#if defined(TARGET_PPC64)
-void glue(do_lsw_64, MEMSUFFIX) (int dst);
-void glue(do_stsw_64, MEMSUFFIX) (int src);
-#endif
-
 #else
 
 void do_print_mem_EA (target_ulong EA);
target-ppc/op_helper_mem.h
@@ -20,78 +20,6 @@
 
 #include "op_mem_access.h"
 
-void glue(do_lsw, MEMSUFFIX) (int dst)
-{
-    uint32_t tmp;
-    int sh;
-
-    for (; T1 > 3; T1 -= 4, T0 += 4) {
-        env->gpr[dst++] = glue(ldu32, MEMSUFFIX)((uint32_t)T0);
-        if (unlikely(dst == 32))
-            dst = 0;
-    }
-    if (unlikely(T1 != 0)) {
-        tmp = 0;
-        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
-            tmp |= glue(ldu8, MEMSUFFIX)((uint32_t)T0) << sh;
-        }
-        env->gpr[dst] = tmp;
-    }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_lsw_64, MEMSUFFIX) (int dst)
-{
-    uint32_t tmp;
-    int sh;
-
-    for (; T1 > 3; T1 -= 4, T0 += 4) {
-        env->gpr[dst++] = glue(ldu32, MEMSUFFIX)((uint64_t)T0);
-        if (unlikely(dst == 32))
-            dst = 0;
-    }
-    if (unlikely(T1 != 0)) {
-        tmp = 0;
-        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
-            tmp |= glue(ldu8, MEMSUFFIX)((uint64_t)T0) << sh;
-        }
-        env->gpr[dst] = tmp;
-    }
-}
-#endif
-
-void glue(do_stsw, MEMSUFFIX) (int src)
-{
-    int sh;
-
-    for (; T1 > 3; T1 -= 4, T0 += 4) {
-        glue(st32, MEMSUFFIX)((uint32_t)T0, env->gpr[src++]);
-        if (unlikely(src == 32))
-            src = 0;
-    }
-    if (unlikely(T1 != 0)) {
-        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
-            glue(st8, MEMSUFFIX)((uint32_t)T0, (env->gpr[src] >> sh) & 0xFF);
-    }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_stsw_64, MEMSUFFIX) (int src)
-{
-    int sh;
-
-    for (; T1 > 3; T1 -= 4, T0 += 4) {
-        glue(st32, MEMSUFFIX)((uint64_t)T0, env->gpr[src++]);
-        if (unlikely(src == 32))
-            src = 0;
-    }
-    if (unlikely(T1 != 0)) {
-        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
-            glue(st8, MEMSUFFIX)((uint64_t)T0, (env->gpr[src] >> sh) & 0xFF);
-    }
-}
-#endif
-
 /* PowerPC 601 specific instructions (POWER bridge) */
 // XXX: to be tested
 void glue(do_POWER_lscbx, MEMSUFFIX) (int dest, int ra, int rb)
target-ppc/op_mem.h
@@ -20,74 +20,6 @@
 
 #include "op_mem_access.h"
 
-/*** Integer load and store strings ***/
-void OPPROTO glue(op_lswi, MEMSUFFIX) (void)
-{
-    glue(do_lsw, MEMSUFFIX)(PARAM1);
-    RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lswi_64, MEMSUFFIX) (void)
-{
-    glue(do_lsw_64, MEMSUFFIX)(PARAM1);
-    RETURN();
-}
-#endif
-
-/* PPC32 specification says we must generate an exception if
- * rA is in the range of registers to be loaded.
- * In an other hand, IBM says this is valid, but rA won't be loaded.
- * For now, I'll follow the spec...
- */
-void OPPROTO glue(op_lswx, MEMSUFFIX) (void)
-{
-    /* Note: T1 comes from xer_bc then no cast is needed */
-    if (likely(T1 != 0)) {
-        if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
-                     (PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
-            raise_exception_err(env, POWERPC_EXCP_PROGRAM,
-                                POWERPC_EXCP_INVAL |
-                                POWERPC_EXCP_INVAL_LSWX);
-        } else {
-            glue(do_lsw, MEMSUFFIX)(PARAM1);
-        }
-    }
-    RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lswx_64, MEMSUFFIX) (void)
-{
-    /* Note: T1 comes from xer_bc then no cast is needed */
-    if (likely(T1 != 0)) {
-        if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
-                     (PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
-            raise_exception_err(env, POWERPC_EXCP_PROGRAM,
-                                POWERPC_EXCP_INVAL |
-                                POWERPC_EXCP_INVAL_LSWX);
-        } else {
-            glue(do_lsw_64, MEMSUFFIX)(PARAM1);
-        }
-    }
-    RETURN();
-}
-#endif
-
-void OPPROTO glue(op_stsw, MEMSUFFIX) (void)
-{
-    glue(do_stsw, MEMSUFFIX)(PARAM1);
-    RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_stsw_64, MEMSUFFIX) (void)
-{
-    glue(do_stsw_64, MEMSUFFIX)(PARAM1);
-    RETURN();
-}
-#endif
-
 /* Load and set reservation */
 void OPPROTO glue(op_lwarx, MEMSUFFIX) (void)
 {
target-ppc/translate.c
@@ -3118,43 +3118,6 @@ GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
 }
 
 /*** Integer load and store strings ***/
-#define op_ldsts(name, start) (*gen_op_##name[ctx->mem_idx])(start)
-#define op_ldstsx(name, rd, ra, rb) (*gen_op_##name[ctx->mem_idx])(rd, ra, rb)
-/* string load & stores are by definition endian-safe */
-#define gen_op_lswi_le_raw gen_op_lswi_raw
-#define gen_op_lswi_le_user gen_op_lswi_user
-#define gen_op_lswi_le_kernel gen_op_lswi_kernel
-#define gen_op_lswi_le_hypv gen_op_lswi_hypv
-#define gen_op_lswi_le_64_raw gen_op_lswi_raw
-#define gen_op_lswi_le_64_user gen_op_lswi_user
-#define gen_op_lswi_le_64_kernel gen_op_lswi_kernel
-#define gen_op_lswi_le_64_hypv gen_op_lswi_hypv
-static GenOpFunc1 *gen_op_lswi[NB_MEM_FUNCS] = {
-    GEN_MEM_FUNCS(lswi),
-};
-#define gen_op_lswx_le_raw gen_op_lswx_raw
-#define gen_op_lswx_le_user gen_op_lswx_user
-#define gen_op_lswx_le_kernel gen_op_lswx_kernel
-#define gen_op_lswx_le_hypv gen_op_lswx_hypv
-#define gen_op_lswx_le_64_raw gen_op_lswx_raw
-#define gen_op_lswx_le_64_user gen_op_lswx_user
-#define gen_op_lswx_le_64_kernel gen_op_lswx_kernel
-#define gen_op_lswx_le_64_hypv gen_op_lswx_hypv
-static GenOpFunc3 *gen_op_lswx[NB_MEM_FUNCS] = {
-    GEN_MEM_FUNCS(lswx),
-};
-#define gen_op_stsw_le_raw gen_op_stsw_raw
-#define gen_op_stsw_le_user gen_op_stsw_user
-#define gen_op_stsw_le_kernel gen_op_stsw_kernel
-#define gen_op_stsw_le_hypv gen_op_stsw_hypv
-#define gen_op_stsw_le_64_raw gen_op_stsw_raw
-#define gen_op_stsw_le_64_user gen_op_stsw_user
-#define gen_op_stsw_le_64_kernel gen_op_stsw_kernel
-#define gen_op_stsw_le_64_hypv gen_op_stsw_hypv
-static GenOpFunc1 *gen_op_stsw[NB_MEM_FUNCS] = {
-    GEN_MEM_FUNCS(stsw),
-};
-
 /* lswi */
 /* PowerPC32 specification says we must generate an exception if
  * rA is in the range of registers to be loaded.
@@ -3163,6 +3126,8 @@ static GenOpFunc1 *gen_op_stsw[NB_MEM_FUNCS] = {
  */
 GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING)
 {
+    TCGv t0;
+    TCGv_i32 t1, t2;
     int nb = NB(ctx->opcode);
     int start = rD(ctx->opcode);
     int ra = rA(ctx->opcode);
@@ -3180,49 +3145,67 @@ GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING)
     }
     /* NIP cannot be restored if the memory exception comes from an helper */
     gen_update_nip(ctx, ctx->nip - 4);
-    gen_addr_register(cpu_T[0], ctx);
-    tcg_gen_movi_tl(cpu_T[1], nb);
-    op_ldsts(lswi, start);
+    t0 = tcg_temp_new();
+    gen_addr_register(t0, ctx);
+    t1 = tcg_const_i32(nb);
+    t2 = tcg_const_i32(start);
+    gen_helper_lsw(t0, t1, t2);
+    tcg_temp_free(t0);
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t2);
 }
 
 /* lswx */
 GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING)
 {
-    int ra = rA(ctx->opcode);
-    int rb = rB(ctx->opcode);
-
+    TCGv t0 = tcg_temp_new();
+    TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
+    TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
+    TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
     /* NIP cannot be restored if the memory exception comes from an helper */
     gen_update_nip(ctx, ctx->nip - 4);
-    gen_addr_reg_index(cpu_T[0], ctx);
-    if (ra == 0) {
-        ra = rb;
-    }
-    tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F);
-    op_ldstsx(lswx, rD(ctx->opcode), ra, rb);
+    gen_addr_reg_index(t0, ctx);
+    gen_helper_lswx(t0, t1, t2, t3);
+    tcg_temp_free(t0);
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t2);
+    tcg_temp_free_i32(t3);
 }
 
 /* stswi */
 GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING)
 {
     int nb = NB(ctx->opcode);
-
+    TCGv t0 = tcg_temp_new();
+    TCGv_i32 t1;
+    TCGv_i32 t2 = tcg_const_i32(rS(ctx->opcode));
    /* NIP cannot be restored if the memory exception comes from an helper */
     gen_update_nip(ctx, ctx->nip - 4);
-    gen_addr_register(cpu_T[0], ctx);
+    gen_addr_register(t0, ctx);
     if (nb == 0)
         nb = 32;
-    tcg_gen_movi_tl(cpu_T[1], nb);
-    op_ldsts(stsw, rS(ctx->opcode));
+    t1 = tcg_const_i32(nb);
+    gen_helper_stsw(t0, t1, t2);
+    tcg_temp_free(t0);
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t2);
 }
 
 /* stswx */
 GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING)
 {
+    TCGv t0 = tcg_temp_new();
+    TCGv_i32 t1 = tcg_temp_new_i32();
+    TCGv_i32 t2 = tcg_const_i32(rS(ctx->opcode));
     /* NIP cannot be restored if the memory exception comes from an helper */
     gen_update_nip(ctx, ctx->nip - 4);
-    gen_addr_reg_index(cpu_T[0], ctx);
-    tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F);
-    op_ldsts(stsw, rS(ctx->opcode));
+    gen_addr_reg_index(t0, ctx);
+    tcg_gen_trunc_tl_i32(t1, cpu_xer);
+    tcg_gen_andi_i32(t1, t1, 0x7F);
+    gen_helper_stsw(t0, t1, t2);
+    tcg_temp_free(t0);
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t2);
 }
 
 /*** Memory synchronisation ***/