Commit 8289b279756928f6f029731e2eec119231b9e240 (1 parent: b25deda7)

Preliminary Sparc TCG target

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@3995 c046a42c-6fe2-441c-8c8c-71466251a162

Showing 4 changed files with 727 additions and 0 deletions
Makefile.target
@@ -177,6 +177,9 @@ LIBOBJS=exec.o kqemu.o translate-all.o cpu-exec.o\
177 | 177 | # TCG code generator |
178 | 178 | LIBOBJS+= tcg/tcg.o tcg/tcg-dyngen.o tcg/tcg-runtime.o |
179 | 179 | CPPFLAGS+=-I$(SRC_PATH)/tcg -I$(SRC_PATH)/tcg/$(ARCH) |
180 | +ifeq ($(ARCH),sparc64) | |
181 | +CPPFLAGS+=-I$(SRC_PATH)/tcg/sparc | |
182 | +endif | |
180 | 183 | ifdef CONFIG_SOFTFLOAT |
181 | 184 | LIBOBJS+=fpu/softfloat.o |
182 | 185 | else |
dyngen.c
@@ -1615,12 +1615,14 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
1615 | 1615 | error("No save at the beginning of %s", name); |
1616 | 1616 | } |
1617 | 1617 | |
1618 | +#if 0 | |
1618 | 1619 | /* Skip a preceding nop, if present. */ |
1619 | 1620 | if (p > p_start) { |
1620 | 1621 | skip_insn = get32((uint32_t *)(p - 0x4)); |
1621 | 1622 | if (skip_insn == 0x01000000) |
1622 | 1623 | p -= 4; |
1623 | 1624 | } |
1625 | +#endif | |
1624 | 1626 | |
1625 | 1627 | copy_size = p - p_start; |
1626 | 1628 | } |
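
Note: the nop test disabled above matches the raw instruction word 0x01000000, which is the canonical SPARC nop, "sethi 0, %g0". A minimal sketch (not part of the commit; the INSN_* macros are adapted from the new tcg/sparc/tcg-target.c below, with explicit uint32_t casts added) showing that the encoding macros reproduce exactly that constant, and that it is the same word tcg_out_nop() emits:

/* sanity check: the SPARC nop is "sethi 0, %g0" == 0x01000000 */
#include <assert.h>
#include <stdint.h>

#define INSN_OP(x)  ((uint32_t)(x) << 30)
#define INSN_OP2(x) ((uint32_t)(x) << 22)
#define INSN_RD(x)  ((uint32_t)(x) << 25)
#define SETHI       (INSN_OP(0) | INSN_OP2(0x4))

int main(void)
{
    uint32_t nop = SETHI | INSN_RD(0) | 0;   /* rd = %g0, imm22 = 0 */
    assert(nop == 0x01000000);
    return 0;
}
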
tcg/sparc/tcg-target.c
0 → 100644
1 | +/* | |
2 | + * Tiny Code Generator for QEMU | |
3 | + * | |
4 | + * Copyright (c) 2008 Fabrice Bellard | |
5 | + * | |
6 | + * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 | + * of this software and associated documentation files (the "Software"), to deal | |
8 | + * in the Software without restriction, including without limitation the rights | |
9 | + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 | + * copies of the Software, and to permit persons to whom the Software is | |
11 | + * furnished to do so, subject to the following conditions: | |
12 | + * | |
13 | + * The above copyright notice and this permission notice shall be included in | |
14 | + * all copies or substantial portions of the Software. | |
15 | + * | |
16 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 | + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
22 | + * THE SOFTWARE. | |
23 | + */ | |
24 | + | |
25 | +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { | |
26 | + "%g0", | |
27 | + "%g1", | |
28 | + "%g2", | |
29 | + "%g3", | |
30 | + "%g4", | |
31 | + "%g5", | |
32 | + "%g6", | |
33 | + "%g7", | |
34 | + "%o0", | |
35 | + "%o1", | |
36 | + "%o2", | |
37 | + "%o3", | |
38 | + "%o4", | |
39 | + "%o5", | |
40 | + "%o6", | |
41 | + "%o7", | |
42 | + "%l0", | |
43 | + "%l1", | |
44 | + "%l2", | |
45 | + "%l3", | |
46 | + "%l4", | |
47 | + "%l5", | |
48 | + "%l6", | |
49 | + "%l7", | |
50 | + "%i0", | |
51 | + "%i1", | |
52 | + "%i2", | |
53 | + "%i3", | |
54 | + "%i4", | |
55 | + "%i5", | |
56 | + "%i6", | |
57 | + "%i7", | |
58 | +}; | |
59 | + | |
60 | +static const int tcg_target_reg_alloc_order[TCG_TARGET_NB_REGS] = { | |
61 | + TCG_REG_L0, | |
62 | + TCG_REG_L1, | |
63 | + TCG_REG_L2, | |
64 | + TCG_REG_L3, | |
65 | + TCG_REG_L4, | |
66 | + TCG_REG_L5, | |
67 | + TCG_REG_L6, | |
68 | + TCG_REG_L7, | |
69 | + TCG_REG_I0, | |
70 | + TCG_REG_I1, | |
71 | + TCG_REG_I2, | |
72 | + TCG_REG_I3, | |
73 | + TCG_REG_I4, | |
74 | + TCG_REG_I5, | |
75 | +}; | |
76 | + | |
77 | +static const int tcg_target_call_iarg_regs[6] = { | |
78 | + TCG_REG_O0, | |
79 | + TCG_REG_O1, | |
80 | + TCG_REG_O2, | |
81 | + TCG_REG_O3, | |
82 | + TCG_REG_O4, | |
83 | + TCG_REG_O5, | |
84 | +}; | |
85 | + | |
86 | +static const int tcg_target_call_oarg_regs[2] = { | |
87 | + TCG_REG_O0, | |
88 | + TCG_REG_O1, | |
89 | +}; | |
90 | + | |
91 | +static void patch_reloc(uint8_t *code_ptr, int type, | |
92 | + tcg_target_long value) | |
93 | +{ | |
94 | + switch (type) { | |
95 | + case R_SPARC_32: | |
96 | + if (value != (uint32_t)value) | |
97 | + tcg_abort(); | |
98 | + *(uint32_t *)code_ptr = value; | |
99 | + break; | |
100 | + default: | |
101 | + tcg_abort(); | |
102 | + } | |
103 | +} | |
104 | + | |
105 | +/* maximum number of registers used for input function arguments */ | |
106 | +static inline int tcg_target_get_call_iarg_regs_count(int flags) | |
107 | +{ | |
108 | + return 6; | |
109 | +} | |
110 | + | |
111 | +/* parse target specific constraints */ | |
112 | +static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) | |
113 | +{ | |
114 | + const char *ct_str; | |
115 | + | |
116 | + ct_str = *pct_str; | |
117 | + switch (ct_str[0]) { | |
118 | + case 'r': | |
119 | + case 'L': /* qemu_ld/st constraint */ | |
120 | + ct->ct |= TCG_CT_REG; | |
121 | + tcg_regset_set32(ct->u.regs, 0, 0xffffffff); | |
122 | + break; | |
123 | + case 'I': | |
124 | + ct->ct |= TCG_CT_CONST_S11; | |
125 | + break; | |
126 | + case 'J': | |
127 | + ct->ct |= TCG_CT_CONST_S13; | |
128 | + break; | |
129 | + default: | |
130 | + return -1; | |
131 | + } | |
132 | + ct_str++; | |
133 | + *pct_str = ct_str; | |
134 | + return 0; | |
135 | +} | |
136 | + | |
137 | +#define ABS(x) ((x) < 0? -(x) : (x)) | |
138 | +/* test if a constant matches the constraint */ | |
139 | +static inline int tcg_target_const_match(tcg_target_long val, | |
140 | + const TCGArgConstraint *arg_ct) | |
141 | +{ | |
142 | + int ct; | |
143 | + | |
144 | + ct = arg_ct->ct; | |
145 | + if (ct & TCG_CT_CONST) | |
146 | + return 1; | |
147 | + else if ((ct & TCG_CT_CONST_S11) && ABS(val) == (ABS(val) & 0x3ff)) | |
148 | + return 1; | |
149 | + else if ((ct & TCG_CT_CONST_S13) && ABS(val) == (ABS(val) & 0xfff)) | |
150 | + return 1; | |
151 | + else | |
152 | + return 0; | |
153 | +} | |
154 | + | |
155 | +#define INSN_OP(x) ((x) << 30) | |
156 | +#define INSN_OP2(x) ((x) << 22) | |
157 | +#define INSN_OP3(x) ((x) << 19) | |
158 | +#define INSN_OPF(x) ((x) << 5) | |
159 | +#define INSN_RD(x) ((x) << 25) | |
160 | +#define INSN_RS1(x) ((x) << 14) | |
161 | +#define INSN_RS2(x) (x) | |
162 | + | |
163 | +#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff)) | |
164 | + | |
165 | +#define INSN_COND(x, a) (((x) << 25) | ((a) << 29)) | |
166 | + | |
167 | +#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00)) | |
168 | +#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01)) | |
169 | +#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02)) | |
170 | +#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03)) | |
171 | +#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x08)) | |
172 | +#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x10)) | |
173 | +#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c)) | |
174 | +#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a)) | |
175 | +#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e)) | |
176 | +#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f)) | |
177 | +#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09)) | |
178 | +#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d)) | |
179 | +#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d)) | |
180 | + | |
181 | +#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25)) | |
182 | +#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26)) | |
183 | +#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27)) | |
184 | + | |
185 | +#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12)) | |
186 | +#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12)) | |
187 | +#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12)) | |
188 | + | |
189 | +#define WRY (INSN_OP(2) | INSN_OP3(0x30)) | |
190 | +#define JMPL (INSN_OP(2) | INSN_OP3(0x38)) | |
191 | +#define SAVE (INSN_OP(2) | INSN_OP3(0x3c)) | |
192 | +#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d)) | |
193 | +#define SETHI (INSN_OP(0) | INSN_OP2(0x4)) | |
194 | +#define CALL INSN_OP(1) | |
195 | +#define LDUB (INSN_OP(3) | INSN_OP3(0x01)) | |
196 | +#define LDSB (INSN_OP(3) | INSN_OP3(0x09)) | |
197 | +#define LDUH (INSN_OP(3) | INSN_OP3(0x02)) | |
198 | +#define LDSH (INSN_OP(3) | INSN_OP3(0x0a)) | |
199 | +#define LDUW (INSN_OP(3) | INSN_OP3(0x00)) | |
200 | +#define LDSW (INSN_OP(3) | INSN_OP3(0x08)) | |
201 | +#define LDX (INSN_OP(3) | INSN_OP3(0x0b)) | |
202 | +#define STB (INSN_OP(3) | INSN_OP3(0x05)) | |
203 | +#define STH (INSN_OP(3) | INSN_OP3(0x06)) | |
204 | +#define STW (INSN_OP(3) | INSN_OP3(0x04)) | |
205 | +#define STX (INSN_OP(3) | INSN_OP3(0x0e)) | |
206 | + | |
207 | +static inline void tcg_out_mov(TCGContext *s, int ret, int arg) | |
208 | +{ | |
209 | + tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(arg) | | |
210 | + INSN_RS2(TCG_REG_G0)); | |
211 | +} | |
212 | + | |
213 | +static inline void tcg_out_movi(TCGContext *s, TCGType type, | |
214 | + int ret, tcg_target_long arg) | |
215 | +{ | |
216 | + if (arg == (arg & 0xfff)) | |
217 | + tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS2(TCG_REG_G0) | | |
218 | + INSN_IMM13(arg)); | |
219 | + else { | |
220 | + tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10)); | |
221 | + if (arg & 0x3ff) | |
222 | + tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(ret) | | |
223 | + INSN_IMM13(arg & 0x3ff)); | |
224 | + } | |
225 | +} | |
226 | + | |
227 | +static inline void tcg_out_ld_raw(TCGContext *s, int ret, | |
228 | + tcg_target_long arg) | |
229 | +{ | |
230 | + tcg_out32(s, SETHI | INSN_RD(ret) | (((uint32_t)arg & 0xfffffc00) >> 10)); | |
231 | + tcg_out32(s, LDUW | INSN_RD(ret) | INSN_RS1(ret) | | |
232 | + INSN_IMM13(arg & 0x3ff)); | |
233 | +} | |
234 | + | |
235 | +static inline void tcg_out_ldst(TCGContext *s, int ret, int addr, int offset, int op) | |
236 | +{ | |
237 | + if (offset == (offset & 0xfff)) | |
238 | + tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) | | |
239 | + INSN_IMM13(offset)); | |
240 | + else | |
241 | + fprintf(stderr, "unimplemented %s with offset %d\n", __func__, offset); | |
242 | +} | |
243 | + | |
244 | +static inline void tcg_out_ld(TCGContext *s, int ret, | |
245 | + int arg1, tcg_target_long arg2) | |
246 | +{ | |
247 | + fprintf(stderr, "unimplemented %s\n", __func__); | |
248 | +} | |
249 | + | |
250 | +static inline void tcg_out_st(TCGContext *s, int arg, | |
251 | + int arg1, tcg_target_long arg2) | |
252 | +{ | |
253 | + fprintf(stderr, "unimplemented %s\n", __func__); | |
254 | +} | |
255 | + | |
256 | +static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2, | |
257 | + int op) | |
258 | +{ | |
259 | + tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | | |
260 | + INSN_RS2(rs2)); | |
261 | +} | |
262 | + | |
263 | +static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1, int offset, | |
264 | + int op) | |
265 | +{ | |
266 | + tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | | |
267 | + INSN_IMM13(offset)); | |
268 | +} | |
269 | + | |
270 | +static inline void tcg_out_sety(TCGContext *s, tcg_target_long val) | |
271 | +{ | |
272 | + if (val == 0 || val == -1) | |
273 | + tcg_out32(s, WRY | INSN_IMM13(val)); | |
274 | + else | |
275 | + fprintf(stderr, "unimplemented sety %ld\n", (long)val); | |
276 | +} | |
277 | + | |
278 | +static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val) | |
279 | +{ | |
280 | + if (val != 0) { | |
281 | + if (val == (val & 0xfff)) | |
282 | + tcg_out_arithi(s, reg, reg, val, ARITH_ADD); | |
283 | + else | |
284 | + fprintf(stderr, "unimplemented addi %ld\n", (long)val); | |
285 | + } | |
286 | +} | |
287 | + | |
288 | +static inline void tcg_out_nop(TCGContext *s) | |
289 | +{ | |
290 | + tcg_out32(s, SETHI | INSN_RD(TCG_REG_G0) | 0); | |
291 | +} | |
292 | + | |
293 | +static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args, | |
294 | + const int *const_args) | |
295 | +{ | |
296 | + int c; | |
297 | + | |
298 | + switch (opc) { | |
299 | + case INDEX_op_exit_tb: | |
300 | + tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_O0, args[0]); | |
301 | + tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_O7) | | |
302 | + INSN_IMM13(8)); | |
303 | + tcg_out_nop(s); | |
304 | + break; | |
305 | + case INDEX_op_goto_tb: | |
306 | + if (s->tb_jmp_offset) { | |
307 | + /* direct jump method */ | |
308 | + tcg_out32(s, CALL | 0); | |
309 | + s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; | |
310 | + tcg_out_nop(s); | |
311 | + } else { | |
312 | + /* indirect jump method */ | |
313 | + tcg_out_ld_raw(s, TCG_REG_O7, (tcg_target_long)(s->tb_next + args[0])); | |
314 | + tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_O7) | | |
315 | + INSN_RS2(TCG_REG_G0)); | |
316 | + tcg_out_nop(s); | |
317 | + } | |
318 | + s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf; | |
319 | + break; | |
320 | + case INDEX_op_call: | |
321 | + if (const_args[0]) { | |
322 | + tcg_out32(s, CALL | ((((tcg_target_ulong)args[0] | |
323 | + - (tcg_target_ulong)s->code_ptr) >> 2) | |
324 | + & 0x3fffffff)); | |
325 | + tcg_out_nop(s); | |
326 | + } else { | |
327 | + tcg_out_ld_raw(s, TCG_REG_O7, (tcg_target_long)(s->tb_next + args[0])); | |
328 | + tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_O7) | | |
329 | + INSN_RS2(TCG_REG_G0)); | |
330 | + tcg_out_nop(s); | |
331 | + } | |
332 | + break; | |
333 | + case INDEX_op_jmp: | |
334 | + fprintf(stderr, "unimplemented jmp\n"); | |
335 | + break; | |
336 | + case INDEX_op_br: | |
337 | + fprintf(stderr, "unimplemented br\n"); | |
338 | + break; | |
339 | + case INDEX_op_movi_i32: | |
340 | + tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]); | |
341 | + break; | |
342 | + | |
343 | +#if defined(__sparc_v9__) && !defined(__sparc_v8plus__) | |
344 | +#define OP_32_64(x) \ | |
345 | + glue(glue(case INDEX_op_, x), _i32:) \ | |
346 | + glue(glue(case INDEX_op_, x), _i64:) | |
347 | +#else | |
348 | +#define OP_32_64(x) \ | |
349 | + glue(glue(case INDEX_op_, x), _i32:) | |
350 | +#endif | |
351 | + OP_32_64(ld8u); | |
352 | + tcg_out_ldst(s, args[0], args[1], args[2], LDUB); | |
353 | + break; | |
354 | + OP_32_64(ld8s); | |
355 | + tcg_out_ldst(s, args[0], args[1], args[2], LDSB); | |
356 | + break; | |
357 | + OP_32_64(ld16u); | |
358 | + tcg_out_ldst(s, args[0], args[1], args[2], LDUH); | |
359 | + break; | |
360 | + OP_32_64(ld16s); | |
361 | + tcg_out_ldst(s, args[0], args[1], args[2], LDSH); | |
362 | + break; | |
363 | + case INDEX_op_ld_i32: | |
364 | +#if defined(__sparc_v9__) && !defined(__sparc_v8plus__) | |
365 | + case INDEX_op_ld32u_i64: | |
366 | +#endif | |
367 | + tcg_out_ldst(s, args[0], args[1], args[2], LDUW); | |
368 | + break; | |
369 | + OP_32_64(st8); | |
370 | + tcg_out_ldst(s, args[0], args[1], args[2], STB); | |
371 | + break; | |
372 | + OP_32_64(st16); | |
373 | + tcg_out_ldst(s, args[0], args[1], args[2], STH); | |
374 | + break; | |
375 | + case INDEX_op_st_i32: | |
376 | +#if defined(__sparc_v9__) && !defined(__sparc_v8plus__) | |
377 | + case INDEX_op_st32_i64: | |
378 | +#endif | |
379 | + tcg_out_ldst(s, args[0], args[1], args[2], STW); | |
380 | + break; | |
381 | + OP_32_64(sub); | |
382 | + c = ARITH_SUB; | |
383 | + goto gen_arith32; | |
384 | + OP_32_64(and); | |
385 | + c = ARITH_AND; | |
386 | + goto gen_arith32; | |
387 | + OP_32_64(or); | |
388 | + c = ARITH_OR; | |
389 | + goto gen_arith32; | |
390 | + OP_32_64(xor); | |
391 | + c = ARITH_XOR; | |
392 | + goto gen_arith32; | |
393 | + case INDEX_op_shl_i32: | |
394 | + c = SHIFT_SLL; | |
395 | + goto gen_arith32; | |
396 | + case INDEX_op_shr_i32: | |
397 | + c = SHIFT_SRL; | |
398 | + goto gen_arith32; | |
399 | + case INDEX_op_sar_i32: | |
400 | + c = SHIFT_SRA; | |
401 | + goto gen_arith32; | |
402 | + case INDEX_op_mul_i32: | |
403 | + c = ARITH_UMUL; | |
404 | + goto gen_arith32; | |
405 | + OP_32_64(add); | |
406 | + c = ARITH_ADD; | |
407 | + gen_arith32: | |
408 | + if (const_args[2]) { | |
409 | + tcg_out_arithi(s, args[0], args[1], args[2], c); | |
410 | + } else { | |
411 | + tcg_out_arith(s, args[0], args[1], args[2], c); | |
412 | + } | |
413 | + break; | |
414 | + | |
415 | + case INDEX_op_div2_i32: | |
416 | +#if defined(__sparc_v9__) || defined(__sparc_v8plus__) | |
417 | + c = ARITH_SDIVX; | |
418 | + goto gen_arith32; | |
419 | +#else | |
420 | + tcg_out_sety(s, 0); | |
421 | + c = ARITH_SDIV; | |
422 | + goto gen_arith32; | |
423 | +#endif | |
424 | + case INDEX_op_divu2_i32: | |
425 | +#if defined(__sparc_v9__) || defined(__sparc_v8plus__) | |
426 | + c = ARITH_UDIVX; | |
427 | + goto gen_arith32; | |
428 | +#else | |
429 | + tcg_out_sety(s, 0); | |
430 | + c = ARITH_UDIV; | |
431 | + goto gen_arith32; | |
432 | +#endif | |
433 | + | |
434 | + case INDEX_op_brcond_i32: | |
435 | + fprintf(stderr, "unimplemented brcond\n"); | |
436 | + break; | |
437 | + | |
438 | + case INDEX_op_qemu_ld8u: | |
439 | + fprintf(stderr, "unimplemented qld\n"); | |
440 | + break; | |
441 | + case INDEX_op_qemu_ld8s: | |
442 | + fprintf(stderr, "unimplemented qld\n"); | |
443 | + break; | |
444 | + case INDEX_op_qemu_ld16u: | |
445 | + fprintf(stderr, "unimplemented qld\n"); | |
446 | + break; | |
447 | + case INDEX_op_qemu_ld16s: | |
448 | + fprintf(stderr, "unimplemented qld\n"); | |
449 | + break; | |
450 | + case INDEX_op_qemu_ld32u: | |
451 | + fprintf(stderr, "unimplemented qld\n"); | |
452 | + break; | |
453 | + case INDEX_op_qemu_ld32s: | |
454 | + fprintf(stderr, "unimplemented qld\n"); | |
455 | + break; | |
456 | + case INDEX_op_qemu_st8: | |
457 | + fprintf(stderr, "unimplemented qst\n"); | |
458 | + break; | |
459 | + case INDEX_op_qemu_st16: | |
460 | + fprintf(stderr, "unimplemented qst\n"); | |
461 | + break; | |
462 | + case INDEX_op_qemu_st32: | |
463 | + fprintf(stderr, "unimplemented qst\n"); | |
464 | + break; | |
465 | + | |
466 | +#if defined(__sparc_v9__) && !defined(__sparc_v8plus__) | |
467 | + case INDEX_op_movi_i64: | |
468 | + tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]); | |
469 | + break; | |
470 | + case INDEX_op_ld_i64: | |
471 | + tcg_out_ldst(s, args[0], args[1], args[2], LDX); | |
472 | + break; | |
473 | + case INDEX_op_st_i64: | |
474 | + tcg_out_ldst(s, args[0], args[1], args[2], STX); | |
475 | + break; | |
476 | + case INDEX_op_shl_i64: | |
477 | + c = SHIFT_SLLX; | |
478 | + goto gen_arith32; | |
479 | + case INDEX_op_shr_i64: | |
480 | + c = SHIFT_SRLX; | |
481 | + goto gen_arith32; | |
482 | + case INDEX_op_sar_i64: | |
483 | + c = SHIFT_SRAX; | |
484 | + goto gen_arith32; | |
485 | + case INDEX_op_mul_i64: | |
486 | + c = ARITH_MULX; | |
487 | + goto gen_arith32; | |
488 | + case INDEX_op_div2_i64: | |
489 | + c = ARITH_SDIVX; | |
490 | + goto gen_arith32; | |
491 | + case INDEX_op_divu2_i64: | |
492 | + c = ARITH_UDIVX; | |
493 | + goto gen_arith32; | |
494 | + | |
495 | + case INDEX_op_brcond_i64: | |
496 | + fprintf(stderr, "unimplemented brcond\n"); | |
497 | + break; | |
498 | + case INDEX_op_qemu_ld64: | |
499 | + fprintf(stderr, "unimplemented qld\n"); | |
500 | + break; | |
501 | + case INDEX_op_qemu_st64: | |
502 | + fprintf(stderr, "unimplemented qst\n"); | |
503 | + break; | |
504 | + | |
505 | +#endif | |
506 | + default: | |
507 | + fprintf(stderr, "unknown opcode 0x%x\n", opc); | |
508 | + tcg_abort(); | |
509 | + } | |
510 | +} | |
511 | + | |
512 | +static const TCGTargetOpDef sparc_op_defs[] = { | |
513 | + { INDEX_op_exit_tb, { } }, | |
514 | + { INDEX_op_goto_tb, { } }, | |
515 | + { INDEX_op_call, { "ri" } }, | |
516 | + { INDEX_op_jmp, { "ri" } }, | |
517 | + { INDEX_op_br, { } }, | |
518 | + | |
519 | + { INDEX_op_mov_i32, { "r", "r" } }, | |
520 | + { INDEX_op_movi_i32, { "r" } }, | |
521 | + { INDEX_op_ld8u_i32, { "r", "r" } }, | |
522 | + { INDEX_op_ld8s_i32, { "r", "r" } }, | |
523 | + { INDEX_op_ld16u_i32, { "r", "r" } }, | |
524 | + { INDEX_op_ld16s_i32, { "r", "r" } }, | |
525 | + { INDEX_op_ld_i32, { "r", "r" } }, | |
526 | + { INDEX_op_st8_i32, { "r", "r" } }, | |
527 | + { INDEX_op_st16_i32, { "r", "r" } }, | |
528 | + { INDEX_op_st_i32, { "r", "r" } }, | |
529 | + | |
530 | + { INDEX_op_add_i32, { "r", "0", "rJ" } }, | |
531 | + { INDEX_op_mul_i32, { "r", "0", "rJ" } }, | |
532 | + { INDEX_op_div2_i32, { "r", "r", "0", "1", "r" } }, | |
533 | + { INDEX_op_divu2_i32, { "r", "r", "0", "1", "r" } }, | |
534 | + { INDEX_op_sub_i32, { "r", "0", "rJ" } }, | |
535 | + { INDEX_op_and_i32, { "r", "0", "rJ" } }, | |
536 | + { INDEX_op_or_i32, { "r", "0", "rJ" } }, | |
537 | + { INDEX_op_xor_i32, { "r", "0", "rJ" } }, | |
538 | + | |
539 | + { INDEX_op_shl_i32, { "r", "0", "rJ" } }, | |
540 | + { INDEX_op_shr_i32, { "r", "0", "rJ" } }, | |
541 | + { INDEX_op_sar_i32, { "r", "0", "rJ" } }, | |
542 | + | |
543 | + { INDEX_op_brcond_i32, { "r", "ri" } }, | |
544 | + | |
545 | + { INDEX_op_qemu_ld8u, { "r", "L" } }, | |
546 | + { INDEX_op_qemu_ld8s, { "r", "L" } }, | |
547 | + { INDEX_op_qemu_ld16u, { "r", "L" } }, | |
548 | + { INDEX_op_qemu_ld16s, { "r", "L" } }, | |
549 | + { INDEX_op_qemu_ld32u, { "r", "L" } }, | |
550 | + { INDEX_op_qemu_ld32s, { "r", "L" } }, | |
551 | + | |
552 | + { INDEX_op_qemu_st8, { "L", "L" } }, | |
553 | + { INDEX_op_qemu_st16, { "L", "L" } }, | |
554 | + { INDEX_op_qemu_st32, { "L", "L" } }, | |
555 | + | |
556 | +#if defined(__sparc_v9__) && !defined(__sparc_v8plus__) | |
557 | + { INDEX_op_mov_i64, { "r", "r" } }, | |
558 | + { INDEX_op_movi_i64, { "r" } }, | |
559 | + { INDEX_op_ld8u_i64, { "r", "r" } }, | |
560 | + { INDEX_op_ld8s_i64, { "r", "r" } }, | |
561 | + { INDEX_op_ld16u_i64, { "r", "r" } }, | |
562 | + { INDEX_op_ld16s_i64, { "r", "r" } }, | |
563 | + { INDEX_op_ld32u_i64, { "r", "r" } }, | |
564 | + { INDEX_op_ld32s_i64, { "r", "r" } }, | |
565 | + { INDEX_op_ld_i64, { "r", "r" } }, | |
566 | + { INDEX_op_st8_i64, { "r", "r" } }, | |
567 | + { INDEX_op_st16_i64, { "r", "r" } }, | |
568 | + { INDEX_op_st32_i64, { "r", "r" } }, | |
569 | + { INDEX_op_st_i64, { "r", "r" } }, | |
570 | + | |
571 | + { INDEX_op_add_i64, { "r", "0", "rJ" } }, | |
572 | + { INDEX_op_mul_i64, { "r", "0", "rJ" } }, | |
573 | + { INDEX_op_div2_i64, { "r", "r", "0", "1", "r" } }, | |
574 | + { INDEX_op_divu2_i64, { "r", "r", "0", "1", "r" } }, | |
575 | + { INDEX_op_sub_i64, { "r", "0", "rJ" } }, | |
576 | + { INDEX_op_and_i64, { "r", "0", "rJ" } }, | |
577 | + { INDEX_op_or_i64, { "r", "0", "rJ" } }, | |
578 | + { INDEX_op_xor_i64, { "r", "0", "rJ" } }, | |
579 | + | |
580 | + { INDEX_op_shl_i64, { "r", "0", "rJ" } }, | |
581 | + { INDEX_op_shr_i64, { "r", "0", "rJ" } }, | |
582 | + { INDEX_op_sar_i64, { "r", "0", "rJ" } }, | |
583 | + | |
584 | + { INDEX_op_brcond_i64, { "r", "ri" } }, | |
585 | +#endif | |
586 | + { -1 }, | |
587 | +}; | |
588 | + | |
589 | +void tcg_target_init(TCGContext *s) | |
590 | +{ | |
591 | + tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); | |
592 | +#if defined(__sparc_v9__) && !defined(__sparc_v8plus__) | |
593 | + tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); | |
594 | +#endif | |
595 | + tcg_regset_set32(tcg_target_call_clobber_regs, 0, | |
596 | + (1 << TCG_REG_O0) | | |
597 | + (1 << TCG_REG_O1) | | |
598 | + (1 << TCG_REG_O2) | | |
599 | + (1 << TCG_REG_O3) | | |
600 | + (1 << TCG_REG_O4) | | |
601 | + (1 << TCG_REG_O5) | | |
602 | + (1 << TCG_REG_O6) | | |
603 | + (1 << TCG_REG_O7)); | |
604 | + | |
605 | + tcg_regset_clear(s->reserved_regs); | |
606 | + tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); | |
607 | + tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); | |
608 | + tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); | |
609 | + tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); | |
610 | + tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7); | |
611 | + tcg_add_target_add_op_defs(sparc_op_defs); | |
612 | +} | |
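
As a reading aid, a minimal sketch (not part of the commit) of how the INSN_* field macros above compose into whole SPARC instruction words, using the register and immediate ADD forms that tcg_out_arith() and tcg_out_arithi() emit. The macros are adapted from the file above with explicit uint32_t casts; register numbers follow the enum in tcg/sparc/tcg-target.h (%o0 = 8, %o1 = 9, %o2 = 10), and the expected words were worked out by hand from the SPARC V9 instruction format:

/* compose "add" instruction words from the field macros */
#include <assert.h>
#include <stdint.h>

#define INSN_OP(x)    ((uint32_t)(x) << 30)
#define INSN_OP3(x)   ((uint32_t)(x) << 19)
#define INSN_RD(x)    ((uint32_t)(x) << 25)
#define INSN_RS1(x)   ((uint32_t)(x) << 14)
#define INSN_RS2(x)   (x)
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define ARITH_ADD     (INSN_OP(2) | INSN_OP3(0x00))

int main(void)
{
    /* register form, as emitted by tcg_out_arith(): add %o1, %o2, %o0 */
    uint32_t add_reg = ARITH_ADD | INSN_RD(8) | INSN_RS1(9) | INSN_RS2(10);
    assert(add_reg == 0x9002400a);

    /* immediate form, as emitted by tcg_out_arithi(): add %o1, 4, %o0 */
    uint32_t add_imm = ARITH_ADD | INSN_RD(8) | INSN_RS1(9) | INSN_IMM13(4);
    assert(add_imm == 0x90026004);
    return 0;
}
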
tcg/sparc/tcg-target.h
0 → 100644
1 | +/* | |
2 | + * Tiny Code Generator for QEMU | |
3 | + * | |
4 | + * Copyright (c) 2008 Fabrice Bellard | |
5 | + * | |
6 | + * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 | + * of this software and associated documentation files (the "Software"), to deal | |
8 | + * in the Software without restriction, including without limitation the rights | |
9 | + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 | + * copies of the Software, and to permit persons to whom the Software is | |
11 | + * furnished to do so, subject to the following conditions: | |
12 | + * | |
13 | + * The above copyright notice and this permission notice shall be included in | |
14 | + * all copies or substantial portions of the Software. | |
15 | + * | |
16 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 | + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
22 | + * THE SOFTWARE. | |
23 | + */ | |
24 | +#define TCG_TARGET_SPARC 1 | |
25 | + | |
26 | +#if defined(__sparc_v9__) && !defined(__sparc_v8plus__) | |
27 | +#define TCG_TARGET_REG_BITS 64 | |
28 | +#else | |
29 | +#define TCG_TARGET_REG_BITS 32 | |
30 | +#endif | |
31 | + | |
32 | +#define TCG_TARGET_WORDS_BIGENDIAN | |
33 | + | |
34 | +#define TCG_TARGET_NB_REGS 32 | |
35 | + | |
36 | +enum { | |
37 | + TCG_REG_G0 = 0, | |
38 | + TCG_REG_G1, | |
39 | + TCG_REG_G2, | |
40 | + TCG_REG_G3, | |
41 | + TCG_REG_G4, | |
42 | + TCG_REG_G5, | |
43 | + TCG_REG_G6, | |
44 | + TCG_REG_G7, | |
45 | + TCG_REG_O0, | |
46 | + TCG_REG_O1, | |
47 | + TCG_REG_O2, | |
48 | + TCG_REG_O3, | |
49 | + TCG_REG_O4, | |
50 | + TCG_REG_O5, | |
51 | + TCG_REG_O6, | |
52 | + TCG_REG_O7, | |
53 | + TCG_REG_L0, | |
54 | + TCG_REG_L1, | |
55 | + TCG_REG_L2, | |
56 | + TCG_REG_L3, | |
57 | + TCG_REG_L4, | |
58 | + TCG_REG_L5, | |
59 | + TCG_REG_L6, | |
60 | + TCG_REG_L7, | |
61 | + TCG_REG_I0, | |
62 | + TCG_REG_I1, | |
63 | + TCG_REG_I2, | |
64 | + TCG_REG_I3, | |
65 | + TCG_REG_I4, | |
66 | + TCG_REG_I5, | |
67 | + TCG_REG_I6, | |
68 | + TCG_REG_I7, | |
69 | +}; | |
70 | + | |
71 | +#define TCG_CT_CONST_S11 0x100 | |
72 | +#define TCG_CT_CONST_S13 0x200 | |
73 | + | |
74 | +/* used for function call generation */ | |
75 | +#define TCG_REG_CALL_STACK TCG_REG_O6 | |
76 | +#define TCG_TARGET_STACK_ALIGN 16 | |
77 | + | |
78 | +/* optional instructions */ | |
79 | +//#define TCG_TARGET_HAS_bswap_i32 | |
80 | +//#define TCG_TARGET_HAS_bswap_i64 | |
81 | + | |
82 | +/* Note: must be synced with dyngen-exec.h */ | |
83 | +#ifdef HOST_SOLARIS | |
84 | +#define TCG_AREG0 TCG_REG_G2 | |
85 | +#define TCG_AREG1 TCG_REG_G3 | |
86 | +#define TCG_AREG2 TCG_REG_G4 | |
87 | +#define TCG_AREG3 TCG_REG_G5 | |
88 | +#define TCG_AREG4 TCG_REG_G6 | |
89 | +#elif defined(__sparc_v9__) | |
90 | +#define TCG_AREG0 TCG_REG_G1 | |
91 | +#define TCG_AREG1 TCG_REG_G4 | |
92 | +#define TCG_AREG2 TCG_REG_G5 | |
93 | +#define TCG_AREG3 TCG_REG_G7 | |
94 | +#else | |
95 | +#define TCG_AREG0 TCG_REG_G6 | |
96 | +#define TCG_AREG1 TCG_REG_G1 | |
97 | +#define TCG_AREG2 TCG_REG_G2 | |
98 | +#define TCG_AREG3 TCG_REG_G3 | |
99 | +#endif | |
100 | + | |
101 | +static inline void flush_icache_range(unsigned long start, unsigned long stop) | |
102 | +{ | |
103 | + unsigned long p; | |
104 | + | |
105 | + p = start & ~(8UL - 1UL); | |
106 | + stop = (stop + (8UL - 1UL)) & ~(8UL - 1UL); | |
107 | + | |
108 | + for (; p < stop; p += 8) | |
109 | + __asm__ __volatile__("flush\t%0" : : "r" (p)); | |
110 | +} | |
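
For reference, a minimal sketch (not part of the commit) of the alignment arithmetic used by flush_icache_range() above: the start address is rounded down and the stop address rounded up to an 8-byte boundary, so that each "flush" instruction covers a complete doubleword. The example addresses are made up:

/* check the 8-byte rounding used by flush_icache_range() */
#include <assert.h>

int main(void)
{
    unsigned long start = 0x1003, stop = 0x1011;
    unsigned long p   = start & ~(8UL - 1UL);              /* rounds down to 0x1000 */
    unsigned long end = (stop + (8UL - 1UL)) & ~(8UL - 1UL); /* rounds up to 0x1018 */
    assert(p == 0x1000 && end == 0x1018);
    /* the loop would then issue "flush" at 0x1000, 0x1008 and 0x1010 */
    return 0;
}
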