Commit 57fec1fee94aa9f7d2519e8c354f100fc36bc9fa
1 parent c896fe29

use the TCG code generator

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@3944 c046a42c-6fe2-441c-8c8c-71466251a162

Showing 27 changed files with 970 additions and 2024 deletions.
Too many changes to show: to preserve performance, only 27 of 35 changed files are displayed.
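The recurring pattern in the per-target diffs below is the replacement of dyngen block-chaining micro-ops with direct TCG calls. A minimal sketch of the new idiom, assembled from the target-arm/translate.c hunk in this commit (tcg_gen_goto_tb() and tcg_gen_exit_tb() are the new TCG entry points; the gen_op_movl_* calls are the unchanged target-specific ops):

    /* Old dyngen-style chaining (removed by this commit):
     *     gen_op_goto_tb0(TBPARAM(tb));
     *     gen_op_movl_T0_im((long)tb + n);
     *     gen_op_exit_tb();
     * New TCG equivalent: */
    static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
    {
        TranslationBlock *tb = s->tb;
        if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
            tcg_gen_goto_tb(n);             /* emit a patchable direct jump */
            gen_op_movl_T0_im(dest);        /* update the guest PC as before */
            gen_op_movl_r15_T0();
            tcg_gen_exit_tb((long)tb + n);  /* return (tb | jump slot) to cpu_exec() */
        } else {
            gen_op_movl_T0_im(dest);
            gen_op_movl_r15_T0();
            tcg_gen_exit_tb(0);             /* 0: next TB must be looked up */
        }
    }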
LICENSE
... | ... | @@ -10,6 +10,9 @@ In particular, the QEMU virtual CPU core library (libqemu.a) is |
10 | 10 | released under the GNU Lesser General Public License. Many hardware |
11 | 11 | device emulation sources are released under the BSD license. |
12 | 12 | |
13 | -3) QEMU is a trademark of Fabrice Bellard. | |
13 | +3) The Tiny Code Generator (TCG) is released under the BSD license | |
14 | + (see license headers in files). | |
15 | + | |
16 | +4) QEMU is a trademark of Fabrice Bellard. | |
14 | 17 | |
15 | 18 | Fabrice Bellard. |
16 | 19 | \ No newline at end of file | ... | ... |
Makefile.target
... | ... | @@ -172,8 +172,11 @@ all: $(PROGS) |
172 | 172 | |
173 | 173 | ######################################################### |
174 | 174 | # cpu emulator library |
175 | -LIBOBJS=exec.o kqemu.o translate-op.o translate-all.o cpu-exec.o\ | |
175 | +LIBOBJS=exec.o kqemu.o translate-all.o cpu-exec.o\ | |
176 | 176 | translate.o op.o host-utils.o |
177 | +# TCG code generator | |
178 | +LIBOBJS+= tcg/tcg.o tcg/tcg-dyngen.o tcg/tcg-runtime.o | |
179 | +CPPFLAGS+=-I$(SRC_PATH)/tcg -I$(SRC_PATH)/tcg/$(ARCH) | |
177 | 180 | ifdef CONFIG_SOFTFLOAT |
178 | 181 | LIBOBJS+=fpu/softfloat.o |
179 | 182 | else |
... | ... | @@ -268,16 +271,16 @@ libqemu.a: $(LIBOBJS) |
268 | 271 | rm -f $@ |
269 | 272 | $(AR) rcs $@ $(LIBOBJS) |
270 | 273 | |
271 | -translate.o: translate.c gen-op.h opc.h cpu.h | |
274 | +translate.o: translate.c gen-op.h dyngen-opc.h cpu.h | |
272 | 275 | |
273 | -translate-all.o: translate-all.c opc.h cpu.h | |
276 | +translate-all.o: translate-all.c dyngen-opc.h cpu.h | |
274 | 277 | |
275 | -translate-op.o: translate-all.c op.h opc.h cpu.h | |
278 | +tcg/tcg.o: op.h dyngen-opc.h cpu.h | |
276 | 279 | |
277 | 280 | op.h: op.o $(DYNGEN) |
278 | 281 | $(DYNGEN) -o $@ $< |
279 | 282 | |
280 | -opc.h: op.o $(DYNGEN) | |
283 | +dyngen-opc.h: op.o $(DYNGEN) | |
281 | 284 | $(DYNGEN) -c -o $@ $< |
282 | 285 | |
283 | 286 | gen-op.h: op.o $(DYNGEN) |
... | ... | @@ -648,8 +651,8 @@ endif # !CONFIG_USER_ONLY |
648 | 651 | $(CC) $(CPPFLAGS) -c -o $@ $< |
649 | 652 | |
650 | 653 | clean: |
651 | - rm -f *.o *.a *~ $(PROGS) gen-op.h opc.h op.h nwfpe/*.o fpu/*.o | |
652 | - rm -f *.d */*.d | |
654 | + rm -f *.o *.a *~ $(PROGS) gen-op.h dyngen-opc.h op.h nwfpe/*.o fpu/*.o | |
655 | + rm -f *.d */*.d tcg/*.o | |
653 | 656 | |
654 | 657 | install: all |
655 | 658 | ifneq ($(PROGS),) | ... | ... |
configure
... | ... | @@ -1051,6 +1051,7 @@ test -f $config_h && mv $config_h ${config_h}~ |
1051 | 1051 | |
1052 | 1052 | mkdir -p $target_dir |
1053 | 1053 | mkdir -p $target_dir/fpu |
1054 | +mkdir -p $target_dir/tcg | |
1054 | 1055 | if test "$target" = "arm-linux-user" -o "$target" = "armeb-linux-user" ; then |
1055 | 1056 | mkdir -p $target_dir/nwfpe |
1056 | 1057 | fi | ... | ... |
cpu-all.h
... | ... | @@ -1048,6 +1048,18 @@ extern int64_t kqemu_ret_int_count; |
1048 | 1048 | extern int64_t kqemu_ret_excp_count; |
1049 | 1049 | extern int64_t kqemu_ret_intr_count; |
1050 | 1050 | |
1051 | +extern int64_t dyngen_tb_count1; | |
1052 | +extern int64_t dyngen_tb_count; | |
1053 | +extern int64_t dyngen_op_count; | |
1054 | +extern int64_t dyngen_old_op_count; | |
1055 | +extern int64_t dyngen_tcg_del_op_count; | |
1056 | +extern int dyngen_op_count_max; | |
1057 | +extern int64_t dyngen_code_in_len; | |
1058 | +extern int64_t dyngen_code_out_len; | |
1059 | +extern int64_t dyngen_interm_time; | |
1060 | +extern int64_t dyngen_code_time; | |
1061 | +extern int64_t dyngen_restore_count; | |
1062 | +extern int64_t dyngen_restore_time; | |
1051 | 1063 | #endif |
1052 | 1064 | |
1053 | 1065 | #endif /* CPU_ALL_H */ | ... | ... |
cpu-defs.h
... | ... | @@ -145,6 +145,7 @@ typedef struct CPUTLBEntry { |
145 | 145 | /* The meaning of the MMU modes is defined in the target code. */ \ |
146 | 146 | CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ |
147 | 147 | struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \ |
148 | + long temp_buf[128]; /* buffer for temporaries in the code generator */ \ | |
148 | 149 | \ |
149 | 150 | /* from this point: preserved by CPU reset */ \ |
150 | 151 | /* ice debug support */ \ | ... | ... |
cpu-exec.c
... | ... | @@ -354,7 +354,7 @@ int cpu_exec(CPUState *env1) |
354 | 354 | #endif |
355 | 355 | #endif |
356 | 356 | int ret, interrupt_request; |
357 | - void (*gen_func)(void); | |
357 | + long (*gen_func)(void); | |
358 | 358 | TranslationBlock *tb; |
359 | 359 | uint8_t *tc_ptr; |
360 | 360 | |
... | ... | @@ -736,7 +736,7 @@ int cpu_exec(CPUState *env1) |
736 | 736 | fp.gp = code_gen_buffer + 2 * (1 << 20); |
737 | 737 | (*(void (*)(void)) &fp)(); |
738 | 738 | #else |
739 | - gen_func(); | |
739 | + T0 = gen_func(); | |
740 | 740 | #endif |
741 | 741 | env->current_tb = NULL; |
742 | 742 | /* reset soft MMU for next block (it can currently | ... | ... |
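The cpu-exec.c change above is the consumer side of tcg_gen_exit_tb(): the translated block is now entered as a function returning a long, and that return value replaces the old convention of reading the chaining hint out of the T0 global after the call. A rough sketch of the call site (the cast from tc_ptr is assumed from the surrounding, unchanged code):

    /* execute the generated code */
    long (*gen_func)(void);
    gen_func = (long (*)(void))tc_ptr;   /* tc_ptr: host code of the current TB */
    T0 = gen_func();                     /* 0, or ((long)tb + n) set by tcg_gen_exit_tb() */
    env->current_tb = NULL;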
dyngen-op.h deleted
100644 → 0
dyngen.c
... | ... | @@ -1212,14 +1212,16 @@ int load_object(const char *filename) |
1212 | 1212 | |
1213 | 1213 | #endif /* CONFIG_FORMAT_MACH */ |
1214 | 1214 | |
1215 | -void get_reloc_expr(char *name, int name_size, const char *sym_name) | |
1215 | +/* return true if the expression is a label reference */ | |
1216 | +int get_reloc_expr(char *name, int name_size, const char *sym_name) | |
1216 | 1217 | { |
1217 | 1218 | const char *p; |
1218 | 1219 | |
1219 | 1220 | if (strstart(sym_name, "__op_param", &p)) { |
1220 | 1221 | snprintf(name, name_size, "param%s", p); |
1221 | 1222 | } else if (strstart(sym_name, "__op_gen_label", &p)) { |
1222 | - snprintf(name, name_size, "gen_labels[param%s]", p); | |
1223 | + snprintf(name, name_size, "param%s", p); | |
1224 | + return 1; | |
1223 | 1225 | } else { |
1224 | 1226 | #ifdef HOST_SPARC |
1225 | 1227 | if (sym_name[0] == '.') |
... | ... | @@ -1230,6 +1232,7 @@ void get_reloc_expr(char *name, int name_size, const char *sym_name) |
1230 | 1232 | #endif |
1231 | 1233 | snprintf(name, name_size, "(long)(&%s)", sym_name); |
1232 | 1234 | } |
1235 | + return 0; | |
1233 | 1236 | } |
1234 | 1237 | |
1235 | 1238 | #ifdef HOST_IA64 |
... | ... | @@ -1846,7 +1849,7 @@ void gen_code(const char *name, host_ulong offset, host_ulong size, |
1846 | 1849 | #if defined(HOST_I386) |
1847 | 1850 | { |
1848 | 1851 | char relname[256]; |
1849 | - int type; | |
1852 | + int type, is_label; | |
1850 | 1853 | int addend; |
1851 | 1854 | int reloc_offset; |
1852 | 1855 | for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { |
... | ... | @@ -1868,21 +1871,33 @@ void gen_code(const char *name, host_ulong offset, host_ulong size, |
1868 | 1871 | continue; |
1869 | 1872 | } |
1870 | 1873 | |
1871 | - get_reloc_expr(relname, sizeof(relname), sym_name); | |
1874 | + is_label = get_reloc_expr(relname, sizeof(relname), sym_name); | |
1872 | 1875 | addend = get32((uint32_t *)(text + rel->r_offset)); |
1873 | 1876 | #ifdef CONFIG_FORMAT_ELF |
1874 | 1877 | type = ELF32_R_TYPE(rel->r_info); |
1875 | - switch(type) { | |
1876 | - case R_386_32: | |
1877 | - fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %d;\n", | |
1878 | - reloc_offset, relname, addend); | |
1879 | - break; | |
1880 | - case R_386_PC32: | |
1881 | - fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s - (long)(gen_code_ptr + %d) + %d;\n", | |
1882 | - reloc_offset, relname, reloc_offset, addend); | |
1883 | - break; | |
1884 | - default: | |
1885 | - error("unsupported i386 relocation (%d)", type); | |
1878 | + if (is_label) { | |
1879 | + switch(type) { | |
1880 | + case R_386_32: | |
1881 | + case R_386_PC32: | |
1882 | + fprintf(outfile, " tcg_out_reloc(s, gen_code_ptr + %d, %d, %s, %d);\n", | |
1883 | + reloc_offset, type, relname, addend); | |
1884 | + break; | |
1885 | + default: | |
1886 | + error("unsupported i386 relocation (%d)", type); | |
1887 | + } | |
1888 | + } else { | |
1889 | + switch(type) { | |
1890 | + case R_386_32: | |
1891 | + fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %d;\n", | |
1892 | + reloc_offset, relname, addend); | |
1893 | + break; | |
1894 | + case R_386_PC32: | |
1895 | + fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s - (long)(gen_code_ptr + %d) + %d;\n", | |
1896 | + reloc_offset, relname, reloc_offset, addend); | |
1897 | + break; | |
1898 | + default: | |
1899 | + error("unsupported i386 relocation (%d)", type); | |
1900 | + } | |
1886 | 1901 | } |
1887 | 1902 | #elif defined(CONFIG_FORMAT_COFF) |
1888 | 1903 | { |
... | ... | @@ -1920,32 +1935,45 @@ void gen_code(const char *name, host_ulong offset, host_ulong size, |
1920 | 1935 | #elif defined(HOST_X86_64) |
1921 | 1936 | { |
1922 | 1937 | char relname[256]; |
1923 | - int type; | |
1938 | + int type, is_label; | |
1924 | 1939 | int addend; |
1925 | 1940 | int reloc_offset; |
1926 | 1941 | for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { |
1927 | 1942 | if (rel->r_offset >= start_offset && |
1928 | 1943 | rel->r_offset < start_offset + copy_size) { |
1929 | 1944 | sym_name = strtab + symtab[ELFW(R_SYM)(rel->r_info)].st_name; |
1930 | - get_reloc_expr(relname, sizeof(relname), sym_name); | |
1945 | + is_label = get_reloc_expr(relname, sizeof(relname), sym_name); | |
1931 | 1946 | type = ELF32_R_TYPE(rel->r_info); |
1932 | 1947 | addend = rel->r_addend; |
1933 | 1948 | reloc_offset = rel->r_offset - start_offset; |
1934 | - switch(type) { | |
1935 | - case R_X86_64_32: | |
1936 | - fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = (uint32_t)%s + %d;\n", | |
1937 | - reloc_offset, relname, addend); | |
1938 | - break; | |
1939 | - case R_X86_64_32S: | |
1940 | - fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = (int32_t)%s + %d;\n", | |
1941 | - reloc_offset, relname, addend); | |
1942 | - break; | |
1943 | - case R_X86_64_PC32: | |
1944 | - fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s - (long)(gen_code_ptr + %d) + %d;\n", | |
1945 | - reloc_offset, relname, reloc_offset, addend); | |
1946 | - break; | |
1947 | - default: | |
1948 | - error("unsupported X86_64 relocation (%d)", type); | |
1949 | + if (is_label) { | |
1950 | + switch(type) { | |
1951 | + case R_X86_64_32: | |
1952 | + case R_X86_64_32S: | |
1953 | + case R_X86_64_PC32: | |
1954 | + fprintf(outfile, " tcg_out_reloc(s, gen_code_ptr + %d, %d, %s, %d);\n", | |
1955 | + reloc_offset, type, relname, addend); | |
1956 | + break; | |
1957 | + default: | |
1958 | + error("unsupported X86_64 relocation (%d)", type); | |
1959 | + } | |
1960 | + } else { | |
1961 | + switch(type) { | |
1962 | + case R_X86_64_32: | |
1963 | + fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = (uint32_t)%s + %d;\n", | |
1964 | + reloc_offset, relname, addend); | |
1965 | + break; | |
1966 | + case R_X86_64_32S: | |
1967 | + fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = (int32_t)%s + %d;\n", | |
1968 | + reloc_offset, relname, addend); | |
1969 | + break; | |
1970 | + case R_X86_64_PC32: | |
1971 | + fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s - (long)(gen_code_ptr + %d) + %d;\n", | |
1972 | + reloc_offset, relname, reloc_offset, addend); | |
1973 | + break; | |
1974 | + default: | |
1975 | + error("unsupported X86_64 relocation (%d)", type); | |
1976 | + } | |
1949 | 1977 | } |
1950 | 1978 | } |
1951 | 1979 | } |
... | ... | @@ -2639,11 +2667,6 @@ int gen_file(FILE *outfile, int out_type) |
2639 | 2667 | EXE_SYM *sym; |
2640 | 2668 | |
2641 | 2669 | if (out_type == OUT_INDEX_OP) { |
2642 | - fprintf(outfile, "DEF(end, 0, 0)\n"); | |
2643 | - fprintf(outfile, "DEF(nop, 0, 0)\n"); | |
2644 | - fprintf(outfile, "DEF(nop1, 1, 0)\n"); | |
2645 | - fprintf(outfile, "DEF(nop2, 2, 0)\n"); | |
2646 | - fprintf(outfile, "DEF(nop3, 3, 0)\n"); | |
2647 | 2670 | for(i = 0, sym = symtab; i < nb_syms; i++, sym++) { |
2648 | 2671 | const char *name; |
2649 | 2672 | name = get_sym_name(sym); |
... | ... | @@ -2653,7 +2676,6 @@ int gen_file(FILE *outfile, int out_type) |
2653 | 2676 | } |
2654 | 2677 | } else if (out_type == OUT_GEN_OP) { |
2655 | 2678 | /* generate gen_xxx functions */ |
2656 | - fprintf(outfile, "#include \"dyngen-op.h\"\n"); | |
2657 | 2679 | for(i = 0, sym = symtab; i < nb_syms; i++, sym++) { |
2658 | 2680 | const char *name; |
2659 | 2681 | name = get_sym_name(sym); |
... | ... | @@ -2670,6 +2692,7 @@ int gen_file(FILE *outfile, int out_type) |
2670 | 2692 | /* generate big code generation switch */ |
2671 | 2693 | |
2672 | 2694 | #ifdef HOST_ARM |
2695 | +#error broken | |
2673 | 2696 | /* We need to know the size of all the ops so we can figure out when |
2674 | 2697 | to emit constant pools. This must be consistent with opc.h. */ |
2675 | 2698 | fprintf(outfile, |
... | ... | @@ -2690,16 +2713,8 @@ fprintf(outfile, |
2690 | 2713 | "};\n"); |
2691 | 2714 | #endif |
2692 | 2715 | |
2693 | -fprintf(outfile, | |
2694 | -"int dyngen_code(uint8_t *gen_code_buf,\n" | |
2695 | -" uint16_t *label_offsets, uint16_t *jmp_offsets,\n" | |
2696 | -" const uint16_t *opc_buf, const uint32_t *opparam_buf, const long *gen_labels)\n" | |
2697 | -"{\n" | |
2698 | -" uint8_t *gen_code_ptr;\n" | |
2699 | -" const uint16_t *opc_ptr;\n" | |
2700 | -" const uint32_t *opparam_ptr;\n"); | |
2701 | - | |
2702 | 2716 | #ifdef HOST_ARM |
2717 | +#error broken | |
2703 | 2718 | /* Arm is tricky because it uses constant pools for loading immediate values. |
2704 | 2719 | We assume (and require) each function is code followed by a constant pool. |
2705 | 2720 | All the ops are small so this should be ok. For each op we figure |
... | ... | @@ -2732,6 +2747,7 @@ fprintf(outfile, |
2732 | 2747 | " uint8_t *arm_pool_ptr = gen_code_buf + 0x1000000;\n"); |
2733 | 2748 | #endif |
2734 | 2749 | #ifdef HOST_IA64 |
2750 | +#error broken | |
2735 | 2751 | { |
2736 | 2752 | long addend, not_first = 0; |
2737 | 2753 | unsigned long sym_idx; |
... | ... | @@ -2789,18 +2805,8 @@ fprintf(outfile, |
2789 | 2805 | } |
2790 | 2806 | #endif |
2791 | 2807 | |
2792 | -fprintf(outfile, | |
2793 | -"\n" | |
2794 | -" gen_code_ptr = gen_code_buf;\n" | |
2795 | -" opc_ptr = opc_buf;\n" | |
2796 | -" opparam_ptr = opparam_buf;\n"); | |
2797 | - | |
2798 | - /* Generate prologue, if needed. */ | |
2799 | - | |
2800 | -fprintf(outfile, | |
2801 | -" for(;;) {\n"); | |
2802 | - | |
2803 | 2808 | #ifdef HOST_ARM |
2809 | +#error broken | |
2804 | 2810 | /* Generate constant pool if needed */ |
2805 | 2811 | fprintf(outfile, |
2806 | 2812 | " if (gen_code_ptr + arm_opc_size[*opc_ptr] >= arm_pool_ptr) {\n" |
... | ... | @@ -2813,9 +2819,6 @@ fprintf(outfile, |
2813 | 2819 | " }\n"); |
2814 | 2820 | #endif |
2815 | 2821 | |
2816 | -fprintf(outfile, | |
2817 | -" switch(*opc_ptr++) {\n"); | |
2818 | - | |
2819 | 2822 | for(i = 0, sym = symtab; i < nb_syms; i++, sym++) { |
2820 | 2823 | const char *name; |
2821 | 2824 | name = get_sym_name(sym); |
... | ... | @@ -2831,51 +2834,6 @@ fprintf(outfile, |
2831 | 2834 | gen_code(name, sym->st_value, sym->st_size, outfile, 1); |
2832 | 2835 | } |
2833 | 2836 | } |
2834 | - | |
2835 | -fprintf(outfile, | |
2836 | -" case INDEX_op_nop:\n" | |
2837 | -" break;\n" | |
2838 | -" case INDEX_op_nop1:\n" | |
2839 | -" opparam_ptr++;\n" | |
2840 | -" break;\n" | |
2841 | -" case INDEX_op_nop2:\n" | |
2842 | -" opparam_ptr += 2;\n" | |
2843 | -" break;\n" | |
2844 | -" case INDEX_op_nop3:\n" | |
2845 | -" opparam_ptr += 3;\n" | |
2846 | -" break;\n" | |
2847 | -" default:\n" | |
2848 | -" goto the_end;\n" | |
2849 | -" }\n"); | |
2850 | - | |
2851 | - | |
2852 | -fprintf(outfile, | |
2853 | -" }\n" | |
2854 | -" the_end:\n" | |
2855 | -); | |
2856 | -#ifdef HOST_IA64 | |
2857 | - fprintf(outfile, | |
2858 | - " {\n" | |
2859 | - " extern char code_gen_buffer[];\n" | |
2860 | - " ia64_apply_fixes(&gen_code_ptr, ltoff_fixes, " | |
2861 | - "(uint64_t) code_gen_buffer + 2*(1<<20), plt_fixes,\n\t\t\t" | |
2862 | - "sizeof(plt_target)/sizeof(plt_target[0]),\n\t\t\t" | |
2863 | - "plt_target, plt_offset);\n }\n"); | |
2864 | -#endif | |
2865 | - | |
2866 | -/* generate some code patching */ | |
2867 | -#ifdef HOST_ARM | |
2868 | -fprintf(outfile, | |
2869 | -"if (arm_data_ptr != arm_data_table + ARM_LDR_TABLE_SIZE)\n" | |
2870 | -" gen_code_ptr = arm_flush_ldr(gen_code_ptr, arm_ldr_table, " | |
2871 | -"arm_ldr_ptr, arm_data_ptr, arm_data_table + ARM_LDR_TABLE_SIZE, 0);\n"); | |
2872 | -#endif | |
2873 | - /* flush instruction cache */ | |
2874 | - fprintf(outfile, "flush_icache_range((unsigned long)gen_code_buf, (unsigned long)gen_code_ptr);\n"); | |
2875 | - | |
2876 | - fprintf(outfile, "return gen_code_ptr - gen_code_buf;\n"); | |
2877 | - fprintf(outfile, "}\n\n"); | |
2878 | - | |
2879 | 2837 | } |
2880 | 2838 | |
2881 | 2839 | return 0; | ... | ... |
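The is_label path added above changes what dyngen emits for relocations against __op_gen_labelN symbols: instead of patching the bytes immediately, the generated dyngen_code body hands the relocation to TCG so it can be resolved once the label's final address is known. An illustrative comparison of the two emitted forms for an i386 relocation (the offset, addend and helper name are placeholders, not taken from this commit):

    /* ordinary symbol: patched in place, exactly as before (R_386_PC32 case) */
    *(uint32_t *)(gen_code_ptr + 12) =
        (long)(&some_helper) - (long)(gen_code_ptr + 12) + -4;

    /* label reference (__op_gen_labelN): queued on the TCG relocation list */
    tcg_out_reloc(s, gen_code_ptr + 12, R_386_PC32, param1, -4);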
dyngen.h deleted
100644 → 0
1 | -/* | |
2 | - * dyngen helpers | |
3 | - * | |
4 | - * Copyright (c) 2003 Fabrice Bellard | |
5 | - * | |
6 | - * This library is free software; you can redistribute it and/or | |
7 | - * modify it under the terms of the GNU Lesser General Public | |
8 | - * License as published by the Free Software Foundation; either | |
9 | - * version 2 of the License, or (at your option) any later version. | |
10 | - * | |
11 | - * This library is distributed in the hope that it will be useful, | |
12 | - * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | - * Lesser General Public License for more details. | |
15 | - * | |
16 | - * You should have received a copy of the GNU Lesser General Public | |
17 | - * License along with this library; if not, write to the Free Software | |
18 | - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
19 | - */ | |
20 | - | |
21 | -int __op_param1, __op_param2, __op_param3; | |
22 | -#if defined(__sparc__) || defined(__arm__) | |
23 | - void __op_gen_label1(){} | |
24 | - void __op_gen_label2(){} | |
25 | - void __op_gen_label3(){} | |
26 | -#else | |
27 | - int __op_gen_label1, __op_gen_label2, __op_gen_label3; | |
28 | -#endif | |
29 | -int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3; | |
30 | - | |
31 | -#if defined(__i386__) || defined(__x86_64__) || defined(__s390__) | |
32 | -static inline void flush_icache_range(unsigned long start, unsigned long stop) | |
33 | -{ | |
34 | -} | |
35 | -#elif defined(__ia64__) | |
36 | -static inline void flush_icache_range(unsigned long start, unsigned long stop) | |
37 | -{ | |
38 | - while (start < stop) { | |
39 | - asm volatile ("fc %0" :: "r"(start)); | |
40 | - start += 32; | |
41 | - } | |
42 | - asm volatile (";;sync.i;;srlz.i;;"); | |
43 | -} | |
44 | -#elif defined(__powerpc__) | |
45 | - | |
46 | -#define MIN_CACHE_LINE_SIZE 8 /* conservative value */ | |
47 | - | |
48 | -static inline void flush_icache_range(unsigned long start, unsigned long stop) | |
49 | -{ | |
50 | - unsigned long p; | |
51 | - | |
52 | - start &= ~(MIN_CACHE_LINE_SIZE - 1); | |
53 | - stop = (stop + MIN_CACHE_LINE_SIZE - 1) & ~(MIN_CACHE_LINE_SIZE - 1); | |
54 | - | |
55 | - for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) { | |
56 | - asm volatile ("dcbst 0,%0" : : "r"(p) : "memory"); | |
57 | - } | |
58 | - asm volatile ("sync" : : : "memory"); | |
59 | - for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) { | |
60 | - asm volatile ("icbi 0,%0" : : "r"(p) : "memory"); | |
61 | - } | |
62 | - asm volatile ("sync" : : : "memory"); | |
63 | - asm volatile ("isync" : : : "memory"); | |
64 | -} | |
65 | -#elif defined(__alpha__) | |
66 | -static inline void flush_icache_range(unsigned long start, unsigned long stop) | |
67 | -{ | |
68 | - asm ("imb"); | |
69 | -} | |
70 | -#elif defined(__sparc__) | |
71 | -static inline void flush_icache_range(unsigned long start, unsigned long stop) | |
72 | -{ | |
73 | - unsigned long p; | |
74 | - | |
75 | - p = start & ~(8UL - 1UL); | |
76 | - stop = (stop + (8UL - 1UL)) & ~(8UL - 1UL); | |
77 | - | |
78 | - for (; p < stop; p += 8) | |
79 | - __asm__ __volatile__("flush\t%0" : : "r" (p)); | |
80 | -} | |
81 | -#elif defined(__arm__) | |
82 | -static inline void flush_icache_range(unsigned long start, unsigned long stop) | |
83 | -{ | |
84 | - register unsigned long _beg __asm ("a1") = start; | |
85 | - register unsigned long _end __asm ("a2") = stop; | |
86 | - register unsigned long _flg __asm ("a3") = 0; | |
87 | - __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg)); | |
88 | -} | |
89 | -#elif defined(__mc68000) | |
90 | - | |
91 | -# include <asm/cachectl.h> | |
92 | -static inline void flush_icache_range(unsigned long start, unsigned long stop) | |
93 | -{ | |
94 | - cacheflush(start,FLUSH_SCOPE_LINE,FLUSH_CACHE_BOTH,stop-start+16); | |
95 | -} | |
96 | -#elif defined(__mips__) | |
97 | - | |
98 | -#include <sys/cachectl.h> | |
99 | -static inline void flush_icache_range(unsigned long start, unsigned long stop) | |
100 | -{ | |
101 | - _flush_cache ((void *)start, stop - start, BCACHE); | |
102 | -} | |
103 | -#else | |
104 | -#error unsupported CPU | |
105 | -#endif | |
106 | - | |
107 | -#ifdef __alpha__ | |
108 | - | |
109 | -register int gp asm("$29"); | |
110 | - | |
111 | -static inline void immediate_ldah(void *p, int val) { | |
112 | - uint32_t *dest = p; | |
113 | - long high = ((val >> 16) + ((val >> 15) & 1)) & 0xffff; | |
114 | - | |
115 | - *dest &= ~0xffff; | |
116 | - *dest |= high; | |
117 | - *dest |= 31 << 16; | |
118 | -} | |
119 | -static inline void immediate_lda(void *dest, int val) { | |
120 | - *(uint16_t *) dest = val; | |
121 | -} | |
122 | -void fix_bsr(void *p, int offset) { | |
123 | - uint32_t *dest = p; | |
124 | - *dest &= ~((1 << 21) - 1); | |
125 | - *dest |= (offset >> 2) & ((1 << 21) - 1); | |
126 | -} | |
127 | - | |
128 | -#endif /* __alpha__ */ | |
129 | - | |
130 | -#ifdef __arm__ | |
131 | - | |
132 | -#define ARM_LDR_TABLE_SIZE 1024 | |
133 | - | |
134 | -typedef struct LDREntry { | |
135 | - uint8_t *ptr; | |
136 | - uint32_t *data_ptr; | |
137 | - unsigned type:2; | |
138 | -} LDREntry; | |
139 | - | |
140 | -static LDREntry arm_ldr_table[1024]; | |
141 | -static uint32_t arm_data_table[ARM_LDR_TABLE_SIZE]; | |
142 | - | |
143 | -extern char exec_loop; | |
144 | - | |
145 | -static inline void arm_reloc_pc24(uint32_t *ptr, uint32_t insn, int val) | |
146 | -{ | |
147 | - *ptr = (insn & ~0xffffff) | ((insn + ((val - (int)ptr) >> 2)) & 0xffffff); | |
148 | -} | |
149 | - | |
150 | -static uint8_t *arm_flush_ldr(uint8_t *gen_code_ptr, | |
151 | - LDREntry *ldr_start, LDREntry *ldr_end, | |
152 | - uint32_t *data_start, uint32_t *data_end, | |
153 | - int gen_jmp) | |
154 | -{ | |
155 | - LDREntry *le; | |
156 | - uint32_t *ptr; | |
157 | - int offset, data_size, target; | |
158 | - uint8_t *data_ptr; | |
159 | - uint32_t insn; | |
160 | - uint32_t mask; | |
161 | - | |
162 | - data_size = (data_end - data_start) << 2; | |
163 | - | |
164 | - if (gen_jmp) { | |
165 | - /* generate branch to skip the data */ | |
166 | - if (data_size == 0) | |
167 | - return gen_code_ptr; | |
168 | - target = (long)gen_code_ptr + data_size + 4; | |
169 | - arm_reloc_pc24((uint32_t *)gen_code_ptr, 0xeafffffe, target); | |
170 | - gen_code_ptr += 4; | |
171 | - } | |
172 | - | |
173 | - /* copy the data */ | |
174 | - data_ptr = gen_code_ptr; | |
175 | - memcpy(gen_code_ptr, data_start, data_size); | |
176 | - gen_code_ptr += data_size; | |
177 | - | |
178 | - /* patch the ldr to point to the data */ | |
179 | - for(le = ldr_start; le < ldr_end; le++) { | |
180 | - ptr = (uint32_t *)le->ptr; | |
181 | - offset = ((unsigned long)(le->data_ptr) - (unsigned long)data_start) + | |
182 | - (unsigned long)data_ptr - | |
183 | - (unsigned long)ptr - 8; | |
184 | - if (offset < 0) { | |
185 | - fprintf(stderr, "Negative constant pool offset\n"); | |
186 | - abort(); | |
187 | - } | |
188 | - switch (le->type) { | |
189 | - case 0: /* ldr */ | |
190 | - mask = ~0x00800fff; | |
191 | - if (offset >= 4096) { | |
192 | - fprintf(stderr, "Bad ldr offset\n"); | |
193 | - abort(); | |
194 | - } | |
195 | - break; | |
196 | - case 1: /* ldc */ | |
197 | - mask = ~0x008000ff; | |
198 | - if (offset >= 1024 ) { | |
199 | - fprintf(stderr, "Bad ldc offset\n"); | |
200 | - abort(); | |
201 | - } | |
202 | - break; | |
203 | - case 2: /* add */ | |
204 | - mask = ~0xfff; | |
205 | - if (offset >= 1024 ) { | |
206 | - fprintf(stderr, "Bad add offset\n"); | |
207 | - abort(); | |
208 | - } | |
209 | - break; | |
210 | - default: | |
211 | - fprintf(stderr, "Bad pc relative fixup\n"); | |
212 | - abort(); | |
213 | - } | |
214 | - insn = *ptr & mask; | |
215 | - switch (le->type) { | |
216 | - case 0: /* ldr */ | |
217 | - insn |= offset | 0x00800000; | |
218 | - break; | |
219 | - case 1: /* ldc */ | |
220 | - insn |= (offset >> 2) | 0x00800000; | |
221 | - break; | |
222 | - case 2: /* add */ | |
223 | - insn |= (offset >> 2) | 0xf00; | |
224 | - break; | |
225 | - } | |
226 | - *ptr = insn; | |
227 | - } | |
228 | - return gen_code_ptr; | |
229 | -} | |
230 | - | |
231 | -#endif /* __arm__ */ | |
232 | - | |
233 | -#ifdef __ia64 | |
234 | - | |
235 | -/* Patch instruction with "val" where "mask" has 1 bits. */ | |
236 | -static inline void ia64_patch (uint64_t insn_addr, uint64_t mask, uint64_t val) | |
237 | -{ | |
238 | - uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) (insn_addr & -16); | |
239 | -# define insn_mask ((1UL << 41) - 1) | |
240 | - unsigned long shift; | |
241 | - | |
242 | - b0 = b[0]; b1 = b[1]; | |
243 | - shift = 5 + 41 * (insn_addr % 16); /* 5 template, 3 x 41-bit insns */ | |
244 | - if (shift >= 64) { | |
245 | - m1 = mask << (shift - 64); | |
246 | - v1 = val << (shift - 64); | |
247 | - } else { | |
248 | - m0 = mask << shift; m1 = mask >> (64 - shift); | |
249 | - v0 = val << shift; v1 = val >> (64 - shift); | |
250 | - b[0] = (b0 & ~m0) | (v0 & m0); | |
251 | - } | |
252 | - b[1] = (b1 & ~m1) | (v1 & m1); | |
253 | -} | |
254 | - | |
255 | -static inline void ia64_patch_imm60 (uint64_t insn_addr, uint64_t val) | |
256 | -{ | |
257 | - ia64_patch(insn_addr, | |
258 | - 0x011ffffe000UL, | |
259 | - ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */ | |
260 | - | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */)); | |
261 | - ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18); | |
262 | -} | |
263 | - | |
264 | -static inline void ia64_imm64 (void *insn, uint64_t val) | |
265 | -{ | |
266 | - /* Ignore the slot number of the relocation; GCC and Intel | |
267 | - toolchains differed for some time on whether IMM64 relocs are | |
268 | - against slot 1 (Intel) or slot 2 (GCC). */ | |
269 | - uint64_t insn_addr = (uint64_t) insn & ~3UL; | |
270 | - | |
271 | - ia64_patch(insn_addr + 2, | |
272 | - 0x01fffefe000UL, | |
273 | - ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */ | |
274 | - | ((val & 0x0000000000200000UL) << 0) /* bit 21 -> 21 */ | |
275 | - | ((val & 0x00000000001f0000UL) << 6) /* bit 16 -> 22 */ | |
276 | - | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */ | |
277 | - | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */) | |
278 | - ); | |
279 | - ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22); | |
280 | -} | |
281 | - | |
282 | -static inline void ia64_imm60b (void *insn, uint64_t val) | |
283 | -{ | |
284 | - /* Ignore the slot number of the relocation; GCC and Intel | |
285 | - toolchains differed for some time on whether IMM64 relocs are | |
286 | - against slot 1 (Intel) or slot 2 (GCC). */ | |
287 | - uint64_t insn_addr = (uint64_t) insn & ~3UL; | |
288 | - | |
289 | - if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) | |
290 | - fprintf(stderr, "%s: value %ld out of IMM60 range\n", | |
291 | - __FUNCTION__, (int64_t) val); | |
292 | - ia64_patch_imm60(insn_addr + 2, val); | |
293 | -} | |
294 | - | |
295 | -static inline void ia64_imm22 (void *insn, uint64_t val) | |
296 | -{ | |
297 | - if (val + (1 << 21) >= (1 << 22)) | |
298 | - fprintf(stderr, "%s: value %li out of IMM22 range\n", | |
299 | - __FUNCTION__, (int64_t)val); | |
300 | - ia64_patch((uint64_t) insn, 0x01fffcfe000UL, | |
301 | - ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */ | |
302 | - | ((val & 0x1f0000UL) << 6) /* bit 16 -> 22 */ | |
303 | - | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */ | |
304 | - | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */)); | |
305 | -} | |
306 | - | |
307 | -/* Like ia64_imm22(), but also clear bits 20-21. For addl, this has | |
308 | - the effect of turning "addl rX=imm22,rY" into "addl | |
309 | - rX=imm22,r0". */ | |
310 | -static inline void ia64_imm22_r0 (void *insn, uint64_t val) | |
311 | -{ | |
312 | - if (val + (1 << 21) >= (1 << 22)) | |
313 | - fprintf(stderr, "%s: value %li out of IMM22 range\n", | |
314 | - __FUNCTION__, (int64_t)val); | |
315 | - ia64_patch((uint64_t) insn, 0x01fffcfe000UL | (0x3UL << 20), | |
316 | - ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */ | |
317 | - | ((val & 0x1f0000UL) << 6) /* bit 16 -> 22 */ | |
318 | - | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */ | |
319 | - | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */)); | |
320 | -} | |
321 | - | |
322 | -static inline void ia64_imm21b (void *insn, uint64_t val) | |
323 | -{ | |
324 | - if (val + (1 << 20) >= (1 << 21)) | |
325 | - fprintf(stderr, "%s: value %li out of IMM21b range\n", | |
326 | - __FUNCTION__, (int64_t)val); | |
327 | - ia64_patch((uint64_t) insn, 0x11ffffe000UL, | |
328 | - ( ((val & 0x100000UL) << 16) /* bit 20 -> 36 */ | |
329 | - | ((val & 0x0fffffUL) << 13) /* bit 0 -> 13 */)); | |
330 | -} | |
331 | - | |
332 | -static inline void ia64_nop_b (void *insn) | |
333 | -{ | |
334 | - ia64_patch((uint64_t) insn, (1UL << 41) - 1, 2UL << 37); | |
335 | -} | |
336 | - | |
337 | -static inline void ia64_ldxmov(void *insn, uint64_t val) | |
338 | -{ | |
339 | - if (val + (1 << 21) < (1 << 22)) | |
340 | - ia64_patch((uint64_t) insn, 0x1fff80fe000UL, 8UL << 37); | |
341 | -} | |
342 | - | |
343 | -static inline int ia64_patch_ltoff(void *insn, uint64_t val, | |
344 | - int relaxable) | |
345 | -{ | |
346 | - if (relaxable && (val + (1 << 21) < (1 << 22))) { | |
347 | - ia64_imm22_r0(insn, val); | |
348 | - return 0; | |
349 | - } | |
350 | - return 1; | |
351 | -} | |
352 | - | |
353 | -struct ia64_fixup { | |
354 | - struct ia64_fixup *next; | |
355 | - void *addr; /* address that needs to be patched */ | |
356 | - long value; | |
357 | -}; | |
358 | - | |
359 | -#define IA64_PLT(insn, plt_index) \ | |
360 | -do { \ | |
361 | - struct ia64_fixup *fixup = alloca(sizeof(*fixup)); \ | |
362 | - fixup->next = plt_fixes; \ | |
363 | - plt_fixes = fixup; \ | |
364 | - fixup->addr = (insn); \ | |
365 | - fixup->value = (plt_index); \ | |
366 | - plt_offset[(plt_index)] = 1; \ | |
367 | -} while (0) | |
368 | - | |
369 | -#define IA64_LTOFF(insn, val, relaxable) \ | |
370 | -do { \ | |
371 | - if (ia64_patch_ltoff(insn, val, relaxable)) { \ | |
372 | - struct ia64_fixup *fixup = alloca(sizeof(*fixup)); \ | |
373 | - fixup->next = ltoff_fixes; \ | |
374 | - ltoff_fixes = fixup; \ | |
375 | - fixup->addr = (insn); \ | |
376 | - fixup->value = (val); \ | |
377 | - } \ | |
378 | -} while (0) | |
379 | - | |
380 | -static inline void ia64_apply_fixes (uint8_t **gen_code_pp, | |
381 | - struct ia64_fixup *ltoff_fixes, | |
382 | - uint64_t gp, | |
383 | - struct ia64_fixup *plt_fixes, | |
384 | - int num_plts, | |
385 | - unsigned long *plt_target, | |
386 | - unsigned int *plt_offset) | |
387 | -{ | |
388 | - static const uint8_t plt_bundle[] = { | |
389 | - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; movl r1=GP */ | |
390 | - 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x60, | |
391 | - | |
392 | - 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; brl IP */ | |
393 | - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0 | |
394 | - }; | |
395 | - uint8_t *gen_code_ptr = *gen_code_pp, *plt_start, *got_start; | |
396 | - uint64_t *vp; | |
397 | - struct ia64_fixup *fixup; | |
398 | - unsigned int offset = 0; | |
399 | - struct fdesc { | |
400 | - long ip; | |
401 | - long gp; | |
402 | - } *fdesc; | |
403 | - int i; | |
404 | - | |
405 | - if (plt_fixes) { | |
406 | - plt_start = gen_code_ptr; | |
407 | - | |
408 | - for (i = 0; i < num_plts; ++i) { | |
409 | - if (plt_offset[i]) { | |
410 | - plt_offset[i] = offset; | |
411 | - offset += sizeof(plt_bundle); | |
412 | - | |
413 | - fdesc = (struct fdesc *) plt_target[i]; | |
414 | - memcpy(gen_code_ptr, plt_bundle, sizeof(plt_bundle)); | |
415 | - ia64_imm64 (gen_code_ptr + 0x02, fdesc->gp); | |
416 | - ia64_imm60b(gen_code_ptr + 0x12, | |
417 | - (fdesc->ip - (long) (gen_code_ptr + 0x10)) >> 4); | |
418 | - gen_code_ptr += sizeof(plt_bundle); | |
419 | - } | |
420 | - } | |
421 | - | |
422 | - for (fixup = plt_fixes; fixup; fixup = fixup->next) | |
423 | - ia64_imm21b(fixup->addr, | |
424 | - ((long) plt_start + plt_offset[fixup->value] | |
425 | - - ((long) fixup->addr & ~0xf)) >> 4); | |
426 | - } | |
427 | - | |
428 | - got_start = gen_code_ptr; | |
429 | - | |
430 | - /* First, create the GOT: */ | |
431 | - for (fixup = ltoff_fixes; fixup; fixup = fixup->next) { | |
432 | - /* first check if we already have this value in the GOT: */ | |
433 | - for (vp = (uint64_t *) got_start; vp < (uint64_t *) gen_code_ptr; ++vp) | |
434 | - if (*vp == fixup->value) | |
435 | - break; | |
436 | - if (vp == (uint64_t *) gen_code_ptr) { | |
437 | - /* Nope, we need to put the value in the GOT: */ | |
438 | - *vp = fixup->value; | |
439 | - gen_code_ptr += 8; | |
440 | - } | |
441 | - ia64_imm22(fixup->addr, (long) vp - gp); | |
442 | - } | |
443 | - /* Keep code ptr aligned. */ | |
444 | - if ((long) gen_code_ptr & 15) | |
445 | - gen_code_ptr += 8; | |
446 | - *gen_code_pp = gen_code_ptr; | |
447 | -} | |
448 | - | |
449 | -#endif |
exec-all.h
... | ... | @@ -36,10 +36,6 @@ struct TranslationBlock; |
36 | 36 | |
37 | 37 | #define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3) |
38 | 38 | |
39 | -extern uint16_t gen_opc_buf[OPC_BUF_SIZE]; | |
40 | -extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE]; | |
41 | -extern long gen_labels[OPC_BUF_SIZE]; | |
42 | -extern int nb_gen_labels; | |
43 | 39 | extern target_ulong gen_opc_pc[OPC_BUF_SIZE]; |
44 | 40 | extern target_ulong gen_opc_npc[OPC_BUF_SIZE]; |
45 | 41 | extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE]; |
... | ... | @@ -63,8 +59,8 @@ extern int loglevel; |
63 | 59 | |
64 | 60 | int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb); |
65 | 61 | int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb); |
66 | -void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf); | |
67 | 62 | unsigned long code_gen_max_block_size(void); |
63 | +void cpu_gen_init(void); | |
68 | 64 | int cpu_gen_code(CPUState *env, struct TranslationBlock *tb, |
69 | 65 | int *gen_code_size_ptr); |
70 | 66 | int cpu_restore_state(struct TranslationBlock *tb, |
... | ... | @@ -120,6 +116,7 @@ static inline int tlb_set_page(CPUState *env, target_ulong vaddr, |
120 | 116 | #elif defined(__powerpc__) |
121 | 117 | #define CODE_GEN_BUFFER_SIZE (6 * 1024 * 1024) |
122 | 118 | #else |
119 | +/* XXX: make it dynamic on x86 */ | |
123 | 120 | #define CODE_GEN_BUFFER_SIZE (16 * 1024 * 1024) |
124 | 121 | #endif |
125 | 122 | |
... | ... | @@ -136,7 +133,7 @@ static inline int tlb_set_page(CPUState *env, target_ulong vaddr, |
136 | 133 | |
137 | 134 | #define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE) |
138 | 135 | |
139 | -#if defined(__powerpc__) | |
136 | +#if defined(__powerpc__) || defined(__x86_64__) | |
140 | 137 | #define USE_DIRECT_JUMP |
141 | 138 | #endif |
142 | 139 | #if defined(__i386__) && !defined(_WIN32) |
... | ... | @@ -169,7 +166,7 @@ typedef struct TranslationBlock { |
169 | 166 | #ifdef USE_DIRECT_JUMP |
170 | 167 | uint16_t tb_jmp_offset[4]; /* offset of jump instruction */ |
171 | 168 | #else |
172 | - uint32_t tb_next[2]; /* address of jump generated code */ | |
169 | + unsigned long tb_next[2]; /* address of jump generated code */ | |
173 | 170 | #endif |
174 | 171 | /* list of TBs jumping to this one. This is a circular list using |
175 | 172 | the two least significant bits of the pointers to tell what is |
... | ... | @@ -228,7 +225,7 @@ static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr |
228 | 225 | asm volatile ("sync" : : : "memory"); |
229 | 226 | asm volatile ("isync" : : : "memory"); |
230 | 227 | } |
231 | -#elif defined(__i386__) | |
228 | +#elif defined(__i386__) || defined(__x86_64__) | |
232 | 229 | static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr) |
233 | 230 | { |
234 | 231 | /* patch the branch destination */ |
... | ... | @@ -294,48 +291,6 @@ TranslationBlock *tb_find_pc(unsigned long pc_ptr); |
294 | 291 | #define ASM_OP_LABEL_NAME(n, opname) \ |
295 | 292 | ASM_NAME(__op_label) #n "." ASM_NAME(opname) |
296 | 293 | |
297 | -#if defined(__powerpc__) | |
298 | - | |
299 | -/* we patch the jump instruction directly */ | |
300 | -#define GOTO_TB(opname, tbparam, n)\ | |
301 | -do {\ | |
302 | - asm volatile (ASM_DATA_SECTION\ | |
303 | - ASM_OP_LABEL_NAME(n, opname) ":\n"\ | |
304 | - ".long 1f\n"\ | |
305 | - ASM_PREVIOUS_SECTION \ | |
306 | - "b " ASM_NAME(__op_jmp) #n "\n"\ | |
307 | - "1:\n");\ | |
308 | -} while (0) | |
309 | - | |
310 | -#elif defined(__i386__) && defined(USE_DIRECT_JUMP) | |
311 | - | |
312 | -/* we patch the jump instruction directly */ | |
313 | -#define GOTO_TB(opname, tbparam, n)\ | |
314 | -do {\ | |
315 | - asm volatile (".section .data\n"\ | |
316 | - ASM_OP_LABEL_NAME(n, opname) ":\n"\ | |
317 | - ".long 1f\n"\ | |
318 | - ASM_PREVIOUS_SECTION \ | |
319 | - "jmp " ASM_NAME(__op_jmp) #n "\n"\ | |
320 | - "1:\n");\ | |
321 | -} while (0) | |
322 | - | |
323 | -#else | |
324 | - | |
325 | -/* jump to next block operations (more portable code, does not need | |
326 | - cache flushing, but slower because of indirect jump) */ | |
327 | -#define GOTO_TB(opname, tbparam, n)\ | |
328 | -do {\ | |
329 | - static void __attribute__((used)) *dummy ## n = &&dummy_label ## n;\ | |
330 | - static void __attribute__((used)) *__op_label ## n \ | |
331 | - __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\ | |
332 | - goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\ | |
333 | -label ## n: ;\ | |
334 | -dummy_label ## n: ;\ | |
335 | -} while (0) | |
336 | - | |
337 | -#endif | |
338 | - | |
339 | 294 | extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4]; |
340 | 295 | extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; |
341 | 296 | extern void *io_mem_opaque[IO_MEM_NB_ENTRIES]; | ... | ... |
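Two related details in the exec-all.h hunk: USE_DIRECT_JUMP is now enabled on x86_64 as well, and tb_set_jmp_target1() gains an x86_64 case. The body of that function lies outside the displayed context; the sketch below shows the usual i386-style patching the "patch the branch destination" comment refers to, and is an assumption rather than part of this diff:

    #elif defined(__i386__) || defined(__x86_64__)
    static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
    {
        /* patch the 32-bit relative displacement of the direct jmp;
           jmp_addr points at the displacement field of the instruction */
        *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
        /* no explicit icache flush is needed on x86 */
    }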
exec.c
... | ... | @@ -312,6 +312,7 @@ void cpu_exec_init(CPUState *env) |
312 | 312 | int cpu_index; |
313 | 313 | |
314 | 314 | if (!code_gen_ptr) { |
315 | + cpu_gen_init(); | |
315 | 316 | code_gen_ptr = code_gen_buffer; |
316 | 317 | page_init(); |
317 | 318 | io_mem_init(); |
... | ... | @@ -1238,10 +1239,10 @@ CPULogItem cpu_log_items[] = { |
1238 | 1239 | { CPU_LOG_TB_IN_ASM, "in_asm", |
1239 | 1240 | "show target assembly code for each compiled TB" }, |
1240 | 1241 | { CPU_LOG_TB_OP, "op", |
1241 | - "show micro ops for each compiled TB (only usable if 'in_asm' used)" }, | |
1242 | + "show micro ops for each compiled TB" }, | |
1242 | 1243 | #ifdef TARGET_I386 |
1243 | 1244 | { CPU_LOG_TB_OP_OPT, "op_opt", |
1244 | - "show micro ops after optimization for each compiled TB" }, | |
1245 | + "show micro ops before eflags optimization" }, | |
1245 | 1246 | #endif |
1246 | 1247 | { CPU_LOG_INT, "int", |
1247 | 1248 | "show interrupts/exceptions in short format" }, |
... | ... | @@ -2935,6 +2936,7 @@ void dump_exec_info(FILE *f, |
2935 | 2936 | } |
2936 | 2937 | } |
2937 | 2938 | /* XXX: avoid using doubles ? */ |
2939 | + cpu_fprintf(f, "Translation buffer state:\n"); | |
2938 | 2940 | cpu_fprintf(f, "TB count %d\n", nb_tbs); |
2939 | 2941 | cpu_fprintf(f, "TB avg target size %d max=%d bytes\n", |
2940 | 2942 | nb_tbs ? target_code_size / nb_tbs : 0, |
... | ... | @@ -2950,9 +2952,49 @@ void dump_exec_info(FILE *f, |
2950 | 2952 | nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0, |
2951 | 2953 | direct_jmp2_count, |
2952 | 2954 | nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0); |
2955 | + cpu_fprintf(f, "\nStatistics:\n"); | |
2953 | 2956 | cpu_fprintf(f, "TB flush count %d\n", tb_flush_count); |
2954 | 2957 | cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count); |
2955 | 2958 | cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count); |
2959 | +#ifdef CONFIG_PROFILER | |
2960 | + { | |
2961 | + int64_t tot; | |
2962 | + tot = dyngen_interm_time + dyngen_code_time; | |
2963 | + cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n", | |
2964 | + tot, tot / 2.4e9); | |
2965 | + cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n", | |
2966 | + dyngen_tb_count, | |
2967 | + dyngen_tb_count1 - dyngen_tb_count, | |
2968 | + dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0); | |
2969 | + cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n", | |
2970 | + dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max); | |
2971 | + cpu_fprintf(f, "old ops/total ops %0.1f%%\n", | |
2972 | + dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0); | |
2973 | + cpu_fprintf(f, "deleted ops/TB %0.2f\n", | |
2974 | + dyngen_tb_count ? | |
2975 | + (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0); | |
2976 | + cpu_fprintf(f, "cycles/op %0.1f\n", | |
2977 | + dyngen_op_count ? (double)tot / dyngen_op_count : 0); | |
2978 | + cpu_fprintf(f, "cycles/in byte %0.1f\n", | |
2979 | + dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0); | |
2980 | + cpu_fprintf(f, "cycles/out byte %0.1f\n", | |
2981 | + dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0); | |
2982 | + if (tot == 0) | |
2983 | + tot = 1; | |
2984 | + cpu_fprintf(f, " gen_interm time %0.1f%%\n", | |
2985 | + (double)dyngen_interm_time / tot * 100.0); | |
2986 | + cpu_fprintf(f, " gen_code time %0.1f%%\n", | |
2987 | + (double)dyngen_code_time / tot * 100.0); | |
2988 | + cpu_fprintf(f, "cpu_restore count %" PRId64 "\n", | |
2989 | + dyngen_restore_count); | |
2990 | + cpu_fprintf(f, " avg cycles %0.1f\n", | |
2991 | + dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0); | |
2992 | + { | |
2993 | + extern void dump_op_count(void); | |
2994 | + dump_op_count(); | |
2995 | + } | |
2996 | + } | |
2997 | +#endif | |
2956 | 2998 | } |
2957 | 2999 | |
2958 | 3000 | #if !defined(CONFIG_USER_ONLY) | ... | ... |
target-alpha/op.c
target-alpha/translate.c
... | ... | @@ -25,6 +25,7 @@ |
25 | 25 | #include "cpu.h" |
26 | 26 | #include "exec-all.h" |
27 | 27 | #include "disas.h" |
28 | +#include "tcg-op.h" | |
28 | 29 | |
29 | 30 | #define DO_SINGLE_STEP |
30 | 31 | #define GENERATE_NOP |
... | ... | @@ -41,24 +42,6 @@ struct DisasContext { |
41 | 42 | uint32_t amask; |
42 | 43 | }; |
43 | 44 | |
44 | -#ifdef USE_DIRECT_JUMP | |
45 | -#define TBPARAM(x) | |
46 | -#else | |
47 | -#define TBPARAM(x) (long)(x) | |
48 | -#endif | |
49 | - | |
50 | -enum { | |
51 | -#define DEF(s, n, copy_size) INDEX_op_ ## s, | |
52 | -#include "opc.h" | |
53 | -#undef DEF | |
54 | - NB_OPS, | |
55 | -}; | |
56 | - | |
57 | -static uint16_t *gen_opc_ptr; | |
58 | -static uint32_t *gen_opparam_ptr; | |
59 | - | |
60 | -#include "gen-op.h" | |
61 | - | |
62 | 45 | static always_inline void gen_op_nop (void) |
63 | 46 | { |
64 | 47 | #if defined(GENERATE_NOP) |
... | ... | @@ -1988,10 +1971,7 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
1988 | 1971 | int ret; |
1989 | 1972 | |
1990 | 1973 | pc_start = tb->pc; |
1991 | - gen_opc_ptr = gen_opc_buf; | |
1992 | 1974 | gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; |
1993 | - gen_opparam_ptr = gen_opparam_buf; | |
1994 | - nb_gen_labels = 0; | |
1995 | 1975 | ctx.pc = pc_start; |
1996 | 1976 | ctx.amask = env->amask; |
1997 | 1977 | #if defined (CONFIG_USER_ONLY) |
... | ... | @@ -2051,12 +2031,11 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
2051 | 2031 | if (ret != 1 && ret != 3) { |
2052 | 2032 | gen_update_pc(&ctx); |
2053 | 2033 | } |
2054 | - gen_op_reset_T0(); | |
2055 | 2034 | #if defined (DO_TB_FLUSH) |
2056 | 2035 | gen_op_tb_flush(); |
2057 | 2036 | #endif |
2058 | 2037 | /* Generate the return instruction */ |
2059 | - gen_op_exit_tb(); | |
2038 | + tcg_gen_exit_tb(0); | |
2060 | 2039 | *gen_opc_ptr = INDEX_op_end; |
2061 | 2040 | if (search_pc) { |
2062 | 2041 | j = gen_opc_ptr - gen_opc_buf; |
... | ... | @@ -2075,11 +2054,6 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
2075 | 2054 | target_disas(logfile, pc_start, ctx.pc - pc_start, 1); |
2076 | 2055 | fprintf(logfile, "\n"); |
2077 | 2056 | } |
2078 | - if (loglevel & CPU_LOG_TB_OP) { | |
2079 | - fprintf(logfile, "OP:\n"); | |
2080 | - dump_ops(gen_opc_buf, gen_opparam_buf); | |
2081 | - fprintf(logfile, "\n"); | |
2082 | - } | |
2083 | 2057 | #endif |
2084 | 2058 | |
2085 | 2059 | return 0; | ... | ... |
target-arm/op.c
... | ... | @@ -364,21 +364,6 @@ void OPPROTO op_testn_T0(void) |
364 | 364 | FORCE_RET(); |
365 | 365 | } |
366 | 366 | |
367 | -void OPPROTO op_goto_tb0(void) | |
368 | -{ | |
369 | - GOTO_TB(op_goto_tb0, PARAM1, 0); | |
370 | -} | |
371 | - | |
372 | -void OPPROTO op_goto_tb1(void) | |
373 | -{ | |
374 | - GOTO_TB(op_goto_tb1, PARAM1, 1); | |
375 | -} | |
376 | - | |
377 | -void OPPROTO op_exit_tb(void) | |
378 | -{ | |
379 | - EXIT_TB(); | |
380 | -} | |
381 | - | |
382 | 367 | void OPPROTO op_movl_T0_cpsr(void) |
383 | 368 | { |
384 | 369 | /* Execution state bits always read as zero. */ | ... | ... |
target-arm/translate.c
... | ... | @@ -28,6 +28,7 @@ |
28 | 28 | #include "cpu.h" |
29 | 29 | #include "exec-all.h" |
30 | 30 | #include "disas.h" |
31 | +#include "tcg-op.h" | |
31 | 32 | |
32 | 33 | #define ENABLE_ARCH_5J 0 |
33 | 34 | #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6) |
... | ... | @@ -68,27 +69,10 @@ typedef struct DisasContext { |
68 | 69 | #define DISAS_WFI 4 |
69 | 70 | #define DISAS_SWI 5 |
70 | 71 | |
71 | -#ifdef USE_DIRECT_JUMP | |
72 | -#define TBPARAM(x) | |
73 | -#else | |
74 | -#define TBPARAM(x) (long)(x) | |
75 | -#endif | |
76 | - | |
77 | 72 | /* XXX: move that elsewhere */ |
78 | -static uint16_t *gen_opc_ptr; | |
79 | -static uint32_t *gen_opparam_ptr; | |
80 | 73 | extern FILE *logfile; |
81 | 74 | extern int loglevel; |
82 | 75 | |
83 | -enum { | |
84 | -#define DEF(s, n, copy_size) INDEX_op_ ## s, | |
85 | -#include "opc.h" | |
86 | -#undef DEF | |
87 | - NB_OPS, | |
88 | -}; | |
89 | - | |
90 | -#include "gen-op.h" | |
91 | - | |
92 | 76 | #define PAS_OP(pfx) { \ |
93 | 77 | gen_op_ ## pfx ## add16_T0_T1, \ |
94 | 78 | gen_op_ ## pfx ## addsubx_T0_T1, \ |
... | ... | @@ -2432,19 +2416,14 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest) |
2432 | 2416 | |
2433 | 2417 | tb = s->tb; |
2434 | 2418 | if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { |
2435 | - if (n == 0) | |
2436 | - gen_op_goto_tb0(TBPARAM(tb)); | |
2437 | - else | |
2438 | - gen_op_goto_tb1(TBPARAM(tb)); | |
2419 | + tcg_gen_goto_tb(n); | |
2439 | 2420 | gen_op_movl_T0_im(dest); |
2440 | 2421 | gen_op_movl_r15_T0(); |
2441 | - gen_op_movl_T0_im((long)tb + n); | |
2442 | - gen_op_exit_tb(); | |
2422 | + tcg_gen_exit_tb((long)tb + n); | |
2443 | 2423 | } else { |
2444 | 2424 | gen_op_movl_T0_im(dest); |
2445 | 2425 | gen_op_movl_r15_T0(); |
2446 | - gen_op_movl_T0_0(); | |
2447 | - gen_op_exit_tb(); | |
2426 | + tcg_gen_exit_tb(0); | |
2448 | 2427 | } |
2449 | 2428 | } |
2450 | 2429 | |
... | ... | @@ -7486,9 +7465,7 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
7486 | 7465 | |
7487 | 7466 | dc->tb = tb; |
7488 | 7467 | |
7489 | - gen_opc_ptr = gen_opc_buf; | |
7490 | 7468 | gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; |
7491 | - gen_opparam_ptr = gen_opparam_buf; | |
7492 | 7469 | |
7493 | 7470 | dc->is_jmp = DISAS_NEXT; |
7494 | 7471 | dc->pc = pc_start; |
... | ... | @@ -7506,7 +7483,6 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
7506 | 7483 | } |
7507 | 7484 | #endif |
7508 | 7485 | next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; |
7509 | - nb_gen_labels = 0; | |
7510 | 7486 | lj = -1; |
7511 | 7487 | /* Reset the conditional execution bits immediately. This avoids |
7512 | 7488 | complications trying to do it at the end of the block. */ |
... | ... | @@ -7625,8 +7601,7 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
7625 | 7601 | case DISAS_JUMP: |
7626 | 7602 | case DISAS_UPDATE: |
7627 | 7603 | /* indicate that the hash table must be used to find the next TB */ |
7628 | - gen_op_movl_T0_0(); | |
7629 | - gen_op_exit_tb(); | |
7604 | + tcg_gen_exit_tb(0); | |
7630 | 7605 | break; |
7631 | 7606 | case DISAS_TB_JUMP: |
7632 | 7607 | /* nothing more to generate */ |
... | ... | @@ -7654,11 +7629,6 @@ done_generating: |
7654 | 7629 | fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start)); |
7655 | 7630 | target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb); |
7656 | 7631 | fprintf(logfile, "\n"); |
7657 | - if (loglevel & (CPU_LOG_TB_OP)) { | |
7658 | - fprintf(logfile, "OP:\n"); | |
7659 | - dump_ops(gen_opc_buf, gen_opparam_buf); | |
7660 | - fprintf(logfile, "\n"); | |
7661 | - } | |
7662 | 7632 | } |
7663 | 7633 | #endif |
7664 | 7634 | if (search_pc) { | ... | ... |
target-cris/op.c
... | ... | @@ -151,23 +151,6 @@ |
151 | 151 | |
152 | 152 | /* Microcode. */ |
153 | 153 | |
154 | -void OPPROTO op_exit_tb (void) | |
155 | -{ | |
156 | - EXIT_TB(); | |
157 | -} | |
158 | - | |
159 | -void OPPROTO op_goto_tb0 (void) | |
160 | -{ | |
161 | - GOTO_TB(op_goto_tb0, PARAM1, 0); | |
162 | - RETURN(); | |
163 | -} | |
164 | - | |
165 | -void OPPROTO op_goto_tb1 (void) | |
166 | -{ | |
167 | - GOTO_TB(op_goto_tb1, PARAM1, 1); | |
168 | - RETURN(); | |
169 | -} | |
170 | - | |
171 | 154 | void OPPROTO op_break_im(void) |
172 | 155 | { |
173 | 156 | env->trapnr = PARAM1; |
... | ... | @@ -1268,7 +1251,7 @@ void OPPROTO op_movl_btarget_T0 (void) |
1268 | 1251 | RETURN(); |
1269 | 1252 | } |
1270 | 1253 | |
1271 | -void OPPROTO op_jmp (void) | |
1254 | +void OPPROTO op_jmp1 (void) | |
1272 | 1255 | { |
1273 | 1256 | env->pc = env->btarget; |
1274 | 1257 | RETURN(); | ... | ... |
target-cris/translate.c
... | ... | @@ -51,6 +51,7 @@ |
51 | 51 | #include "cpu.h" |
52 | 52 | #include "exec-all.h" |
53 | 53 | #include "disas.h" |
54 | +#include "tcg-op.h" | |
54 | 55 | #include "crisv32-decode.h" |
55 | 56 | |
56 | 57 | #define CRIS_STATS 0 |
... | ... | @@ -67,12 +68,6 @@ |
67 | 68 | #define DIS(x) |
68 | 69 | #endif |
69 | 70 | |
70 | -#ifdef USE_DIRECT_JUMP | |
71 | -#define TBPARAM(x) | |
72 | -#else | |
73 | -#define TBPARAM(x) (long)(x) | |
74 | -#endif | |
75 | - | |
76 | 71 | #define BUG() (gen_BUG(dc, __FILE__, __LINE__)) |
77 | 72 | #define BUG_ON(x) ({if (x) BUG();}) |
78 | 73 | |
... | ... | @@ -85,17 +80,6 @@ |
85 | 80 | #define CC_MASK_NZVC 0xf |
86 | 81 | #define CC_MASK_RNZV 0x10e |
87 | 82 | |
88 | -static uint16_t *gen_opc_ptr; | |
89 | -static uint32_t *gen_opparam_ptr; | |
90 | - | |
91 | -enum { | |
92 | -#define DEF(s, n, copy_size) INDEX_op_ ## s, | |
93 | -#include "opc.h" | |
94 | -#undef DEF | |
95 | - NB_OPS, | |
96 | -}; | |
97 | -#include "gen-op.h" | |
98 | - | |
99 | 83 | /* This is the state at translation time. */ |
100 | 84 | typedef struct DisasContext { |
101 | 85 | CPUState *env; |
... | ... | @@ -264,15 +248,14 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) |
264 | 248 | TranslationBlock *tb; |
265 | 249 | tb = dc->tb; |
266 | 250 | if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { |
267 | - if (n == 0) | |
268 | - gen_op_goto_tb0(TBPARAM(tb)); | |
269 | - else | |
270 | - gen_op_goto_tb1(TBPARAM(tb)); | |
271 | - gen_op_movl_T0_0(); | |
251 | +#if 0 | |
252 | + /* XXX: this code is not finished */ | |
253 | + tcg_gen_goto_tb(n); | |
254 | +#endif | |
255 | + tcg_gen_exit_tb(0); | |
272 | 256 | } else { |
273 | - gen_op_movl_T0_0(); | |
257 | + tcg_gen_exit_tb(0); | |
274 | 258 | } |
275 | - gen_op_exit_tb(); | |
276 | 259 | } |
277 | 260 | |
278 | 261 | /* Sign extend at translation time. */ |
... | ... | @@ -2325,9 +2308,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
2325 | 2308 | dc->env = env; |
2326 | 2309 | dc->tb = tb; |
2327 | 2310 | |
2328 | - gen_opc_ptr = gen_opc_buf; | |
2329 | 2311 | gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; |
2330 | - gen_opparam_ptr = gen_opparam_buf; | |
2331 | 2312 | |
2332 | 2313 | dc->is_jmp = DISAS_NEXT; |
2333 | 2314 | dc->pc = pc_start; |
... | ... | @@ -2374,7 +2355,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
2374 | 2355 | if (dc->delayed_branch == 0) |
2375 | 2356 | { |
2376 | 2357 | if (dc->bcc == CC_A) { |
2377 | - gen_op_jmp (); | |
2358 | + gen_op_jmp1 (); | |
2378 | 2359 | dc->is_jmp = DISAS_UPDATE; |
2379 | 2360 | } |
2380 | 2361 | else { |
... | ... | @@ -2409,9 +2390,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
2409 | 2390 | case DISAS_UPDATE: |
2410 | 2391 | /* indicate that the hash table must be used |
2411 | 2392 | to find the next TB */ |
2412 | - /* T0 is used to index the jmp tables. */ | |
2413 | - gen_op_movl_T0_0(); | |
2414 | - gen_op_exit_tb(); | |
2393 | + tcg_gen_exit_tb(0); | |
2415 | 2394 | break; |
2416 | 2395 | case DISAS_TB_JUMP: |
2417 | 2396 | /* nothing more to generate */ |
... | ... | @@ -2434,11 +2413,6 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
2434 | 2413 | fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start)); |
2435 | 2414 | target_disas(logfile, pc_start, dc->pc + 4 - pc_start, 0); |
2436 | 2415 | fprintf(logfile, "\n"); |
2437 | - if (loglevel & CPU_LOG_TB_OP) { | |
2438 | - fprintf(logfile, "OP:\n"); | |
2439 | - dump_ops(gen_opc_buf, gen_opparam_buf); | |
2440 | - fprintf(logfile, "\n"); | |
2441 | - } | |
2442 | 2416 | } |
2443 | 2417 | #endif |
2444 | 2418 | return 0; | ... | ... |
target-i386/exec.h
... | ... | @@ -181,8 +181,9 @@ void __hidden cpu_loop_exit(void); |
181 | 181 | |
182 | 182 | void OPPROTO op_movl_eflags_T0(void); |
183 | 183 | void OPPROTO op_movl_T0_eflags(void); |
184 | -void helper_divl_EAX_T0(void); | |
185 | -void helper_idivl_EAX_T0(void); | |
184 | + | |
185 | +#include "helper.h" | |
186 | + | |
186 | 187 | void helper_mulq_EAX_T0(void); |
187 | 188 | void helper_imulq_EAX_T0(void); |
188 | 189 | void helper_imulq_T0_T1(void); | ... | ... |
target-i386/helper.c
... | ... | @@ -1608,13 +1608,13 @@ int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den) |
1608 | 1608 | } |
1609 | 1609 | #endif |
1610 | 1610 | |
1611 | -void helper_divl_EAX_T0(void) | |
1611 | +void helper_divl_EAX_T0(target_ulong t0) | |
1612 | 1612 | { |
1613 | 1613 | unsigned int den, r; |
1614 | 1614 | uint64_t num, q; |
1615 | 1615 | |
1616 | 1616 | num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); |
1617 | - den = T0; | |
1617 | + den = t0; | |
1618 | 1618 | if (den == 0) { |
1619 | 1619 | raise_exception(EXCP00_DIVZ); |
1620 | 1620 | } |
... | ... | @@ -1630,13 +1630,13 @@ void helper_divl_EAX_T0(void) |
1630 | 1630 | EDX = (uint32_t)r; |
1631 | 1631 | } |
1632 | 1632 | |
1633 | -void helper_idivl_EAX_T0(void) | |
1633 | +void helper_idivl_EAX_T0(target_ulong t0) | |
1634 | 1634 | { |
1635 | 1635 | int den, r; |
1636 | 1636 | int64_t num, q; |
1637 | 1637 | |
1638 | 1638 | num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); |
1639 | - den = T0; | |
1639 | + den = t0; | |
1640 | 1640 | if (den == 0) { |
1641 | 1641 | raise_exception(EXCP00_DIVZ); |
1642 | 1642 | } | ... | ... |
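The helper_divl_EAX_T0/helper_idivl_EAX_T0 changes show the other recurring conversion in this commit: a helper that used to read its operand from the T0 global now takes it as an explicit argument, with the prototype moved into the new target-i386/helper.h, so the code generator can pass the value directly. The pattern, condensed from the hunk above:

    /* before: implicit operand via the T0 global */
    void helper_divl_EAX_T0(void)
    {
        unsigned int den = T0;
        /* ... */
    }

    /* after: explicit operand, declared in target-i386/helper.h */
    void helper_divl_EAX_T0(target_ulong t0)
    {
        unsigned int den = t0;
        /* ... */
    }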
target-i386/helper.h
0 → 100644
target-i386/op.c
... | ... | @@ -172,31 +172,6 @@ void OPPROTO op_testl_T0_T1_cc(void) |
172 | 172 | |
173 | 173 | /* operations without flags */ |
174 | 174 | |
175 | -void OPPROTO op_addl_T0_T1(void) | |
176 | -{ | |
177 | - T0 += T1; | |
178 | -} | |
179 | - | |
180 | -void OPPROTO op_orl_T0_T1(void) | |
181 | -{ | |
182 | - T0 |= T1; | |
183 | -} | |
184 | - | |
185 | -void OPPROTO op_andl_T0_T1(void) | |
186 | -{ | |
187 | - T0 &= T1; | |
188 | -} | |
189 | - | |
190 | -void OPPROTO op_subl_T0_T1(void) | |
191 | -{ | |
192 | - T0 -= T1; | |
193 | -} | |
194 | - | |
195 | -void OPPROTO op_xorl_T0_T1(void) | |
196 | -{ | |
197 | - T0 ^= T1; | |
198 | -} | |
199 | - | |
200 | 175 | void OPPROTO op_negl_T0(void) |
201 | 176 | { |
202 | 177 | T0 = -T0; |
... | ... | @@ -217,18 +192,6 @@ void OPPROTO op_notl_T0(void) |
217 | 192 | T0 = ~T0; |
218 | 193 | } |
219 | 194 | |
220 | -void OPPROTO op_bswapl_T0(void) | |
221 | -{ | |
222 | - T0 = bswap32(T0); | |
223 | -} | |
224 | - | |
225 | -#ifdef TARGET_X86_64 | |
226 | -void OPPROTO op_bswapq_T0(void) | |
227 | -{ | |
228 | - helper_bswapq_T0(); | |
229 | -} | |
230 | -#endif | |
231 | - | |
232 | 195 | /* multiply/divide */ |
233 | 196 | |
234 | 197 | /* XXX: add eflags optimizations */ |
... | ... | @@ -399,16 +362,6 @@ void OPPROTO op_idivw_AX_T0(void) |
399 | 362 | EDX = (EDX & ~0xffff) | r; |
400 | 363 | } |
401 | 364 | |
402 | -void OPPROTO op_divl_EAX_T0(void) | |
403 | -{ | |
404 | - helper_divl_EAX_T0(); | |
405 | -} | |
406 | - | |
407 | -void OPPROTO op_idivl_EAX_T0(void) | |
408 | -{ | |
409 | - helper_idivl_EAX_T0(); | |
410 | -} | |
411 | - | |
412 | 365 | #ifdef TARGET_X86_64 |
413 | 366 | void OPPROTO op_divq_EAX_T0(void) |
414 | 367 | { |
... | ... | @@ -424,46 +377,6 @@ void OPPROTO op_idivq_EAX_T0(void) |
424 | 377 | /* constant load & misc op */ |
425 | 378 | |
426 | 379 | /* XXX: consistent names */ |
427 | -void OPPROTO op_movl_T0_imu(void) | |
428 | -{ | |
429 | - T0 = (uint32_t)PARAM1; | |
430 | -} | |
431 | - | |
432 | -void OPPROTO op_movl_T0_im(void) | |
433 | -{ | |
434 | - T0 = (int32_t)PARAM1; | |
435 | -} | |
436 | - | |
437 | -void OPPROTO op_addl_T0_im(void) | |
438 | -{ | |
439 | - T0 += PARAM1; | |
440 | -} | |
441 | - | |
442 | -void OPPROTO op_andl_T0_ffff(void) | |
443 | -{ | |
444 | - T0 = T0 & 0xffff; | |
445 | -} | |
446 | - | |
447 | -void OPPROTO op_andl_T0_im(void) | |
448 | -{ | |
449 | - T0 = T0 & PARAM1; | |
450 | -} | |
451 | - | |
452 | -void OPPROTO op_movl_T0_T1(void) | |
453 | -{ | |
454 | - T0 = T1; | |
455 | -} | |
456 | - | |
457 | -void OPPROTO op_movl_T1_imu(void) | |
458 | -{ | |
459 | - T1 = (uint32_t)PARAM1; | |
460 | -} | |
461 | - | |
462 | -void OPPROTO op_movl_T1_im(void) | |
463 | -{ | |
464 | - T1 = (int32_t)PARAM1; | |
465 | -} | |
466 | - | |
467 | 380 | void OPPROTO op_addl_T1_im(void) |
468 | 381 | { |
469 | 382 | T1 += PARAM1; |
... | ... | @@ -474,26 +387,6 @@ void OPPROTO op_movl_T1_A0(void) |
474 | 387 | T1 = A0; |
475 | 388 | } |
476 | 389 | |
477 | -void OPPROTO op_movl_A0_im(void) | |
478 | -{ | |
479 | - A0 = (uint32_t)PARAM1; | |
480 | -} | |
481 | - | |
482 | -void OPPROTO op_addl_A0_im(void) | |
483 | -{ | |
484 | - A0 = (uint32_t)(A0 + PARAM1); | |
485 | -} | |
486 | - | |
487 | -void OPPROTO op_movl_A0_seg(void) | |
488 | -{ | |
489 | - A0 = (uint32_t)*(target_ulong *)((char *)env + PARAM1); | |
490 | -} | |
491 | - | |
492 | -void OPPROTO op_addl_A0_seg(void) | |
493 | -{ | |
494 | - A0 = (uint32_t)(A0 + *(target_ulong *)((char *)env + PARAM1)); | |
495 | -} | |
496 | - | |
497 | 390 | void OPPROTO op_addl_A0_AL(void) |
498 | 391 | { |
499 | 392 | A0 = (uint32_t)(A0 + (EAX & 0xff)); |
... | ... | @@ -523,46 +416,6 @@ typedef union UREG64 { |
523 | 416 | |
524 | 417 | #ifdef TARGET_X86_64 |
525 | 418 | |
526 | -void OPPROTO op_movq_T0_im64(void) | |
527 | -{ | |
528 | - T0 = PARAMQ1; | |
529 | -} | |
530 | - | |
531 | -void OPPROTO op_movq_T1_im64(void) | |
532 | -{ | |
533 | - T1 = PARAMQ1; | |
534 | -} | |
535 | - | |
536 | -void OPPROTO op_movq_A0_im(void) | |
537 | -{ | |
538 | - A0 = (int32_t)PARAM1; | |
539 | -} | |
540 | - | |
541 | -void OPPROTO op_movq_A0_im64(void) | |
542 | -{ | |
543 | - A0 = PARAMQ1; | |
544 | -} | |
545 | - | |
546 | -void OPPROTO op_addq_A0_im(void) | |
547 | -{ | |
548 | - A0 = (A0 + (int32_t)PARAM1); | |
549 | -} | |
550 | - | |
551 | -void OPPROTO op_addq_A0_im64(void) | |
552 | -{ | |
553 | - A0 = (A0 + PARAMQ1); | |
554 | -} | |
555 | - | |
556 | -void OPPROTO op_movq_A0_seg(void) | |
557 | -{ | |
558 | - A0 = *(target_ulong *)((char *)env + PARAM1); | |
559 | -} | |
560 | - | |
561 | -void OPPROTO op_addq_A0_seg(void) | |
562 | -{ | |
563 | - A0 += *(target_ulong *)((char *)env + PARAM1); | |
564 | -} | |
565 | - | |
566 | 419 | void OPPROTO op_addq_A0_AL(void) |
567 | 420 | { |
568 | 421 | A0 = (A0 + (EAX & 0xff)); |
... | ... | @@ -570,11 +423,6 @@ void OPPROTO op_addq_A0_AL(void) |
570 | 423 | |
571 | 424 | #endif |
572 | 425 | |
573 | -void OPPROTO op_andl_A0_ffff(void) | |
574 | -{ | |
575 | - A0 = A0 & 0xffff; | |
576 | -} | |
577 | - | |
578 | 426 | /* memory access */ |
579 | 427 | |
580 | 428 | #define MEMSUFFIX _raw |
... | ... | @@ -588,30 +436,6 @@ void OPPROTO op_andl_A0_ffff(void) |
588 | 436 | #include "ops_mem.h" |
589 | 437 | #endif |
590 | 438 | |
591 | -/* indirect jump */ | |
592 | - | |
593 | -void OPPROTO op_jmp_T0(void) | |
594 | -{ | |
595 | - EIP = T0; | |
596 | -} | |
597 | - | |
598 | -void OPPROTO op_movl_eip_im(void) | |
599 | -{ | |
600 | - EIP = (uint32_t)PARAM1; | |
601 | -} | |
602 | - | |
603 | -#ifdef TARGET_X86_64 | |
604 | -void OPPROTO op_movq_eip_im(void) | |
605 | -{ | |
606 | - EIP = (int32_t)PARAM1; | |
607 | -} | |
608 | - | |
609 | -void OPPROTO op_movq_eip_im64(void) | |
610 | -{ | |
611 | - EIP = PARAMQ1; | |
612 | -} | |
613 | -#endif | |
614 | - | |
615 | 439 | void OPPROTO op_hlt(void) |
616 | 440 | { |
617 | 441 | helper_hlt(); |
... | ... | @@ -735,16 +559,6 @@ void OPPROTO op_single_step(void) |
735 | 559 | helper_single_step(); |
736 | 560 | } |
737 | 561 | |
738 | -void OPPROTO op_movl_T0_0(void) | |
739 | -{ | |
740 | - T0 = 0; | |
741 | -} | |
742 | - | |
743 | -void OPPROTO op_exit_tb(void) | |
744 | -{ | |
745 | - EXIT_TB(); | |
746 | -} | |
747 | - | |
748 | 562 | /* multiple size ops */ |
749 | 563 | |
750 | 564 | #define ldul ldl |
... | ... | @@ -879,75 +693,6 @@ void OPPROTO op_decq_ECX(void) |
879 | 693 | } |
880 | 694 | #endif |
881 | 695 | |
882 | -/* push/pop utils */ | |
883 | - | |
884 | -void op_addl_A0_SS(void) | |
885 | -{ | |
886 | - A0 = (uint32_t)(A0 + env->segs[R_SS].base); | |
887 | -} | |
888 | - | |
889 | -void op_subl_A0_2(void) | |
890 | -{ | |
891 | - A0 = (uint32_t)(A0 - 2); | |
892 | -} | |
893 | - | |
894 | -void op_subl_A0_4(void) | |
895 | -{ | |
896 | - A0 = (uint32_t)(A0 - 4); | |
897 | -} | |
898 | - | |
899 | -void op_addl_ESP_4(void) | |
900 | -{ | |
901 | - ESP = (uint32_t)(ESP + 4); | |
902 | -} | |
903 | - | |
904 | -void op_addl_ESP_2(void) | |
905 | -{ | |
906 | - ESP = (uint32_t)(ESP + 2); | |
907 | -} | |
908 | - | |
909 | -void op_addw_ESP_4(void) | |
910 | -{ | |
911 | - ESP = (ESP & ~0xffff) | ((ESP + 4) & 0xffff); | |
912 | -} | |
913 | - | |
914 | -void op_addw_ESP_2(void) | |
915 | -{ | |
916 | - ESP = (ESP & ~0xffff) | ((ESP + 2) & 0xffff); | |
917 | -} | |
918 | - | |
919 | -void op_addl_ESP_im(void) | |
920 | -{ | |
921 | - ESP = (uint32_t)(ESP + PARAM1); | |
922 | -} | |
923 | - | |
924 | -void op_addw_ESP_im(void) | |
925 | -{ | |
926 | - ESP = (ESP & ~0xffff) | ((ESP + PARAM1) & 0xffff); | |
927 | -} | |
928 | - | |
929 | -#ifdef TARGET_X86_64 | |
930 | -void op_subq_A0_2(void) | |
931 | -{ | |
932 | - A0 -= 2; | |
933 | -} | |
934 | - | |
935 | -void op_subq_A0_8(void) | |
936 | -{ | |
937 | - A0 -= 8; | |
938 | -} | |
939 | - | |
940 | -void op_addq_ESP_8(void) | |
941 | -{ | |
942 | - ESP += 8; | |
943 | -} | |
944 | - | |
945 | -void op_addq_ESP_im(void) | |
946 | -{ | |
947 | - ESP += PARAM1; | |
948 | -} | |
949 | -#endif | |
950 | - | |
951 | 696 | void OPPROTO op_rdtsc(void) |
952 | 697 | { |
953 | 698 | helper_rdtsc(); |
... | ... | @@ -1362,16 +1107,6 @@ void OPPROTO op_clts(void) |
1362 | 1107 | |
1363 | 1108 | /* flags handling */ |
1364 | 1109 | |
1365 | -void OPPROTO op_goto_tb0(void) | |
1366 | -{ | |
1367 | - GOTO_TB(op_goto_tb0, PARAM1, 0); | |
1368 | -} | |
1369 | - | |
1370 | -void OPPROTO op_goto_tb1(void) | |
1371 | -{ | |
1372 | - GOTO_TB(op_goto_tb1, PARAM1, 1); | |
1373 | -} | |
1374 | - | |
1375 | 1110 | void OPPROTO op_jmp_label(void) |
1376 | 1111 | { |
1377 | 1112 | GOTO_LABEL_PARAM(1); |
... | ... | @@ -1451,11 +1186,6 @@ void OPPROTO op_xor_T0_1(void) |
1451 | 1186 | T0 ^= 1; |
1452 | 1187 | } |
1453 | 1188 | |
1454 | -void OPPROTO op_set_cc_op(void) | |
1455 | -{ | |
1456 | - CC_OP = PARAM1; | |
1457 | -} | |
1458 | - | |
1459 | 1189 | void OPPROTO op_mov_T0_cc(void) |
1460 | 1190 | { |
1461 | 1191 | T0 = cc_table[CC_OP].compute_all(); | ... | ... |
target-i386/opreg_template.h
... | ... | @@ -18,110 +18,6 @@ |
18 | 18 | * License along with this library; if not, write to the Free Software |
19 | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 | 20 | */ |
21 | -void OPPROTO glue(op_movl_A0,REGNAME)(void) | |
22 | -{ | |
23 | - A0 = (uint32_t)REG; | |
24 | -} | |
25 | - | |
26 | -void OPPROTO glue(op_addl_A0,REGNAME)(void) | |
27 | -{ | |
28 | - A0 = (uint32_t)(A0 + REG); | |
29 | -} | |
30 | - | |
31 | -void OPPROTO glue(glue(op_addl_A0,REGNAME),_s1)(void) | |
32 | -{ | |
33 | - A0 = (uint32_t)(A0 + (REG << 1)); | |
34 | -} | |
35 | - | |
36 | -void OPPROTO glue(glue(op_addl_A0,REGNAME),_s2)(void) | |
37 | -{ | |
38 | - A0 = (uint32_t)(A0 + (REG << 2)); | |
39 | -} | |
40 | - | |
41 | -void OPPROTO glue(glue(op_addl_A0,REGNAME),_s3)(void) | |
42 | -{ | |
43 | - A0 = (uint32_t)(A0 + (REG << 3)); | |
44 | -} | |
45 | - | |
46 | -#ifdef TARGET_X86_64 | |
47 | -void OPPROTO glue(op_movq_A0,REGNAME)(void) | |
48 | -{ | |
49 | - A0 = REG; | |
50 | -} | |
51 | - | |
52 | -void OPPROTO glue(op_addq_A0,REGNAME)(void) | |
53 | -{ | |
54 | - A0 = (A0 + REG); | |
55 | -} | |
56 | - | |
57 | -void OPPROTO glue(glue(op_addq_A0,REGNAME),_s1)(void) | |
58 | -{ | |
59 | - A0 = (A0 + (REG << 1)); | |
60 | -} | |
61 | - | |
62 | -void OPPROTO glue(glue(op_addq_A0,REGNAME),_s2)(void) | |
63 | -{ | |
64 | - A0 = (A0 + (REG << 2)); | |
65 | -} | |
66 | - | |
67 | -void OPPROTO glue(glue(op_addq_A0,REGNAME),_s3)(void) | |
68 | -{ | |
69 | - A0 = (A0 + (REG << 3)); | |
70 | -} | |
71 | -#endif | |
72 | - | |
73 | -void OPPROTO glue(op_movl_T0,REGNAME)(void) | |
74 | -{ | |
75 | - T0 = REG; | |
76 | -} | |
77 | - | |
78 | -void OPPROTO glue(op_movl_T1,REGNAME)(void) | |
79 | -{ | |
80 | - T1 = REG; | |
81 | -} | |
82 | - | |
83 | -void OPPROTO glue(op_movh_T0,REGNAME)(void) | |
84 | -{ | |
85 | - T0 = REG >> 8; | |
86 | -} | |
87 | - | |
88 | -void OPPROTO glue(op_movh_T1,REGNAME)(void) | |
89 | -{ | |
90 | - T1 = REG >> 8; | |
91 | -} | |
92 | - | |
93 | -void OPPROTO glue(glue(op_movl,REGNAME),_T0)(void) | |
94 | -{ | |
95 | - REG = (uint32_t)T0; | |
96 | -} | |
97 | - | |
98 | -void OPPROTO glue(glue(op_movl,REGNAME),_T1)(void) | |
99 | -{ | |
100 | - REG = (uint32_t)T1; | |
101 | -} | |
102 | - | |
103 | -void OPPROTO glue(glue(op_movl,REGNAME),_A0)(void) | |
104 | -{ | |
105 | - REG = (uint32_t)A0; | |
106 | -} | |
107 | - | |
108 | -#ifdef TARGET_X86_64 | |
109 | -void OPPROTO glue(glue(op_movq,REGNAME),_T0)(void) | |
110 | -{ | |
111 | - REG = T0; | |
112 | -} | |
113 | - | |
114 | -void OPPROTO glue(glue(op_movq,REGNAME),_T1)(void) | |
115 | -{ | |
116 | - REG = T1; | |
117 | -} | |
118 | - | |
119 | -void OPPROTO glue(glue(op_movq,REGNAME),_A0)(void) | |
120 | -{ | |
121 | - REG = A0; | |
122 | -} | |
123 | -#endif | |
124 | - | |
125 | 21 | /* mov T1 to REG if T0 is true */ |
126 | 22 | void OPPROTO glue(glue(op_cmovw,REGNAME),_T1_T0)(void) |
127 | 23 | { |
... | ... | @@ -132,8 +28,15 @@ void OPPROTO glue(glue(op_cmovw,REGNAME),_T1_T0)(void) |
132 | 28 | |
133 | 29 | void OPPROTO glue(glue(op_cmovl,REGNAME),_T1_T0)(void) |
134 | 30 | { |
31 | +#ifdef TARGET_X86_64 | |
135 | 32 | if (T0) |
136 | 33 | REG = (uint32_t)T1; |
34 | + else | |
35 | + REG = (uint32_t)REG; | |
36 | +#else | |
37 | + if (T0) | |
38 | + REG = (uint32_t)T1; | |
39 | +#endif | |
137 | 40 | FORCE_RET(); |
138 | 41 | } |
139 | 42 | |
... | ... | @@ -145,46 +48,3 @@ void OPPROTO glue(glue(op_cmovq,REGNAME),_T1_T0)(void) |
145 | 48 | FORCE_RET(); |
146 | 49 | } |
147 | 50 | #endif |
148 | - | |
149 | -/* NOTE: T0 high order bits are ignored */ | |
150 | -void OPPROTO glue(glue(op_movw,REGNAME),_T0)(void) | |
151 | -{ | |
152 | - REG = (REG & ~0xffff) | (T0 & 0xffff); | |
153 | -} | |
154 | - | |
155 | -/* NOTE: T0 high order bits are ignored */ | |
156 | -void OPPROTO glue(glue(op_movw,REGNAME),_T1)(void) | |
157 | -{ | |
158 | - REG = (REG & ~0xffff) | (T1 & 0xffff); | |
159 | -} | |
160 | - | |
161 | -/* NOTE: A0 high order bits are ignored */ | |
162 | -void OPPROTO glue(glue(op_movw,REGNAME),_A0)(void) | |
163 | -{ | |
164 | - REG = (REG & ~0xffff) | (A0 & 0xffff); | |
165 | -} | |
166 | - | |
167 | -/* NOTE: T0 high order bits are ignored */ | |
168 | -void OPPROTO glue(glue(op_movb,REGNAME),_T0)(void) | |
169 | -{ | |
170 | - REG = (REG & ~0xff) | (T0 & 0xff); | |
171 | -} | |
172 | - | |
173 | -/* NOTE: T0 high order bits are ignored */ | |
174 | -void OPPROTO glue(glue(op_movh,REGNAME),_T0)(void) | |
175 | -{ | |
176 | - REG = (REG & ~0xff00) | ((T0 & 0xff) << 8); | |
177 | -} | |
178 | - | |
179 | -/* NOTE: T1 high order bits are ignored */ | |
180 | -void OPPROTO glue(glue(op_movb,REGNAME),_T1)(void) | |
181 | -{ | |
182 | - REG = (REG & ~0xff) | (T1 & 0xff); | |
183 | -} | |
184 | - | |
185 | -/* NOTE: T1 high order bits are ignored */ | |
186 | -void OPPROTO glue(glue(op_movh,REGNAME),_T1)(void) | |
187 | -{ | |
188 | - REG = (REG & ~0xff00) | ((T1 & 0xff) << 8); | |
189 | -} | |
190 | - | ... | ... |
target-i386/translate.c
... | ... | @@ -28,10 +28,8 @@ |
28 | 28 | #include "cpu.h" |
29 | 29 | #include "exec-all.h" |
30 | 30 | #include "disas.h" |
31 | - | |
32 | -/* XXX: move that elsewhere */ | |
33 | -static uint16_t *gen_opc_ptr; | |
34 | -static uint32_t *gen_opparam_ptr; | |
31 | +#include "helper.h" | |
32 | +#include "tcg-op.h" | |
35 | 33 | |
36 | 34 | #define PREFIX_REPZ 0x01 |
37 | 35 | #define PREFIX_REPNZ 0x02 |
... | ... | @@ -57,14 +55,79 @@ static uint32_t *gen_opparam_ptr; |
57 | 55 | #define REX_B(s) 0 |
58 | 56 | #endif |
59 | 57 | |
58 | +//#define MACRO_TEST 1 | |
59 | + | |
60 | 60 | #ifdef TARGET_X86_64 |
61 | -static int x86_64_hregs; | |
61 | +#define TCG_TYPE_TL TCG_TYPE_I64 | |
62 | +#define tcg_gen_movi_tl tcg_gen_movi_i64 | |
63 | +#define tcg_gen_mov_tl tcg_gen_mov_i64 | |
64 | +#define tcg_gen_ld8u_tl tcg_gen_ld8u_i64 | |
65 | +#define tcg_gen_ld8s_tl tcg_gen_ld8s_i64 | |
66 | +#define tcg_gen_ld16u_tl tcg_gen_ld16u_i64 | |
67 | +#define tcg_gen_ld16s_tl tcg_gen_ld16s_i64 | |
68 | +#define tcg_gen_ld32u_tl tcg_gen_ld32u_i64 | |
69 | +#define tcg_gen_ld32s_tl tcg_gen_ld32s_i64 | |
70 | +#define tcg_gen_ld_tl tcg_gen_ld_i64 | |
71 | +#define tcg_gen_st8_tl tcg_gen_st8_i64 | |
72 | +#define tcg_gen_st16_tl tcg_gen_st16_i64 | |
73 | +#define tcg_gen_st32_tl tcg_gen_st32_i64 | |
74 | +#define tcg_gen_st_tl tcg_gen_st_i64 | |
75 | +#define tcg_gen_add_tl tcg_gen_add_i64 | |
76 | +#define tcg_gen_addi_tl tcg_gen_addi_i64 | |
77 | +#define tcg_gen_sub_tl tcg_gen_sub_i64 | |
78 | +#define tcg_gen_subi_tl tcg_gen_subi_i64 | |
79 | +#define tcg_gen_and_tl tcg_gen_and_i64 | |
80 | +#define tcg_gen_andi_tl tcg_gen_andi_i64 | |
81 | +#define tcg_gen_or_tl tcg_gen_or_i64 | |
82 | +#define tcg_gen_ori_tl tcg_gen_ori_i64 | |
83 | +#define tcg_gen_xor_tl tcg_gen_xor_i64 | |
84 | +#define tcg_gen_xori_tl tcg_gen_xori_i64 | |
85 | +#define tcg_gen_shl_tl tcg_gen_shl_i64 | |
86 | +#define tcg_gen_shli_tl tcg_gen_shli_i64 | |
87 | +#define tcg_gen_shr_tl tcg_gen_shr_i64 | |
88 | +#define tcg_gen_shri_tl tcg_gen_shri_i64 | |
89 | +#define tcg_gen_sar_tl tcg_gen_sar_i64 | |
90 | +#define tcg_gen_sari_tl tcg_gen_sari_i64 | |
91 | +#else | |
92 | +#define TCG_TYPE_TL TCG_TYPE_I32 | |
93 | +#define tcg_gen_movi_tl tcg_gen_movi_i32 | |
94 | +#define tcg_gen_mov_tl tcg_gen_mov_i32 | |
95 | +#define tcg_gen_ld8u_tl tcg_gen_ld8u_i32 | |
96 | +#define tcg_gen_ld8s_tl tcg_gen_ld8s_i32 | |
97 | +#define tcg_gen_ld16u_tl tcg_gen_ld16u_i32 | |
98 | +#define tcg_gen_ld16s_tl tcg_gen_ld16s_i32 | |
99 | +#define tcg_gen_ld32u_tl tcg_gen_ld_i32 | |
100 | +#define tcg_gen_ld32s_tl tcg_gen_ld_i32 | |
101 | +#define tcg_gen_ld_tl tcg_gen_ld_i32 | |
102 | +#define tcg_gen_st8_tl tcg_gen_st8_i32 | |
103 | +#define tcg_gen_st16_tl tcg_gen_st16_i32 | |
104 | +#define tcg_gen_st32_tl tcg_gen_st_i32 | |
105 | +#define tcg_gen_st_tl tcg_gen_st_i32 | |
106 | +#define tcg_gen_add_tl tcg_gen_add_i32 | |
107 | +#define tcg_gen_addi_tl tcg_gen_addi_i32 | |
108 | +#define tcg_gen_sub_tl tcg_gen_sub_i32 | |
109 | +#define tcg_gen_subi_tl tcg_gen_subi_i32 | |
110 | +#define tcg_gen_and_tl tcg_gen_and_i32 | |
111 | +#define tcg_gen_andi_tl tcg_gen_andi_i32 | |
112 | +#define tcg_gen_or_tl tcg_gen_or_i32 | |
113 | +#define tcg_gen_ori_tl tcg_gen_ori_i32 | |
114 | +#define tcg_gen_xor_tl tcg_gen_xor_i32 | |
115 | +#define tcg_gen_xori_tl tcg_gen_xori_i32 | |
116 | +#define tcg_gen_shl_tl tcg_gen_shl_i32 | |
117 | +#define tcg_gen_shli_tl tcg_gen_shli_i32 | |
118 | +#define tcg_gen_shr_tl tcg_gen_shr_i32 | |
119 | +#define tcg_gen_shri_tl tcg_gen_shri_i32 | |
120 | +#define tcg_gen_sar_tl tcg_gen_sar_i32 | |
121 | +#define tcg_gen_sari_tl tcg_gen_sari_i32 | |
62 | 122 | #endif |
63 | 123 | |
64 | -#ifdef USE_DIRECT_JUMP | |
65 | -#define TBPARAM(x) | |
66 | -#else | |
67 | -#define TBPARAM(x) (long)(x) | |
124 | +/* global register indexes */ | |
125 | +static int cpu_env, cpu_T[2], cpu_A0; | |
126 | +/* local register indexes (only used inside old micro ops) */ | |
127 | +static int cpu_tmp0; | |
128 | + | |
129 | +#ifdef TARGET_X86_64 | |
130 | +static int x86_64_hregs; | |
68 | 131 | #endif |
69 | 132 | |
70 | 133 | typedef struct DisasContext { |
... | ... | @@ -131,15 +194,6 @@ enum { |
131 | 194 | OP_SAR = 7, |
132 | 195 | }; |
133 | 196 | |
134 | -enum { | |
135 | -#define DEF(s, n, copy_size) INDEX_op_ ## s, | |
136 | -#include "opc.h" | |
137 | -#undef DEF | |
138 | - NB_OPS, | |
139 | -}; | |
140 | - | |
141 | -#include "gen-op.h" | |
142 | - | |
143 | 197 | /* operand size */ |
144 | 198 | enum { |
145 | 199 | OT_BYTE = 0, |
... | ... | @@ -164,6 +218,73 @@ enum { |
164 | 218 | OR_A0, /* temporary register used when doing address evaluation */ |
165 | 219 | }; |
166 | 220 | |
221 | +static inline void gen_op_movl_T0_0(void) | |
222 | +{ | |
223 | + tcg_gen_movi_tl(cpu_T[0], 0); | |
224 | +} | |
225 | + | |
226 | +static inline void gen_op_movl_T0_im(int32_t val) | |
227 | +{ | |
228 | + tcg_gen_movi_tl(cpu_T[0], val); | |
229 | +} | |
230 | + | |
231 | +static inline void gen_op_movl_T0_imu(uint32_t val) | |
232 | +{ | |
233 | + tcg_gen_movi_tl(cpu_T[0], val); | |
234 | +} | |
235 | + | |
236 | +static inline void gen_op_movl_T1_im(int32_t val) | |
237 | +{ | |
238 | + tcg_gen_movi_tl(cpu_T[1], val); | |
239 | +} | |
240 | + | |
241 | +static inline void gen_op_movl_T1_imu(uint32_t val) | |
242 | +{ | |
243 | + tcg_gen_movi_tl(cpu_T[1], val); | |
244 | +} | |
245 | + | |
246 | +static inline void gen_op_movl_A0_im(uint32_t val) | |
247 | +{ | |
248 | + tcg_gen_movi_tl(cpu_A0, val); | |
249 | +} | |
250 | + | |
251 | +#ifdef TARGET_X86_64 | |
252 | +static inline void gen_op_movq_A0_im(int64_t val) | |
253 | +{ | |
254 | + tcg_gen_movi_tl(cpu_A0, val); | |
255 | +} | |
256 | +#endif | |
257 | + | |
258 | +static inline void gen_movtl_T0_im(target_ulong val) | |
259 | +{ | |
260 | + tcg_gen_movi_tl(cpu_T[0], val); | |
261 | +} | |
262 | + | |
263 | +static inline void gen_movtl_T1_im(target_ulong val) | |
264 | +{ | |
265 | + tcg_gen_movi_tl(cpu_T[1], val); | |
266 | +} | |
267 | + | |
268 | +static inline void gen_op_andl_T0_ffff(void) | |
269 | +{ | |
270 | + tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff); | |
271 | +} | |
272 | + | |
273 | +static inline void gen_op_andl_T0_im(uint32_t val) | |
274 | +{ | |
275 | + tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val); | |
276 | +} | |
277 | + | |
278 | +static inline void gen_op_movl_T0_T1(void) | |
279 | +{ | |
280 | + tcg_gen_mov_tl(cpu_T[0], cpu_T[1]); | |
281 | +} | |
282 | + | |
283 | +static inline void gen_op_andl_A0_ffff(void) | |
284 | +{ | |
285 | + tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff); | |
286 | +} | |
287 | + | |
167 | 288 | #ifdef TARGET_X86_64 |
168 | 289 | |
169 | 290 | #define NB_OP_SIZES 4 |
... | ... | @@ -186,45 +307,6 @@ enum { |
186 | 307 | prefix ## R14 ## suffix,\ |
187 | 308 | prefix ## R15 ## suffix, |
188 | 309 | |
189 | -#define DEF_BREGS(prefixb, prefixh, suffix) \ | |
190 | - \ | |
191 | -static void prefixb ## ESP ## suffix ## _wrapper(void) \ | |
192 | -{ \ | |
193 | - if (x86_64_hregs) \ | |
194 | - prefixb ## ESP ## suffix (); \ | |
195 | - else \ | |
196 | - prefixh ## EAX ## suffix (); \ | |
197 | -} \ | |
198 | - \ | |
199 | -static void prefixb ## EBP ## suffix ## _wrapper(void) \ | |
200 | -{ \ | |
201 | - if (x86_64_hregs) \ | |
202 | - prefixb ## EBP ## suffix (); \ | |
203 | - else \ | |
204 | - prefixh ## ECX ## suffix (); \ | |
205 | -} \ | |
206 | - \ | |
207 | -static void prefixb ## ESI ## suffix ## _wrapper(void) \ | |
208 | -{ \ | |
209 | - if (x86_64_hregs) \ | |
210 | - prefixb ## ESI ## suffix (); \ | |
211 | - else \ | |
212 | - prefixh ## EDX ## suffix (); \ | |
213 | -} \ | |
214 | - \ | |
215 | -static void prefixb ## EDI ## suffix ## _wrapper(void) \ | |
216 | -{ \ | |
217 | - if (x86_64_hregs) \ | |
218 | - prefixb ## EDI ## suffix (); \ | |
219 | - else \ | |
220 | - prefixh ## EBX ## suffix (); \ | |
221 | -} | |
222 | - | |
223 | -DEF_BREGS(gen_op_movb_, gen_op_movh_, _T0) | |
224 | -DEF_BREGS(gen_op_movb_, gen_op_movh_, _T1) | |
225 | -DEF_BREGS(gen_op_movl_T0_, gen_op_movh_T0_, ) | |
226 | -DEF_BREGS(gen_op_movl_T1_, gen_op_movh_T1_, ) | |
227 | - | |
228 | 310 | #else /* !TARGET_X86_64 */ |
229 | 311 | |
230 | 312 | #define NB_OP_SIZES 3 |
... | ... | @@ -241,218 +323,227 @@ DEF_BREGS(gen_op_movl_T1_, gen_op_movh_T1_, ) |
241 | 323 | |
242 | 324 | #endif /* !TARGET_X86_64 */ |
243 | 325 | |
244 | -static GenOpFunc *gen_op_mov_reg_T0[NB_OP_SIZES][CPU_NB_REGS] = { | |
245 | - [OT_BYTE] = { | |
246 | - gen_op_movb_EAX_T0, | |
247 | - gen_op_movb_ECX_T0, | |
248 | - gen_op_movb_EDX_T0, | |
249 | - gen_op_movb_EBX_T0, | |
250 | -#ifdef TARGET_X86_64 | |
251 | - gen_op_movb_ESP_T0_wrapper, | |
252 | - gen_op_movb_EBP_T0_wrapper, | |
253 | - gen_op_movb_ESI_T0_wrapper, | |
254 | - gen_op_movb_EDI_T0_wrapper, | |
255 | - gen_op_movb_R8_T0, | |
256 | - gen_op_movb_R9_T0, | |
257 | - gen_op_movb_R10_T0, | |
258 | - gen_op_movb_R11_T0, | |
259 | - gen_op_movb_R12_T0, | |
260 | - gen_op_movb_R13_T0, | |
261 | - gen_op_movb_R14_T0, | |
262 | - gen_op_movb_R15_T0, | |
326 | +#if defined(WORDS_BIGENDIAN) | |
327 | +#define REG_B_OFFSET (sizeof(target_ulong) - 1) | |
328 | +#define REG_H_OFFSET (sizeof(target_ulong) - 2) | |
329 | +#define REG_W_OFFSET (sizeof(target_ulong) - 2) | |
330 | +#define REG_L_OFFSET (sizeof(target_ulong) - 4) | |
331 | +#define REG_LH_OFFSET (sizeof(target_ulong) - 8) | |
263 | 332 | #else |
264 | - gen_op_movh_EAX_T0, | |
265 | - gen_op_movh_ECX_T0, | |
266 | - gen_op_movh_EDX_T0, | |
267 | - gen_op_movh_EBX_T0, | |
333 | +#define REG_B_OFFSET 0 | |
334 | +#define REG_H_OFFSET 1 | |
335 | +#define REG_W_OFFSET 0 | |
336 | +#define REG_L_OFFSET 0 | |
337 | +#define REG_LH_OFFSET 4 | |
268 | 338 | #endif |
269 | - }, | |
270 | - [OT_WORD] = { | |
271 | - DEF_REGS(gen_op_movw_, _T0) | |
272 | - }, | |
273 | - [OT_LONG] = { | |
274 | - DEF_REGS(gen_op_movl_, _T0) | |
275 | - }, | |
339 | + | |
340 | +static inline void gen_op_mov_reg_TN(int ot, int t_index, int reg) | |
341 | +{ | |
342 | + switch(ot) { | |
343 | + case OT_BYTE: | |
344 | + if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) { | |
345 | + tcg_gen_st8_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET); | |
346 | + } else { | |
347 | + tcg_gen_st8_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET); | |
348 | + } | |
349 | + break; | |
350 | + case OT_WORD: | |
351 | + tcg_gen_st16_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET); | |
352 | + break; | |
276 | 353 | #ifdef TARGET_X86_64 |
277 | - [OT_QUAD] = { | |
278 | - DEF_REGS(gen_op_movq_, _T0) | |
279 | - }, | |
354 | + case OT_LONG: | |
355 | + tcg_gen_st32_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); | |
356 | + /* high part of register set to zero */ | |
357 | + tcg_gen_movi_tl(cpu_tmp0, 0); | |
358 | + tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET); | |
359 | + break; | |
360 | + default: | |
361 | + case OT_QUAD: | |
362 | + tcg_gen_st_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg])); | |
363 | + break; | |
364 | +#else | |
365 | + default: | |
366 | + case OT_LONG: | |
367 | + tcg_gen_st32_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); | |
368 | + break; | |
280 | 369 | #endif |
281 | -}; | |
370 | + } | |
371 | +} | |
282 | 372 | |
283 | -static GenOpFunc *gen_op_mov_reg_T1[NB_OP_SIZES][CPU_NB_REGS] = { | |
284 | - [OT_BYTE] = { | |
285 | - gen_op_movb_EAX_T1, | |
286 | - gen_op_movb_ECX_T1, | |
287 | - gen_op_movb_EDX_T1, | |
288 | - gen_op_movb_EBX_T1, | |
373 | +static inline void gen_op_mov_reg_T0(int ot, int reg) | |
374 | +{ | |
375 | + gen_op_mov_reg_TN(ot, 0, reg); | |
376 | +} | |
377 | + | |
378 | +static inline void gen_op_mov_reg_T1(int ot, int reg) | |
379 | +{ | |
380 | + gen_op_mov_reg_TN(ot, 1, reg); | |
381 | +} | |
382 | + | |
383 | +static inline void gen_op_mov_reg_A0(int size, int reg) | |
384 | +{ | |
385 | + switch(size) { | |
386 | + case 0: | |
387 | + tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET); | |
388 | + break; | |
289 | 389 | #ifdef TARGET_X86_64 |
290 | - gen_op_movb_ESP_T1_wrapper, | |
291 | - gen_op_movb_EBP_T1_wrapper, | |
292 | - gen_op_movb_ESI_T1_wrapper, | |
293 | - gen_op_movb_EDI_T1_wrapper, | |
294 | - gen_op_movb_R8_T1, | |
295 | - gen_op_movb_R9_T1, | |
296 | - gen_op_movb_R10_T1, | |
297 | - gen_op_movb_R11_T1, | |
298 | - gen_op_movb_R12_T1, | |
299 | - gen_op_movb_R13_T1, | |
300 | - gen_op_movb_R14_T1, | |
301 | - gen_op_movb_R15_T1, | |
390 | + case 1: | |
391 | + tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); | |
392 | + /* high part of register set to zero */ | |
393 | + tcg_gen_movi_tl(cpu_tmp0, 0); | |
394 | + tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET); | |
395 | + break; | |
396 | + default: | |
397 | + case 2: | |
398 | + tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg])); | |
399 | + break; | |
302 | 400 | #else |
303 | - gen_op_movh_EAX_T1, | |
304 | - gen_op_movh_ECX_T1, | |
305 | - gen_op_movh_EDX_T1, | |
306 | - gen_op_movh_EBX_T1, | |
401 | + default: | |
402 | + case 1: | |
403 | + tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); | |
404 | + break; | |
307 | 405 | #endif |
308 | - }, | |
309 | - [OT_WORD] = { | |
310 | - DEF_REGS(gen_op_movw_, _T1) | |
311 | - }, | |
312 | - [OT_LONG] = { | |
313 | - DEF_REGS(gen_op_movl_, _T1) | |
314 | - }, | |
406 | + } | |
407 | +} | |
408 | + | |
409 | +static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg) | |
410 | +{ | |
411 | + switch(ot) { | |
412 | + case OT_BYTE: | |
413 | + if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) { | |
414 | + goto std_case; | |
415 | + } else { | |
416 | + tcg_gen_ld8u_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET); | |
417 | + } | |
418 | + break; | |
419 | + default: | |
420 | + std_case: | |
421 | + tcg_gen_ld_tl(cpu_T[t_index], cpu_env, offsetof(CPUState, regs[reg])); | |
422 | + break; | |
423 | + } | |
424 | +} | |
425 | + | |
426 | +static inline void gen_op_movl_A0_reg(int reg) | |
427 | +{ | |
428 | + tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); | |
429 | +} | |
430 | + | |
431 | +static inline void gen_op_addl_A0_im(int32_t val) | |
432 | +{ | |
433 | + tcg_gen_addi_tl(cpu_A0, cpu_A0, val); | |
315 | 434 | #ifdef TARGET_X86_64 |
316 | - [OT_QUAD] = { | |
317 | - DEF_REGS(gen_op_movq_, _T1) | |
318 | - }, | |
435 | + tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); | |
319 | 436 | #endif |
320 | -}; | |
437 | +} | |
321 | 438 | |
322 | -static GenOpFunc *gen_op_mov_reg_A0[NB_OP_SIZES - 1][CPU_NB_REGS] = { | |
323 | - [0] = { | |
324 | - DEF_REGS(gen_op_movw_, _A0) | |
325 | - }, | |
326 | - [1] = { | |
327 | - DEF_REGS(gen_op_movl_, _A0) | |
328 | - }, | |
329 | 439 | #ifdef TARGET_X86_64 |
330 | - [2] = { | |
331 | - DEF_REGS(gen_op_movq_, _A0) | |
332 | - }, | |
440 | +static inline void gen_op_addq_A0_im(int64_t val) | |
441 | +{ | |
442 | + tcg_gen_addi_tl(cpu_A0, cpu_A0, val); | |
443 | +} | |
333 | 444 | #endif |
334 | -}; | |
445 | + | |
446 | +static void gen_add_A0_im(DisasContext *s, int val) | |
447 | +{ | |
448 | +#ifdef TARGET_X86_64 | |
449 | + if (CODE64(s)) | |
450 | + gen_op_addq_A0_im(val); | |
451 | + else | |
452 | +#endif | |
453 | + gen_op_addl_A0_im(val); | |
454 | +} | |
335 | 455 | |
336 | -static GenOpFunc *gen_op_mov_TN_reg[NB_OP_SIZES][2][CPU_NB_REGS] = | |
456 | +static inline void gen_op_addl_T0_T1(void) | |
337 | 457 | { |
338 | - [OT_BYTE] = { | |
339 | - { | |
340 | - gen_op_movl_T0_EAX, | |
341 | - gen_op_movl_T0_ECX, | |
342 | - gen_op_movl_T0_EDX, | |
343 | - gen_op_movl_T0_EBX, | |
458 | + tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
459 | +} | |
460 | + | |
461 | +static inline void gen_op_jmp_T0(void) | |
462 | +{ | |
463 | + tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip)); | |
464 | +} | |
465 | + | |
466 | +static inline void gen_op_addw_ESP_im(int32_t val) | |
467 | +{ | |
468 | + tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP])); | |
469 | + tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val); | |
470 | + tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP]) + REG_W_OFFSET); | |
471 | +} | |
472 | + | |
473 | +static inline void gen_op_addl_ESP_im(int32_t val) | |
474 | +{ | |
475 | + tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP])); | |
476 | + tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val); | |
344 | 477 | #ifdef TARGET_X86_64 |
345 | - gen_op_movl_T0_ESP_wrapper, | |
346 | - gen_op_movl_T0_EBP_wrapper, | |
347 | - gen_op_movl_T0_ESI_wrapper, | |
348 | - gen_op_movl_T0_EDI_wrapper, | |
349 | - gen_op_movl_T0_R8, | |
350 | - gen_op_movl_T0_R9, | |
351 | - gen_op_movl_T0_R10, | |
352 | - gen_op_movl_T0_R11, | |
353 | - gen_op_movl_T0_R12, | |
354 | - gen_op_movl_T0_R13, | |
355 | - gen_op_movl_T0_R14, | |
356 | - gen_op_movl_T0_R15, | |
357 | -#else | |
358 | - gen_op_movh_T0_EAX, | |
359 | - gen_op_movh_T0_ECX, | |
360 | - gen_op_movh_T0_EDX, | |
361 | - gen_op_movh_T0_EBX, | |
478 | + tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff); | |
362 | 479 | #endif |
363 | - }, | |
364 | - { | |
365 | - gen_op_movl_T1_EAX, | |
366 | - gen_op_movl_T1_ECX, | |
367 | - gen_op_movl_T1_EDX, | |
368 | - gen_op_movl_T1_EBX, | |
480 | + tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP])); | |
481 | +} | |
482 | + | |
369 | 483 | #ifdef TARGET_X86_64 |
370 | - gen_op_movl_T1_ESP_wrapper, | |
371 | - gen_op_movl_T1_EBP_wrapper, | |
372 | - gen_op_movl_T1_ESI_wrapper, | |
373 | - gen_op_movl_T1_EDI_wrapper, | |
374 | - gen_op_movl_T1_R8, | |
375 | - gen_op_movl_T1_R9, | |
376 | - gen_op_movl_T1_R10, | |
377 | - gen_op_movl_T1_R11, | |
378 | - gen_op_movl_T1_R12, | |
379 | - gen_op_movl_T1_R13, | |
380 | - gen_op_movl_T1_R14, | |
381 | - gen_op_movl_T1_R15, | |
382 | -#else | |
383 | - gen_op_movh_T1_EAX, | |
384 | - gen_op_movh_T1_ECX, | |
385 | - gen_op_movh_T1_EDX, | |
386 | - gen_op_movh_T1_EBX, | |
484 | +static inline void gen_op_addq_ESP_im(int32_t val) | |
485 | +{ | |
486 | + tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP])); | |
487 | + tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val); | |
488 | + tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ESP])); | |
489 | +} | |
387 | 490 | #endif |
388 | - }, | |
389 | - }, | |
390 | - [OT_WORD] = { | |
391 | - { | |
392 | - DEF_REGS(gen_op_movl_T0_, ) | |
393 | - }, | |
394 | - { | |
395 | - DEF_REGS(gen_op_movl_T1_, ) | |
396 | - }, | |
397 | - }, | |
398 | - [OT_LONG] = { | |
399 | - { | |
400 | - DEF_REGS(gen_op_movl_T0_, ) | |
401 | - }, | |
402 | - { | |
403 | - DEF_REGS(gen_op_movl_T1_, ) | |
404 | - }, | |
405 | - }, | |
491 | + | |
492 | +static inline void gen_op_set_cc_op(int32_t val) | |
493 | +{ | |
494 | + tcg_gen_movi_tl(cpu_tmp0, val); | |
495 | + tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, cc_op)); | |
496 | +} | |
497 | + | |
498 | +static inline void gen_op_addl_A0_reg_sN(int shift, int reg) | |
499 | +{ | |
500 | + tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); | |
501 | + if (shift != 0) | |
502 | + tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift); | |
503 | + tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); | |
406 | 504 | #ifdef TARGET_X86_64 |
407 | - [OT_QUAD] = { | |
408 | - { | |
409 | - DEF_REGS(gen_op_movl_T0_, ) | |
410 | - }, | |
411 | - { | |
412 | - DEF_REGS(gen_op_movl_T1_, ) | |
413 | - }, | |
414 | - }, | |
505 | + tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); | |
415 | 506 | #endif |
416 | -}; | |
507 | +} | |
417 | 508 | |
418 | -static GenOpFunc *gen_op_movl_A0_reg[CPU_NB_REGS] = { | |
419 | - DEF_REGS(gen_op_movl_A0_, ) | |
420 | -}; | |
509 | +static inline void gen_op_movl_A0_seg(int reg) | |
510 | +{ | |
511 | + tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET); | |
512 | +} | |
421 | 513 | |
422 | -static GenOpFunc *gen_op_addl_A0_reg_sN[4][CPU_NB_REGS] = { | |
423 | - [0] = { | |
424 | - DEF_REGS(gen_op_addl_A0_, ) | |
425 | - }, | |
426 | - [1] = { | |
427 | - DEF_REGS(gen_op_addl_A0_, _s1) | |
428 | - }, | |
429 | - [2] = { | |
430 | - DEF_REGS(gen_op_addl_A0_, _s2) | |
431 | - }, | |
432 | - [3] = { | |
433 | - DEF_REGS(gen_op_addl_A0_, _s3) | |
434 | - }, | |
435 | -}; | |
514 | +static inline void gen_op_addl_A0_seg(int reg) | |
515 | +{ | |
516 | + tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base)); | |
517 | + tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); | |
518 | +#ifdef TARGET_X86_64 | |
519 | + tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); | |
520 | +#endif | |
521 | +} | |
436 | 522 | |
437 | 523 | #ifdef TARGET_X86_64 |
438 | -static GenOpFunc *gen_op_movq_A0_reg[CPU_NB_REGS] = { | |
439 | - DEF_REGS(gen_op_movq_A0_, ) | |
440 | -}; | |
524 | +static inline void gen_op_movq_A0_seg(int reg) | |
525 | +{ | |
526 | + tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base)); | |
527 | +} | |
441 | 528 | |
442 | -static GenOpFunc *gen_op_addq_A0_reg_sN[4][CPU_NB_REGS] = { | |
443 | - [0] = { | |
444 | - DEF_REGS(gen_op_addq_A0_, ) | |
445 | - }, | |
446 | - [1] = { | |
447 | - DEF_REGS(gen_op_addq_A0_, _s1) | |
448 | - }, | |
449 | - [2] = { | |
450 | - DEF_REGS(gen_op_addq_A0_, _s2) | |
451 | - }, | |
452 | - [3] = { | |
453 | - DEF_REGS(gen_op_addq_A0_, _s3) | |
454 | - }, | |
455 | -}; | |
529 | +static inline void gen_op_addq_A0_seg(int reg) | |
530 | +{ | |
531 | + tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base)); | |
532 | + tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); | |
533 | +} | |
534 | + | |
535 | +static inline void gen_op_movq_A0_reg(int reg) | |
536 | +{ | |
537 | + tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg])); | |
538 | +} | |
539 | + | |
540 | +static inline void gen_op_addq_A0_reg_sN(int shift, int reg) | |
541 | +{ | |
542 | + tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); | |
543 | + if (shift != 0) | |
544 | + tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift); | |
545 | + tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); | |
546 | +} | |
456 | 547 | #endif |
457 | 548 | |
458 | 549 | static GenOpFunc *gen_op_cmov_reg_T1_T0[NB_OP_SIZES - 1][CPU_NB_REGS] = { |
... | ... | @@ -469,17 +560,6 @@ static GenOpFunc *gen_op_cmov_reg_T1_T0[NB_OP_SIZES - 1][CPU_NB_REGS] = { |
469 | 560 | #endif |
470 | 561 | }; |
471 | 562 | |
472 | -static GenOpFunc *gen_op_arith_T0_T1_cc[8] = { | |
473 | - NULL, | |
474 | - gen_op_orl_T0_T1, | |
475 | - NULL, | |
476 | - NULL, | |
477 | - gen_op_andl_T0_T1, | |
478 | - NULL, | |
479 | - gen_op_xorl_T0_T1, | |
480 | - NULL, | |
481 | -}; | |
482 | - | |
483 | 563 | #define DEF_ARITHC(SUFFIX)\ |
484 | 564 | {\ |
485 | 565 | gen_op_adcb ## SUFFIX ## _T0_T1_cc,\ |
... | ... | @@ -681,133 +761,113 @@ static GenOpFunc *gen_op_bsx_T0_cc[3][2] = { |
681 | 761 | #endif |
682 | 762 | }; |
683 | 763 | |
684 | -static GenOpFunc *gen_op_lds_T0_A0[3 * 4] = { | |
685 | - gen_op_ldsb_raw_T0_A0, | |
686 | - gen_op_ldsw_raw_T0_A0, | |
687 | - X86_64_ONLY(gen_op_ldsl_raw_T0_A0), | |
688 | - NULL, | |
689 | -#ifndef CONFIG_USER_ONLY | |
690 | - gen_op_ldsb_kernel_T0_A0, | |
691 | - gen_op_ldsw_kernel_T0_A0, | |
692 | - X86_64_ONLY(gen_op_ldsl_kernel_T0_A0), | |
693 | - NULL, | |
694 | - | |
695 | - gen_op_ldsb_user_T0_A0, | |
696 | - gen_op_ldsw_user_T0_A0, | |
697 | - X86_64_ONLY(gen_op_ldsl_user_T0_A0), | |
698 | - NULL, | |
699 | -#endif | |
700 | -}; | |
701 | - | |
702 | -static GenOpFunc *gen_op_ldu_T0_A0[3 * 4] = { | |
703 | - gen_op_ldub_raw_T0_A0, | |
704 | - gen_op_lduw_raw_T0_A0, | |
705 | - NULL, | |
706 | - NULL, | |
707 | - | |
708 | -#ifndef CONFIG_USER_ONLY | |
709 | - gen_op_ldub_kernel_T0_A0, | |
710 | - gen_op_lduw_kernel_T0_A0, | |
711 | - NULL, | |
712 | - NULL, | |
713 | - | |
714 | - gen_op_ldub_user_T0_A0, | |
715 | - gen_op_lduw_user_T0_A0, | |
716 | - NULL, | |
717 | - NULL, | |
718 | -#endif | |
719 | -}; | |
764 | +static inline void gen_op_lds_T0_A0(int idx) | |
765 | +{ | |
766 | + int mem_index = (idx >> 2) - 1; | |
767 | + switch(idx & 3) { | |
768 | + case 0: | |
769 | + tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index); | |
770 | + break; | |
771 | + case 1: | |
772 | + tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index); | |
773 | + break; | |
774 | + default: | |
775 | + case 2: | |
776 | + tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index); | |
777 | + break; | |
778 | + } | |
779 | +} | |
720 | 780 | |
721 | 781 | /* sign does not matter, except for lidt/lgdt call (TODO: fix it) */ |
722 | -static GenOpFunc *gen_op_ld_T0_A0[3 * 4] = { | |
723 | - gen_op_ldub_raw_T0_A0, | |
724 | - gen_op_lduw_raw_T0_A0, | |
725 | - gen_op_ldl_raw_T0_A0, | |
726 | - X86_64_ONLY(gen_op_ldq_raw_T0_A0), | |
727 | - | |
728 | -#ifndef CONFIG_USER_ONLY | |
729 | - gen_op_ldub_kernel_T0_A0, | |
730 | - gen_op_lduw_kernel_T0_A0, | |
731 | - gen_op_ldl_kernel_T0_A0, | |
732 | - X86_64_ONLY(gen_op_ldq_kernel_T0_A0), | |
733 | - | |
734 | - gen_op_ldub_user_T0_A0, | |
735 | - gen_op_lduw_user_T0_A0, | |
736 | - gen_op_ldl_user_T0_A0, | |
737 | - X86_64_ONLY(gen_op_ldq_user_T0_A0), | |
738 | -#endif | |
739 | -}; | |
740 | - | |
741 | -static GenOpFunc *gen_op_ld_T1_A0[3 * 4] = { | |
742 | - gen_op_ldub_raw_T1_A0, | |
743 | - gen_op_lduw_raw_T1_A0, | |
744 | - gen_op_ldl_raw_T1_A0, | |
745 | - X86_64_ONLY(gen_op_ldq_raw_T1_A0), | |
746 | - | |
747 | -#ifndef CONFIG_USER_ONLY | |
748 | - gen_op_ldub_kernel_T1_A0, | |
749 | - gen_op_lduw_kernel_T1_A0, | |
750 | - gen_op_ldl_kernel_T1_A0, | |
751 | - X86_64_ONLY(gen_op_ldq_kernel_T1_A0), | |
752 | - | |
753 | - gen_op_ldub_user_T1_A0, | |
754 | - gen_op_lduw_user_T1_A0, | |
755 | - gen_op_ldl_user_T1_A0, | |
756 | - X86_64_ONLY(gen_op_ldq_user_T1_A0), | |
757 | -#endif | |
758 | -}; | |
759 | - | |
760 | -static GenOpFunc *gen_op_st_T0_A0[3 * 4] = { | |
761 | - gen_op_stb_raw_T0_A0, | |
762 | - gen_op_stw_raw_T0_A0, | |
763 | - gen_op_stl_raw_T0_A0, | |
764 | - X86_64_ONLY(gen_op_stq_raw_T0_A0), | |
782 | +static inline void gen_op_ld_T0_A0(int idx) | |
783 | +{ | |
784 | + int mem_index = (idx >> 2) - 1; | |
785 | + switch(idx & 3) { | |
786 | + case 0: | |
787 | + tcg_gen_qemu_ld8u(cpu_T[0], cpu_A0, mem_index); | |
788 | + break; | |
789 | + case 1: | |
790 | + tcg_gen_qemu_ld16u(cpu_T[0], cpu_A0, mem_index); | |
791 | + break; | |
792 | + case 2: | |
793 | + tcg_gen_qemu_ld32u(cpu_T[0], cpu_A0, mem_index); | |
794 | + break; | |
795 | + default: | |
796 | + case 3: | |
797 | + tcg_gen_qemu_ld64(cpu_T[0], cpu_A0, mem_index); | |
798 | + break; | |
799 | + } | |
800 | +} | |
765 | 801 | |
766 | -#ifndef CONFIG_USER_ONLY | |
767 | - gen_op_stb_kernel_T0_A0, | |
768 | - gen_op_stw_kernel_T0_A0, | |
769 | - gen_op_stl_kernel_T0_A0, | |
770 | - X86_64_ONLY(gen_op_stq_kernel_T0_A0), | |
771 | - | |
772 | - gen_op_stb_user_T0_A0, | |
773 | - gen_op_stw_user_T0_A0, | |
774 | - gen_op_stl_user_T0_A0, | |
775 | - X86_64_ONLY(gen_op_stq_user_T0_A0), | |
776 | -#endif | |
777 | -}; | |
802 | +static inline void gen_op_ldu_T0_A0(int idx) | |
803 | +{ | |
804 | + gen_op_ld_T0_A0(idx); | |
805 | +} | |
778 | 806 | |
779 | -static GenOpFunc *gen_op_st_T1_A0[3 * 4] = { | |
780 | - NULL, | |
781 | - gen_op_stw_raw_T1_A0, | |
782 | - gen_op_stl_raw_T1_A0, | |
783 | - X86_64_ONLY(gen_op_stq_raw_T1_A0), | |
807 | +static inline void gen_op_ld_T1_A0(int idx) | |
808 | +{ | |
809 | + int mem_index = (idx >> 2) - 1; | |
810 | + switch(idx & 3) { | |
811 | + case 0: | |
812 | + tcg_gen_qemu_ld8u(cpu_T[1], cpu_A0, mem_index); | |
813 | + break; | |
814 | + case 1: | |
815 | + tcg_gen_qemu_ld16u(cpu_T[1], cpu_A0, mem_index); | |
816 | + break; | |
817 | + case 2: | |
818 | + tcg_gen_qemu_ld32u(cpu_T[1], cpu_A0, mem_index); | |
819 | + break; | |
820 | + default: | |
821 | + case 3: | |
822 | + tcg_gen_qemu_ld64(cpu_T[1], cpu_A0, mem_index); | |
823 | + break; | |
824 | + } | |
825 | +} | |
784 | 826 | |
785 | -#ifndef CONFIG_USER_ONLY | |
786 | - NULL, | |
787 | - gen_op_stw_kernel_T1_A0, | |
788 | - gen_op_stl_kernel_T1_A0, | |
789 | - X86_64_ONLY(gen_op_stq_kernel_T1_A0), | |
827 | +static inline void gen_op_st_T0_A0(int idx) | |
828 | +{ | |
829 | + int mem_index = (idx >> 2) - 1; | |
830 | + switch(idx & 3) { | |
831 | + case 0: | |
832 | + tcg_gen_qemu_st8(cpu_T[0], cpu_A0, mem_index); | |
833 | + break; | |
834 | + case 1: | |
835 | + tcg_gen_qemu_st16(cpu_T[0], cpu_A0, mem_index); | |
836 | + break; | |
837 | + case 2: | |
838 | + tcg_gen_qemu_st32(cpu_T[0], cpu_A0, mem_index); | |
839 | + break; | |
840 | + default: | |
841 | + case 3: | |
842 | + tcg_gen_qemu_st64(cpu_T[0], cpu_A0, mem_index); | |
843 | + break; | |
844 | + } | |
845 | +} | |
790 | 846 | |
791 | - NULL, | |
792 | - gen_op_stw_user_T1_A0, | |
793 | - gen_op_stl_user_T1_A0, | |
794 | - X86_64_ONLY(gen_op_stq_user_T1_A0), | |
795 | -#endif | |
796 | -}; | |
847 | +static inline void gen_op_st_T1_A0(int idx) | |
848 | +{ | |
849 | + int mem_index = (idx >> 2) - 1; | |
850 | + switch(idx & 3) { | |
851 | + case 0: | |
852 | + tcg_gen_qemu_st8(cpu_T[1], cpu_A0, mem_index); | |
853 | + break; | |
854 | + case 1: | |
855 | + tcg_gen_qemu_st16(cpu_T[1], cpu_A0, mem_index); | |
856 | + break; | |
857 | + case 2: | |
858 | + tcg_gen_qemu_st32(cpu_T[1], cpu_A0, mem_index); | |
859 | + break; | |
860 | + default: | |
861 | + case 3: | |
862 | + tcg_gen_qemu_st64(cpu_T[1], cpu_A0, mem_index); | |
863 | + break; | |
864 | + } | |
865 | +} | |
797 | 866 | |
798 | 867 | static inline void gen_jmp_im(target_ulong pc) |
799 | 868 | { |
800 | -#ifdef TARGET_X86_64 | |
801 | - if (pc == (uint32_t)pc) { | |
802 | - gen_op_movl_eip_im(pc); | |
803 | - } else if (pc == (int32_t)pc) { | |
804 | - gen_op_movq_eip_im(pc); | |
805 | - } else { | |
806 | - gen_op_movq_eip_im64(pc >> 32, pc); | |
807 | - } | |
808 | -#else | |
809 | - gen_op_movl_eip_im(pc); | |
810 | -#endif | |
869 | + tcg_gen_movi_tl(cpu_tmp0, pc); | |
870 | + tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip)); | |
811 | 871 | } |
812 | 872 | |
813 | 873 | static inline void gen_string_movl_A0_ESI(DisasContext *s) |
... | ... | @@ -818,10 +878,10 @@ static inline void gen_string_movl_A0_ESI(DisasContext *s) |
818 | 878 | #ifdef TARGET_X86_64 |
819 | 879 | if (s->aflag == 2) { |
820 | 880 | if (override >= 0) { |
821 | - gen_op_movq_A0_seg(offsetof(CPUX86State,segs[override].base)); | |
822 | - gen_op_addq_A0_reg_sN[0][R_ESI](); | |
881 | + gen_op_movq_A0_seg(override); | |
882 | + gen_op_addq_A0_reg_sN(0, R_ESI); | |
823 | 883 | } else { |
824 | - gen_op_movq_A0_reg[R_ESI](); | |
884 | + gen_op_movq_A0_reg(R_ESI); | |
825 | 885 | } |
826 | 886 | } else |
827 | 887 | #endif |
... | ... | @@ -830,18 +890,18 @@ static inline void gen_string_movl_A0_ESI(DisasContext *s) |
830 | 890 | if (s->addseg && override < 0) |
831 | 891 | override = R_DS; |
832 | 892 | if (override >= 0) { |
833 | - gen_op_movl_A0_seg(offsetof(CPUX86State,segs[override].base)); | |
834 | - gen_op_addl_A0_reg_sN[0][R_ESI](); | |
893 | + gen_op_movl_A0_seg(override); | |
894 | + gen_op_addl_A0_reg_sN(0, R_ESI); | |
835 | 895 | } else { |
836 | - gen_op_movl_A0_reg[R_ESI](); | |
896 | + gen_op_movl_A0_reg(R_ESI); | |
837 | 897 | } |
838 | 898 | } else { |
839 | 899 | /* 16 address, always override */ |
840 | 900 | if (override < 0) |
841 | 901 | override = R_DS; |
842 | - gen_op_movl_A0_reg[R_ESI](); | |
902 | + gen_op_movl_A0_reg(R_ESI); | |
843 | 903 | gen_op_andl_A0_ffff(); |
844 | - gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base)); | |
904 | + gen_op_addl_A0_seg(override); | |
845 | 905 | } |
846 | 906 | } |
847 | 907 | |
... | ... | @@ -849,20 +909,20 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s) |
849 | 909 | { |
850 | 910 | #ifdef TARGET_X86_64 |
851 | 911 | if (s->aflag == 2) { |
852 | - gen_op_movq_A0_reg[R_EDI](); | |
912 | + gen_op_movq_A0_reg(R_EDI); | |
853 | 913 | } else |
854 | 914 | #endif |
855 | 915 | if (s->aflag) { |
856 | 916 | if (s->addseg) { |
857 | - gen_op_movl_A0_seg(offsetof(CPUX86State,segs[R_ES].base)); | |
858 | - gen_op_addl_A0_reg_sN[0][R_EDI](); | |
917 | + gen_op_movl_A0_seg(R_ES); | |
918 | + gen_op_addl_A0_reg_sN(0, R_EDI); | |
859 | 919 | } else { |
860 | - gen_op_movl_A0_reg[R_EDI](); | |
920 | + gen_op_movl_A0_reg(R_EDI); | |
861 | 921 | } |
862 | 922 | } else { |
863 | - gen_op_movl_A0_reg[R_EDI](); | |
923 | + gen_op_movl_A0_reg(R_EDI); | |
864 | 924 | gen_op_andl_A0_ffff(); |
865 | - gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_ES].base)); | |
925 | + gen_op_addl_A0_seg(R_ES); | |
866 | 926 | } |
867 | 927 | } |
868 | 928 | |
... | ... | @@ -958,9 +1018,9 @@ static void gen_check_io(DisasContext *s, int ot, int use_dx, target_ulong cur_e |
958 | 1018 | static inline void gen_movs(DisasContext *s, int ot) |
959 | 1019 | { |
960 | 1020 | gen_string_movl_A0_ESI(s); |
961 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
1021 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
962 | 1022 | gen_string_movl_A0_EDI(s); |
963 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
1023 | + gen_op_st_T0_A0(ot + s->mem_index); | |
964 | 1024 | gen_op_movl_T0_Dshift[ot](); |
965 | 1025 | #ifdef TARGET_X86_64 |
966 | 1026 | if (s->aflag == 2) { |
... | ... | @@ -1002,9 +1062,9 @@ static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) |
1002 | 1062 | |
1003 | 1063 | static inline void gen_stos(DisasContext *s, int ot) |
1004 | 1064 | { |
1005 | - gen_op_mov_TN_reg[OT_LONG][0][R_EAX](); | |
1065 | + gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); | |
1006 | 1066 | gen_string_movl_A0_EDI(s); |
1007 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
1067 | + gen_op_st_T0_A0(ot + s->mem_index); | |
1008 | 1068 | gen_op_movl_T0_Dshift[ot](); |
1009 | 1069 | #ifdef TARGET_X86_64 |
1010 | 1070 | if (s->aflag == 2) { |
... | ... | @@ -1021,8 +1081,8 @@ static inline void gen_stos(DisasContext *s, int ot) |
1021 | 1081 | static inline void gen_lods(DisasContext *s, int ot) |
1022 | 1082 | { |
1023 | 1083 | gen_string_movl_A0_ESI(s); |
1024 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
1025 | - gen_op_mov_reg_T0[ot][R_EAX](); | |
1084 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
1085 | + gen_op_mov_reg_T0(ot, R_EAX); | |
1026 | 1086 | gen_op_movl_T0_Dshift[ot](); |
1027 | 1087 | #ifdef TARGET_X86_64 |
1028 | 1088 | if (s->aflag == 2) { |
... | ... | @@ -1038,9 +1098,9 @@ static inline void gen_lods(DisasContext *s, int ot) |
1038 | 1098 | |
1039 | 1099 | static inline void gen_scas(DisasContext *s, int ot) |
1040 | 1100 | { |
1041 | - gen_op_mov_TN_reg[OT_LONG][0][R_EAX](); | |
1101 | + gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); | |
1042 | 1102 | gen_string_movl_A0_EDI(s); |
1043 | - gen_op_ld_T1_A0[ot + s->mem_index](); | |
1103 | + gen_op_ld_T1_A0(ot + s->mem_index); | |
1044 | 1104 | gen_op_cmpl_T0_T1_cc(); |
1045 | 1105 | gen_op_movl_T0_Dshift[ot](); |
1046 | 1106 | #ifdef TARGET_X86_64 |
... | ... | @@ -1058,9 +1118,9 @@ static inline void gen_scas(DisasContext *s, int ot) |
1058 | 1118 | static inline void gen_cmps(DisasContext *s, int ot) |
1059 | 1119 | { |
1060 | 1120 | gen_string_movl_A0_ESI(s); |
1061 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
1121 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
1062 | 1122 | gen_string_movl_A0_EDI(s); |
1063 | - gen_op_ld_T1_A0[ot + s->mem_index](); | |
1123 | + gen_op_ld_T1_A0(ot + s->mem_index); | |
1064 | 1124 | gen_op_cmpl_T0_T1_cc(); |
1065 | 1125 | gen_op_movl_T0_Dshift[ot](); |
1066 | 1126 | #ifdef TARGET_X86_64 |
... | ... | @@ -1082,9 +1142,9 @@ static inline void gen_ins(DisasContext *s, int ot) |
1082 | 1142 | { |
1083 | 1143 | gen_string_movl_A0_EDI(s); |
1084 | 1144 | gen_op_movl_T0_0(); |
1085 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
1145 | + gen_op_st_T0_A0(ot + s->mem_index); | |
1086 | 1146 | gen_op_in_DX_T0[ot](); |
1087 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
1147 | + gen_op_st_T0_A0(ot + s->mem_index); | |
1088 | 1148 | gen_op_movl_T0_Dshift[ot](); |
1089 | 1149 | #ifdef TARGET_X86_64 |
1090 | 1150 | if (s->aflag == 2) { |
... | ... | @@ -1101,7 +1161,7 @@ static inline void gen_ins(DisasContext *s, int ot) |
1101 | 1161 | static inline void gen_outs(DisasContext *s, int ot) |
1102 | 1162 | { |
1103 | 1163 | gen_string_movl_A0_ESI(s); |
1104 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
1164 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
1105 | 1165 | gen_op_out_DX_T0[ot](); |
1106 | 1166 | gen_op_movl_T0_Dshift[ot](); |
1107 | 1167 | #ifdef TARGET_X86_64 |
... | ... | @@ -1320,9 +1380,9 @@ static void gen_op(DisasContext *s1, int op, int ot, int d) |
1320 | 1380 | GenOpFunc *gen_update_cc; |
1321 | 1381 | |
1322 | 1382 | if (d != OR_TMP0) { |
1323 | - gen_op_mov_TN_reg[ot][0][d](); | |
1383 | + gen_op_mov_TN_reg(ot, 0, d); | |
1324 | 1384 | } else { |
1325 | - gen_op_ld_T0_A0[ot + s1->mem_index](); | |
1385 | + gen_op_ld_T0_A0(ot + s1->mem_index); | |
1326 | 1386 | } |
1327 | 1387 | switch(op) { |
1328 | 1388 | case OP_ADCL: |
... | ... | @@ -1331,7 +1391,7 @@ static void gen_op(DisasContext *s1, int op, int ot, int d) |
1331 | 1391 | gen_op_set_cc_op(s1->cc_op); |
1332 | 1392 | if (d != OR_TMP0) { |
1333 | 1393 | gen_op_arithc_T0_T1_cc[ot][op - OP_ADCL](); |
1334 | - gen_op_mov_reg_T0[ot][d](); | |
1394 | + gen_op_mov_reg_T0(ot, d); | |
1335 | 1395 | } else { |
1336 | 1396 | gen_op_arithc_mem_T0_T1_cc[ot + s1->mem_index][op - OP_ADCL](); |
1337 | 1397 | } |
... | ... | @@ -1343,15 +1403,23 @@ static void gen_op(DisasContext *s1, int op, int ot, int d) |
1343 | 1403 | gen_update_cc = gen_op_update2_cc; |
1344 | 1404 | break; |
1345 | 1405 | case OP_SUBL: |
1346 | - gen_op_subl_T0_T1(); | |
1406 | + tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
1347 | 1407 | s1->cc_op = CC_OP_SUBB + ot; |
1348 | 1408 | gen_update_cc = gen_op_update2_cc; |
1349 | 1409 | break; |
1350 | 1410 | default: |
1351 | 1411 | case OP_ANDL: |
1412 | + tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
1413 | + s1->cc_op = CC_OP_LOGICB + ot; | |
1414 | + gen_update_cc = gen_op_update1_cc; | |
1415 | + break; | |
1352 | 1416 | case OP_ORL: |
1417 | + tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
1418 | + s1->cc_op = CC_OP_LOGICB + ot; | |
1419 | + gen_update_cc = gen_op_update1_cc; | |
1420 | + break; | |
1353 | 1421 | case OP_XORL: |
1354 | - gen_op_arith_T0_T1_cc[op](); | |
1422 | + tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]); | |
1355 | 1423 | s1->cc_op = CC_OP_LOGICB + ot; |
1356 | 1424 | gen_update_cc = gen_op_update1_cc; |
1357 | 1425 | break; |
... | ... | @@ -1363,9 +1431,9 @@ static void gen_op(DisasContext *s1, int op, int ot, int d) |
1363 | 1431 | } |
1364 | 1432 | if (op != OP_CMPL) { |
1365 | 1433 | if (d != OR_TMP0) |
1366 | - gen_op_mov_reg_T0[ot][d](); | |
1434 | + gen_op_mov_reg_T0(ot, d); | |
1367 | 1435 | else |
1368 | - gen_op_st_T0_A0[ot + s1->mem_index](); | |
1436 | + gen_op_st_T0_A0(ot + s1->mem_index); | |
1369 | 1437 | } |
1370 | 1438 | /* the flags update must happen after the memory write (precise |
1371 | 1439 | exception support) */ |
... | ... | @@ -1378,9 +1446,9 @@ static void gen_op(DisasContext *s1, int op, int ot, int d) |
1378 | 1446 | static void gen_inc(DisasContext *s1, int ot, int d, int c) |
1379 | 1447 | { |
1380 | 1448 | if (d != OR_TMP0) |
1381 | - gen_op_mov_TN_reg[ot][0][d](); | |
1449 | + gen_op_mov_TN_reg(ot, 0, d); | |
1382 | 1450 | else |
1383 | - gen_op_ld_T0_A0[ot + s1->mem_index](); | |
1451 | + gen_op_ld_T0_A0(ot + s1->mem_index); | |
1384 | 1452 | if (s1->cc_op != CC_OP_DYNAMIC) |
1385 | 1453 | gen_op_set_cc_op(s1->cc_op); |
1386 | 1454 | if (c > 0) { |
... | ... | @@ -1391,20 +1459,20 @@ static void gen_inc(DisasContext *s1, int ot, int d, int c) |
1391 | 1459 | s1->cc_op = CC_OP_DECB + ot; |
1392 | 1460 | } |
1393 | 1461 | if (d != OR_TMP0) |
1394 | - gen_op_mov_reg_T0[ot][d](); | |
1462 | + gen_op_mov_reg_T0(ot, d); | |
1395 | 1463 | else |
1396 | - gen_op_st_T0_A0[ot + s1->mem_index](); | |
1464 | + gen_op_st_T0_A0(ot + s1->mem_index); | |
1397 | 1465 | gen_op_update_inc_cc(); |
1398 | 1466 | } |
1399 | 1467 | |
1400 | 1468 | static void gen_shift(DisasContext *s1, int op, int ot, int d, int s) |
1401 | 1469 | { |
1402 | 1470 | if (d != OR_TMP0) |
1403 | - gen_op_mov_TN_reg[ot][0][d](); | |
1471 | + gen_op_mov_TN_reg(ot, 0, d); | |
1404 | 1472 | else |
1405 | - gen_op_ld_T0_A0[ot + s1->mem_index](); | |
1473 | + gen_op_ld_T0_A0(ot + s1->mem_index); | |
1406 | 1474 | if (s != OR_TMP1) |
1407 | - gen_op_mov_TN_reg[ot][1][s](); | |
1475 | + gen_op_mov_TN_reg(ot, 1, s); | |
1408 | 1476 | /* for zero counts, flags are not updated, so must do it dynamically */ |
1409 | 1477 | if (s1->cc_op != CC_OP_DYNAMIC) |
1410 | 1478 | gen_op_set_cc_op(s1->cc_op); |
... | ... | @@ -1414,7 +1482,7 @@ static void gen_shift(DisasContext *s1, int op, int ot, int d, int s) |
1414 | 1482 | else |
1415 | 1483 | gen_op_shift_mem_T0_T1_cc[ot + s1->mem_index][op](); |
1416 | 1484 | if (d != OR_TMP0) |
1417 | - gen_op_mov_reg_T0[ot][d](); | |
1485 | + gen_op_mov_reg_T0(ot, d); | |
1418 | 1486 | s1->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ |
1419 | 1487 | } |
1420 | 1488 | |
... | ... | @@ -1487,27 +1555,21 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ |
1487 | 1555 | disp += s->popl_esp_hack; |
1488 | 1556 | #ifdef TARGET_X86_64 |
1489 | 1557 | if (s->aflag == 2) { |
1490 | - gen_op_movq_A0_reg[base](); | |
1558 | + gen_op_movq_A0_reg(base); | |
1491 | 1559 | if (disp != 0) { |
1492 | - if ((int32_t)disp == disp) | |
1493 | - gen_op_addq_A0_im(disp); | |
1494 | - else | |
1495 | - gen_op_addq_A0_im64(disp >> 32, disp); | |
1560 | + gen_op_addq_A0_im(disp); | |
1496 | 1561 | } |
1497 | 1562 | } else |
1498 | 1563 | #endif |
1499 | 1564 | { |
1500 | - gen_op_movl_A0_reg[base](); | |
1565 | + gen_op_movl_A0_reg(base); | |
1501 | 1566 | if (disp != 0) |
1502 | 1567 | gen_op_addl_A0_im(disp); |
1503 | 1568 | } |
1504 | 1569 | } else { |
1505 | 1570 | #ifdef TARGET_X86_64 |
1506 | 1571 | if (s->aflag == 2) { |
1507 | - if ((int32_t)disp == disp) | |
1508 | - gen_op_movq_A0_im(disp); | |
1509 | - else | |
1510 | - gen_op_movq_A0_im64(disp >> 32, disp); | |
1572 | + gen_op_movq_A0_im(disp); | |
1511 | 1573 | } else |
1512 | 1574 | #endif |
1513 | 1575 | { |
... | ... | @@ -1518,11 +1580,11 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ |
1518 | 1580 | if (havesib && (index != 4 || scale != 0)) { |
1519 | 1581 | #ifdef TARGET_X86_64 |
1520 | 1582 | if (s->aflag == 2) { |
1521 | - gen_op_addq_A0_reg_sN[scale][index](); | |
1583 | + gen_op_addq_A0_reg_sN(scale, index); | |
1522 | 1584 | } else |
1523 | 1585 | #endif |
1524 | 1586 | { |
1525 | - gen_op_addl_A0_reg_sN[scale][index](); | |
1587 | + gen_op_addl_A0_reg_sN(scale, index); | |
1526 | 1588 | } |
1527 | 1589 | } |
1528 | 1590 | if (must_add_seg) { |
... | ... | @@ -1534,11 +1596,11 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ |
1534 | 1596 | } |
1535 | 1597 | #ifdef TARGET_X86_64 |
1536 | 1598 | if (s->aflag == 2) { |
1537 | - gen_op_addq_A0_seg(offsetof(CPUX86State,segs[override].base)); | |
1599 | + gen_op_addq_A0_seg(override); | |
1538 | 1600 | } else |
1539 | 1601 | #endif |
1540 | 1602 | { |
1541 | - gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base)); | |
1603 | + gen_op_addl_A0_seg(override); | |
1542 | 1604 | } |
1543 | 1605 | } |
1544 | 1606 | } else { |
... | ... | @@ -1565,33 +1627,33 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ |
1565 | 1627 | } |
1566 | 1628 | switch(rm) { |
1567 | 1629 | case 0: |
1568 | - gen_op_movl_A0_reg[R_EBX](); | |
1569 | - gen_op_addl_A0_reg_sN[0][R_ESI](); | |
1630 | + gen_op_movl_A0_reg(R_EBX); | |
1631 | + gen_op_addl_A0_reg_sN(0, R_ESI); | |
1570 | 1632 | break; |
1571 | 1633 | case 1: |
1572 | - gen_op_movl_A0_reg[R_EBX](); | |
1573 | - gen_op_addl_A0_reg_sN[0][R_EDI](); | |
1634 | + gen_op_movl_A0_reg(R_EBX); | |
1635 | + gen_op_addl_A0_reg_sN(0, R_EDI); | |
1574 | 1636 | break; |
1575 | 1637 | case 2: |
1576 | - gen_op_movl_A0_reg[R_EBP](); | |
1577 | - gen_op_addl_A0_reg_sN[0][R_ESI](); | |
1638 | + gen_op_movl_A0_reg(R_EBP); | |
1639 | + gen_op_addl_A0_reg_sN(0, R_ESI); | |
1578 | 1640 | break; |
1579 | 1641 | case 3: |
1580 | - gen_op_movl_A0_reg[R_EBP](); | |
1581 | - gen_op_addl_A0_reg_sN[0][R_EDI](); | |
1642 | + gen_op_movl_A0_reg(R_EBP); | |
1643 | + gen_op_addl_A0_reg_sN(0, R_EDI); | |
1582 | 1644 | break; |
1583 | 1645 | case 4: |
1584 | - gen_op_movl_A0_reg[R_ESI](); | |
1646 | + gen_op_movl_A0_reg(R_ESI); | |
1585 | 1647 | break; |
1586 | 1648 | case 5: |
1587 | - gen_op_movl_A0_reg[R_EDI](); | |
1649 | + gen_op_movl_A0_reg(R_EDI); | |
1588 | 1650 | break; |
1589 | 1651 | case 6: |
1590 | - gen_op_movl_A0_reg[R_EBP](); | |
1652 | + gen_op_movl_A0_reg(R_EBP); | |
1591 | 1653 | break; |
1592 | 1654 | default: |
1593 | 1655 | case 7: |
1594 | - gen_op_movl_A0_reg[R_EBX](); | |
1656 | + gen_op_movl_A0_reg(R_EBX); | |
1595 | 1657 | break; |
1596 | 1658 | } |
1597 | 1659 | if (disp != 0) |
... | ... | @@ -1605,7 +1667,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ |
1605 | 1667 | else |
1606 | 1668 | override = R_DS; |
1607 | 1669 | } |
1608 | - gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base)); | |
1670 | + gen_op_addl_A0_seg(override); | |
1609 | 1671 | } |
1610 | 1672 | } |
1611 | 1673 | |
... | ... | @@ -1680,11 +1742,11 @@ static void gen_add_A0_ds_seg(DisasContext *s) |
1680 | 1742 | if (must_add_seg) { |
1681 | 1743 | #ifdef TARGET_X86_64 |
1682 | 1744 | if (CODE64(s)) { |
1683 | - gen_op_addq_A0_seg(offsetof(CPUX86State,segs[override].base)); | |
1745 | + gen_op_addq_A0_seg(override); | |
1684 | 1746 | } else |
1685 | 1747 | #endif |
1686 | 1748 | { |
1687 | - gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base)); | |
1749 | + gen_op_addl_A0_seg(override); | |
1688 | 1750 | } |
1689 | 1751 | } |
1690 | 1752 | } |
... | ... | @@ -1700,23 +1762,23 @@ static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_s |
1700 | 1762 | if (mod == 3) { |
1701 | 1763 | if (is_store) { |
1702 | 1764 | if (reg != OR_TMP0) |
1703 | - gen_op_mov_TN_reg[ot][0][reg](); | |
1704 | - gen_op_mov_reg_T0[ot][rm](); | |
1765 | + gen_op_mov_TN_reg(ot, 0, reg); | |
1766 | + gen_op_mov_reg_T0(ot, rm); | |
1705 | 1767 | } else { |
1706 | - gen_op_mov_TN_reg[ot][0][rm](); | |
1768 | + gen_op_mov_TN_reg(ot, 0, rm); | |
1707 | 1769 | if (reg != OR_TMP0) |
1708 | - gen_op_mov_reg_T0[ot][reg](); | |
1770 | + gen_op_mov_reg_T0(ot, reg); | |
1709 | 1771 | } |
1710 | 1772 | } else { |
1711 | 1773 | gen_lea_modrm(s, modrm, &opreg, &disp); |
1712 | 1774 | if (is_store) { |
1713 | 1775 | if (reg != OR_TMP0) |
1714 | - gen_op_mov_TN_reg[ot][0][reg](); | |
1715 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
1776 | + gen_op_mov_TN_reg(ot, 0, reg); | |
1777 | + gen_op_st_T0_A0(ot + s->mem_index); | |
1716 | 1778 | } else { |
1717 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
1779 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
1718 | 1780 | if (reg != OR_TMP0) |
1719 | - gen_op_mov_reg_T0[ot][reg](); | |
1781 | + gen_op_mov_reg_T0(ot, reg); | |
1720 | 1782 | } |
1721 | 1783 | } |
1722 | 1784 | } |
... | ... | @@ -1762,13 +1824,9 @@ static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip) |
1762 | 1824 | if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) || |
1763 | 1825 | (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) { |
1764 | 1826 | /* jump to same page: we can use a direct jump */ |
1765 | - if (tb_num == 0) | |
1766 | - gen_op_goto_tb0(TBPARAM(tb)); | |
1767 | - else | |
1768 | - gen_op_goto_tb1(TBPARAM(tb)); | |
1827 | + tcg_gen_goto_tb(tb_num); | |
1769 | 1828 | gen_jmp_im(eip); |
1770 | - gen_op_movl_T0_im((long)tb + tb_num); | |
1771 | - gen_op_exit_tb(); | |
1829 | + tcg_gen_exit_tb((long)tb + tb_num); | |
1772 | 1830 | } else { |
1773 | 1831 | /* jump to another page: currently not optimized */ |
1774 | 1832 | gen_jmp_im(eip); |
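Editor's note: the gen_goto_tb() hunk above replaces the two dyngen ops (goto_tb0/goto_tb1 plus movl_T0_im/exit_tb) with the generic TCG chaining opcodes. A minimal sketch of the same idiom, under the assumption that tcg_gen_goto_tb() emits the patchable direct-jump slot and tcg_gen_exit_tb() returns its argument to the execution loop; gen_chain_to is a hypothetical helper name.

    /* Sketch of the block-chaining idiom introduced above. */
    static void gen_chain_to(TranslationBlock *tb, int tb_num, target_ulong eip)
    {
        tcg_gen_goto_tb(tb_num);              /* direct jump, patched once the
                                                 target TB is known */
        gen_jmp_im(eip);                      /* only reached while unchained */
        tcg_gen_exit_tb((long)tb + tb_num);   /* hand TB pointer + slot index
                                                 back to the execution loop */
    }

The (long)tb + tb_num return value encodes which of the two jump slots to patch, which is how the loop links the two blocks on the next execution.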
... | ... | @@ -1995,11 +2053,7 @@ static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip) |
1995 | 2053 | } |
1996 | 2054 | } |
1997 | 2055 | |
1998 | -#ifdef TARGET_X86_64 | |
1999 | -#define SVM_movq_T1_im(x) gen_op_movq_T1_im64((x) >> 32, x) | |
2000 | -#else | |
2001 | -#define SVM_movq_T1_im(x) gen_op_movl_T1_im(x) | |
2002 | -#endif | |
2056 | +#define SVM_movq_T1_im(x) gen_movtl_T1_im(x) | |
2003 | 2057 | |
2004 | 2058 | static inline int |
2005 | 2059 | gen_svm_check_io(DisasContext *s, target_ulong pc_start, uint64_t type) |
... | ... | @@ -2091,26 +2145,13 @@ static inline void gen_stack_update(DisasContext *s, int addend) |
2091 | 2145 | { |
2092 | 2146 | #ifdef TARGET_X86_64 |
2093 | 2147 | if (CODE64(s)) { |
2094 | - if (addend == 8) | |
2095 | - gen_op_addq_ESP_8(); | |
2096 | - else | |
2097 | - gen_op_addq_ESP_im(addend); | |
2148 | + gen_op_addq_ESP_im(addend); | |
2098 | 2149 | } else |
2099 | 2150 | #endif |
2100 | 2151 | if (s->ss32) { |
2101 | - if (addend == 2) | |
2102 | - gen_op_addl_ESP_2(); | |
2103 | - else if (addend == 4) | |
2104 | - gen_op_addl_ESP_4(); | |
2105 | - else | |
2106 | - gen_op_addl_ESP_im(addend); | |
2152 | + gen_op_addl_ESP_im(addend); | |
2107 | 2153 | } else { |
2108 | - if (addend == 2) | |
2109 | - gen_op_addw_ESP_2(); | |
2110 | - else if (addend == 4) | |
2111 | - gen_op_addw_ESP_4(); | |
2112 | - else | |
2113 | - gen_op_addw_ESP_im(addend); | |
2154 | + gen_op_addw_ESP_im(addend); | |
2114 | 2155 | } |
2115 | 2156 | } |
2116 | 2157 | |
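Editor's note: gen_stack_update() drops the constant-specialised variants (addq_ESP_8, addl_ESP_2/4, addw_ESP_2/4) in favour of the single _im ops that take the addend as an immediate parameter. A sketch of what such a parameterised op looks like on the op.c side, assuming the usual dyngen conventions (OPPROTO, the PARAM1 immediate placeholder, and the ESP register macro); the committed op may differ in detail.

    /* Sketch only: one op with an immediate operand replaces the
       fixed-constant variants.  PARAM1 is patched into the copied code
       at translation time. */
    void OPPROTO op_addl_ESP_im(void)
    {
        ESP = (uint32_t)(ESP + PARAM1);
    }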
... | ... | @@ -2119,38 +2160,38 @@ static void gen_push_T0(DisasContext *s) |
2119 | 2160 | { |
2120 | 2161 | #ifdef TARGET_X86_64 |
2121 | 2162 | if (CODE64(s)) { |
2122 | - gen_op_movq_A0_reg[R_ESP](); | |
2163 | + gen_op_movq_A0_reg(R_ESP); | |
2123 | 2164 | if (s->dflag) { |
2124 | - gen_op_subq_A0_8(); | |
2125 | - gen_op_st_T0_A0[OT_QUAD + s->mem_index](); | |
2165 | + gen_op_addq_A0_im(-8); | |
2166 | + gen_op_st_T0_A0(OT_QUAD + s->mem_index); | |
2126 | 2167 | } else { |
2127 | - gen_op_subq_A0_2(); | |
2128 | - gen_op_st_T0_A0[OT_WORD + s->mem_index](); | |
2168 | + gen_op_addq_A0_im(-2); | |
2169 | + gen_op_st_T0_A0(OT_WORD + s->mem_index); | |
2129 | 2170 | } |
2130 | - gen_op_movq_ESP_A0(); | |
2171 | + gen_op_mov_reg_A0(2, R_ESP); | |
2131 | 2172 | } else |
2132 | 2173 | #endif |
2133 | 2174 | { |
2134 | - gen_op_movl_A0_reg[R_ESP](); | |
2175 | + gen_op_movl_A0_reg(R_ESP); | |
2135 | 2176 | if (!s->dflag) |
2136 | - gen_op_subl_A0_2(); | |
2177 | + gen_op_addl_A0_im(-2); | |
2137 | 2178 | else |
2138 | - gen_op_subl_A0_4(); | |
2179 | + gen_op_addl_A0_im(-4); | |
2139 | 2180 | if (s->ss32) { |
2140 | 2181 | if (s->addseg) { |
2141 | 2182 | gen_op_movl_T1_A0(); |
2142 | - gen_op_addl_A0_SS(); | |
2183 | + gen_op_addl_A0_seg(R_SS); | |
2143 | 2184 | } |
2144 | 2185 | } else { |
2145 | 2186 | gen_op_andl_A0_ffff(); |
2146 | 2187 | gen_op_movl_T1_A0(); |
2147 | - gen_op_addl_A0_SS(); | |
2188 | + gen_op_addl_A0_seg(R_SS); | |
2148 | 2189 | } |
2149 | - gen_op_st_T0_A0[s->dflag + 1 + s->mem_index](); | |
2190 | + gen_op_st_T0_A0(s->dflag + 1 + s->mem_index); | |
2150 | 2191 | if (s->ss32 && !s->addseg) |
2151 | - gen_op_movl_ESP_A0(); | |
2192 | + gen_op_mov_reg_A0(1, R_ESP); | |
2152 | 2193 | else |
2153 | - gen_op_mov_reg_T1[s->ss32 + 1][R_ESP](); | |
2194 | + gen_op_mov_reg_T1(s->ss32 + 1, R_ESP); | |
2154 | 2195 | } |
2155 | 2196 | } |
2156 | 2197 | |
... | ... | @@ -2160,35 +2201,35 @@ static void gen_push_T1(DisasContext *s) |
2160 | 2201 | { |
2161 | 2202 | #ifdef TARGET_X86_64 |
2162 | 2203 | if (CODE64(s)) { |
2163 | - gen_op_movq_A0_reg[R_ESP](); | |
2204 | + gen_op_movq_A0_reg(R_ESP); | |
2164 | 2205 | if (s->dflag) { |
2165 | - gen_op_subq_A0_8(); | |
2166 | - gen_op_st_T1_A0[OT_QUAD + s->mem_index](); | |
2206 | + gen_op_addq_A0_im(-8); | |
2207 | + gen_op_st_T1_A0(OT_QUAD + s->mem_index); | |
2167 | 2208 | } else { |
2168 | - gen_op_subq_A0_2(); | |
2169 | - gen_op_st_T0_A0[OT_WORD + s->mem_index](); | |
2209 | + gen_op_addq_A0_im(-2); | |
2210 | + gen_op_st_T0_A0(OT_WORD + s->mem_index); | |
2170 | 2211 | } |
2171 | - gen_op_movq_ESP_A0(); | |
2212 | + gen_op_mov_reg_A0(2, R_ESP); | |
2172 | 2213 | } else |
2173 | 2214 | #endif |
2174 | 2215 | { |
2175 | - gen_op_movl_A0_reg[R_ESP](); | |
2216 | + gen_op_movl_A0_reg(R_ESP); | |
2176 | 2217 | if (!s->dflag) |
2177 | - gen_op_subl_A0_2(); | |
2218 | + gen_op_addl_A0_im(-2); | |
2178 | 2219 | else |
2179 | - gen_op_subl_A0_4(); | |
2220 | + gen_op_addl_A0_im(-4); | |
2180 | 2221 | if (s->ss32) { |
2181 | 2222 | if (s->addseg) { |
2182 | - gen_op_addl_A0_SS(); | |
2223 | + gen_op_addl_A0_seg(R_SS); | |
2183 | 2224 | } |
2184 | 2225 | } else { |
2185 | 2226 | gen_op_andl_A0_ffff(); |
2186 | - gen_op_addl_A0_SS(); | |
2227 | + gen_op_addl_A0_seg(R_SS); | |
2187 | 2228 | } |
2188 | - gen_op_st_T1_A0[s->dflag + 1 + s->mem_index](); | |
2229 | + gen_op_st_T1_A0(s->dflag + 1 + s->mem_index); | |
2189 | 2230 | |
2190 | 2231 | if (s->ss32 && !s->addseg) |
2191 | - gen_op_movl_ESP_A0(); | |
2232 | + gen_op_mov_reg_A0(1, R_ESP); | |
2192 | 2233 | else |
2193 | 2234 | gen_stack_update(s, (-2) << s->dflag); |
2194 | 2235 | } |
... | ... | @@ -2199,20 +2240,20 @@ static void gen_pop_T0(DisasContext *s) |
2199 | 2240 | { |
2200 | 2241 | #ifdef TARGET_X86_64 |
2201 | 2242 | if (CODE64(s)) { |
2202 | - gen_op_movq_A0_reg[R_ESP](); | |
2203 | - gen_op_ld_T0_A0[(s->dflag ? OT_QUAD : OT_WORD) + s->mem_index](); | |
2243 | + gen_op_movq_A0_reg(R_ESP); | |
2244 | + gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index); | |
2204 | 2245 | } else |
2205 | 2246 | #endif |
2206 | 2247 | { |
2207 | - gen_op_movl_A0_reg[R_ESP](); | |
2248 | + gen_op_movl_A0_reg(R_ESP); | |
2208 | 2249 | if (s->ss32) { |
2209 | 2250 | if (s->addseg) |
2210 | - gen_op_addl_A0_SS(); | |
2251 | + gen_op_addl_A0_seg(R_SS); | |
2211 | 2252 | } else { |
2212 | 2253 | gen_op_andl_A0_ffff(); |
2213 | - gen_op_addl_A0_SS(); | |
2254 | + gen_op_addl_A0_seg(R_SS); | |
2214 | 2255 | } |
2215 | - gen_op_ld_T0_A0[s->dflag + 1 + s->mem_index](); | |
2256 | + gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index); | |
2216 | 2257 | } |
2217 | 2258 | } |
2218 | 2259 | |
... | ... | @@ -2230,53 +2271,53 @@ static void gen_pop_update(DisasContext *s) |
2230 | 2271 | |
2231 | 2272 | static void gen_stack_A0(DisasContext *s) |
2232 | 2273 | { |
2233 | - gen_op_movl_A0_ESP(); | |
2274 | + gen_op_movl_A0_reg(R_ESP); | |
2234 | 2275 | if (!s->ss32) |
2235 | 2276 | gen_op_andl_A0_ffff(); |
2236 | 2277 | gen_op_movl_T1_A0(); |
2237 | 2278 | if (s->addseg) |
2238 | - gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base)); | |
2279 | + gen_op_addl_A0_seg(R_SS); | |
2239 | 2280 | } |
2240 | 2281 | |
2241 | 2282 | /* NOTE: wrap around in 16 bit not fully handled */ |
2242 | 2283 | static void gen_pusha(DisasContext *s) |
2243 | 2284 | { |
2244 | 2285 | int i; |
2245 | - gen_op_movl_A0_ESP(); | |
2286 | + gen_op_movl_A0_reg(R_ESP); | |
2246 | 2287 | gen_op_addl_A0_im(-16 << s->dflag); |
2247 | 2288 | if (!s->ss32) |
2248 | 2289 | gen_op_andl_A0_ffff(); |
2249 | 2290 | gen_op_movl_T1_A0(); |
2250 | 2291 | if (s->addseg) |
2251 | - gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base)); | |
2292 | + gen_op_addl_A0_seg(R_SS); | |
2252 | 2293 | for(i = 0;i < 8; i++) { |
2253 | - gen_op_mov_TN_reg[OT_LONG][0][7 - i](); | |
2254 | - gen_op_st_T0_A0[OT_WORD + s->dflag + s->mem_index](); | |
2294 | + gen_op_mov_TN_reg(OT_LONG, 0, 7 - i); | |
2295 | + gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index); | |
2255 | 2296 | gen_op_addl_A0_im(2 << s->dflag); |
2256 | 2297 | } |
2257 | - gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP](); | |
2298 | + gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP); | |
2258 | 2299 | } |
2259 | 2300 | |
2260 | 2301 | /* NOTE: wrap around in 16 bit not fully handled */ |
2261 | 2302 | static void gen_popa(DisasContext *s) |
2262 | 2303 | { |
2263 | 2304 | int i; |
2264 | - gen_op_movl_A0_ESP(); | |
2305 | + gen_op_movl_A0_reg(R_ESP); | |
2265 | 2306 | if (!s->ss32) |
2266 | 2307 | gen_op_andl_A0_ffff(); |
2267 | 2308 | gen_op_movl_T1_A0(); |
2268 | 2309 | gen_op_addl_T1_im(16 << s->dflag); |
2269 | 2310 | if (s->addseg) |
2270 | - gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base)); | |
2311 | + gen_op_addl_A0_seg(R_SS); | |
2271 | 2312 | for(i = 0;i < 8; i++) { |
2272 | 2313 | /* ESP is not reloaded */ |
2273 | 2314 | if (i != 3) { |
2274 | - gen_op_ld_T0_A0[OT_WORD + s->dflag + s->mem_index](); | |
2275 | - gen_op_mov_reg_T0[OT_WORD + s->dflag][7 - i](); | |
2315 | + gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index); | |
2316 | + gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i); | |
2276 | 2317 | } |
2277 | 2318 | gen_op_addl_A0_im(2 << s->dflag); |
2278 | 2319 | } |
2279 | - gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP](); | |
2320 | + gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP); | |
2280 | 2321 | } |
2281 | 2322 | |
2282 | 2323 | static void gen_enter(DisasContext *s, int esp_addend, int level) |
... | ... | @@ -2289,41 +2330,41 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) |
2289 | 2330 | ot = s->dflag ? OT_QUAD : OT_WORD; |
2290 | 2331 | opsize = 1 << ot; |
2291 | 2332 | |
2292 | - gen_op_movl_A0_ESP(); | |
2333 | + gen_op_movl_A0_reg(R_ESP); | |
2293 | 2334 | gen_op_addq_A0_im(-opsize); |
2294 | 2335 | gen_op_movl_T1_A0(); |
2295 | 2336 | |
2296 | 2337 | /* push bp */ |
2297 | - gen_op_mov_TN_reg[OT_LONG][0][R_EBP](); | |
2298 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
2338 | + gen_op_mov_TN_reg(OT_LONG, 0, R_EBP); | |
2339 | + gen_op_st_T0_A0(ot + s->mem_index); | |
2299 | 2340 | if (level) { |
2300 | 2341 | gen_op_enter64_level(level, (ot == OT_QUAD)); |
2301 | 2342 | } |
2302 | - gen_op_mov_reg_T1[ot][R_EBP](); | |
2343 | + gen_op_mov_reg_T1(ot, R_EBP); | |
2303 | 2344 | gen_op_addl_T1_im( -esp_addend + (-opsize * level) ); |
2304 | - gen_op_mov_reg_T1[OT_QUAD][R_ESP](); | |
2345 | + gen_op_mov_reg_T1(OT_QUAD, R_ESP); | |
2305 | 2346 | } else |
2306 | 2347 | #endif |
2307 | 2348 | { |
2308 | 2349 | ot = s->dflag + OT_WORD; |
2309 | 2350 | opsize = 2 << s->dflag; |
2310 | 2351 | |
2311 | - gen_op_movl_A0_ESP(); | |
2352 | + gen_op_movl_A0_reg(R_ESP); | |
2312 | 2353 | gen_op_addl_A0_im(-opsize); |
2313 | 2354 | if (!s->ss32) |
2314 | 2355 | gen_op_andl_A0_ffff(); |
2315 | 2356 | gen_op_movl_T1_A0(); |
2316 | 2357 | if (s->addseg) |
2317 | - gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base)); | |
2358 | + gen_op_addl_A0_seg(R_SS); | |
2318 | 2359 | /* push bp */ |
2319 | - gen_op_mov_TN_reg[OT_LONG][0][R_EBP](); | |
2320 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
2360 | + gen_op_mov_TN_reg(OT_LONG, 0, R_EBP); | |
2361 | + gen_op_st_T0_A0(ot + s->mem_index); | |
2321 | 2362 | if (level) { |
2322 | 2363 | gen_op_enter_level(level, s->dflag); |
2323 | 2364 | } |
2324 | - gen_op_mov_reg_T1[ot][R_EBP](); | |
2365 | + gen_op_mov_reg_T1(ot, R_EBP); | |
2325 | 2366 | gen_op_addl_T1_im( -esp_addend + (-opsize * level) ); |
2326 | - gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP](); | |
2367 | + gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP); | |
2327 | 2368 | } |
2328 | 2369 | } |
2329 | 2370 | |
... | ... | @@ -2371,8 +2412,7 @@ static void gen_eob(DisasContext *s) |
2371 | 2412 | } else if (s->tf) { |
2372 | 2413 | gen_op_single_step(); |
2373 | 2414 | } else { |
2374 | - gen_op_movl_T0_0(); | |
2375 | - gen_op_exit_tb(); | |
2415 | + tcg_gen_exit_tb(0); | |
2376 | 2416 | } |
2377 | 2417 | s->is_jmp = 3; |
2378 | 2418 | } |
... | ... | @@ -2399,42 +2439,6 @@ static void gen_jmp(DisasContext *s, target_ulong eip) |
2399 | 2439 | gen_jmp_tb(s, eip, 0); |
2400 | 2440 | } |
2401 | 2441 | |
2402 | -static void gen_movtl_T0_im(target_ulong val) | |
2403 | -{ | |
2404 | -#ifdef TARGET_X86_64 | |
2405 | - if ((int32_t)val == val) { | |
2406 | - gen_op_movl_T0_im(val); | |
2407 | - } else { | |
2408 | - gen_op_movq_T0_im64(val >> 32, val); | |
2409 | - } | |
2410 | -#else | |
2411 | - gen_op_movl_T0_im(val); | |
2412 | -#endif | |
2413 | -} | |
2414 | - | |
2415 | -static void gen_movtl_T1_im(target_ulong val) | |
2416 | -{ | |
2417 | -#ifdef TARGET_X86_64 | |
2418 | - if ((int32_t)val == val) { | |
2419 | - gen_op_movl_T1_im(val); | |
2420 | - } else { | |
2421 | - gen_op_movq_T1_im64(val >> 32, val); | |
2422 | - } | |
2423 | -#else | |
2424 | - gen_op_movl_T1_im(val); | |
2425 | -#endif | |
2426 | -} | |
2427 | - | |
2428 | -static void gen_add_A0_im(DisasContext *s, int val) | |
2429 | -{ | |
2430 | -#ifdef TARGET_X86_64 | |
2431 | - if (CODE64(s)) | |
2432 | - gen_op_addq_A0_im(val); | |
2433 | - else | |
2434 | -#endif | |
2435 | - gen_op_addl_A0_im(val); | |
2436 | -} | |
2437 | - | |
2438 | 2442 | static GenOpFunc1 *gen_ldq_env_A0[3] = { |
2439 | 2443 | gen_op_ldq_raw_env_A0, |
2440 | 2444 | #ifndef CONFIG_USER_ONLY |
... | ... | @@ -2764,7 +2768,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) |
2764 | 2768 | case 0x210: /* movss xmm, ea */ |
2765 | 2769 | if (mod != 3) { |
2766 | 2770 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
2767 | - gen_op_ld_T0_A0[OT_LONG + s->mem_index](); | |
2771 | + gen_op_ld_T0_A0(OT_LONG + s->mem_index); | |
2768 | 2772 | gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); |
2769 | 2773 | gen_op_movl_T0_0(); |
2770 | 2774 | gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1))); |
... | ... | @@ -2921,7 +2925,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) |
2921 | 2925 | if (mod != 3) { |
2922 | 2926 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
2923 | 2927 | gen_op_movl_T0_env(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); |
2924 | - gen_op_st_T0_A0[OT_LONG + s->mem_index](); | |
2928 | + gen_op_st_T0_A0(OT_LONG + s->mem_index); | |
2925 | 2929 | } else { |
2926 | 2930 | rm = (modrm & 7) | REX_B(s); |
2927 | 2931 | gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)), |
... | ... | @@ -2991,12 +2995,12 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) |
2991 | 2995 | case 0x050: /* movmskps */ |
2992 | 2996 | rm = (modrm & 7) | REX_B(s); |
2993 | 2997 | gen_op_movmskps(offsetof(CPUX86State,xmm_regs[rm])); |
2994 | - gen_op_mov_reg_T0[OT_LONG][reg](); | |
2998 | + gen_op_mov_reg_T0(OT_LONG, reg); | |
2995 | 2999 | break; |
2996 | 3000 | case 0x150: /* movmskpd */ |
2997 | 3001 | rm = (modrm & 7) | REX_B(s); |
2998 | 3002 | gen_op_movmskpd(offsetof(CPUX86State,xmm_regs[rm])); |
2999 | - gen_op_mov_reg_T0[OT_LONG][reg](); | |
3003 | + gen_op_mov_reg_T0(OT_LONG, reg); | |
3000 | 3004 | break; |
3001 | 3005 | case 0x02a: /* cvtpi2ps */ |
3002 | 3006 | case 0x12a: /* cvtpi2pd */ |
... | ... | @@ -3066,7 +3070,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) |
3066 | 3070 | if ((b >> 8) & 1) { |
3067 | 3071 | gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_t0.XMM_Q(0))); |
3068 | 3072 | } else { |
3069 | - gen_op_ld_T0_A0[OT_LONG + s->mem_index](); | |
3073 | + gen_op_ld_T0_A0(OT_LONG + s->mem_index); | |
3070 | 3074 | gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0))); |
3071 | 3075 | } |
3072 | 3076 | op2_offset = offsetof(CPUX86State,xmm_t0); |
... | ... | @@ -3076,7 +3080,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) |
3076 | 3080 | } |
3077 | 3081 | sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 + |
3078 | 3082 | (b & 1) * 4](op2_offset); |
3079 | - gen_op_mov_reg_T0[ot][reg](); | |
3083 | + gen_op_mov_reg_T0(ot, reg); | |
3080 | 3084 | break; |
3081 | 3085 | case 0xc4: /* pinsrw */ |
3082 | 3086 | case 0x1c4: |
... | ... | @@ -3106,7 +3110,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) |
3106 | 3110 | gen_op_pextrw_mmx(offsetof(CPUX86State,fpregs[rm].mmx), val); |
3107 | 3111 | } |
3108 | 3112 | reg = ((modrm >> 3) & 7) | rex_r; |
3109 | - gen_op_mov_reg_T0[OT_LONG][reg](); | |
3113 | + gen_op_mov_reg_T0(OT_LONG, reg); | |
3110 | 3114 | break; |
3111 | 3115 | case 0x1d6: /* movq ea, xmm */ |
3112 | 3116 | if (mod != 3) { |
... | ... | @@ -3144,7 +3148,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) |
3144 | 3148 | gen_op_pmovmskb_mmx(offsetof(CPUX86State,fpregs[rm].mmx)); |
3145 | 3149 | } |
3146 | 3150 | reg = ((modrm >> 3) & 7) | rex_r; |
3147 | - gen_op_mov_reg_T0[OT_LONG][reg](); | |
3151 | + gen_op_mov_reg_T0(OT_LONG, reg); | |
3148 | 3152 | break; |
3149 | 3153 | default: |
3150 | 3154 | goto illegal_op; |
... | ... | @@ -3158,11 +3162,11 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) |
3158 | 3162 | goto illegal_op; |
3159 | 3163 | #ifdef TARGET_X86_64 |
3160 | 3164 | if (s->aflag == 2) { |
3161 | - gen_op_movq_A0_reg[R_EDI](); | |
3165 | + gen_op_movq_A0_reg(R_EDI); | |
3162 | 3166 | } else |
3163 | 3167 | #endif |
3164 | 3168 | { |
3165 | - gen_op_movl_A0_reg[R_EDI](); | |
3169 | + gen_op_movl_A0_reg(R_EDI); | |
3166 | 3170 | if (s->aflag == 0) |
3167 | 3171 | gen_op_andl_A0_ffff(); |
3168 | 3172 | } |
... | ... | @@ -3186,7 +3190,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) |
3186 | 3190 | /* specific case for SSE single instructions */ |
3187 | 3191 | if (b1 == 2) { |
3188 | 3192 | /* 32 bit access */ |
3189 | - gen_op_ld_T0_A0[OT_LONG + s->mem_index](); | |
3193 | + gen_op_ld_T0_A0(OT_LONG + s->mem_index); | |
3190 | 3194 | gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0))); |
3191 | 3195 | } else { |
3192 | 3196 | /* 64 bit access */ |
... | ... | @@ -3412,13 +3416,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3412 | 3416 | /* xor reg, reg optimisation */ |
3413 | 3417 | gen_op_movl_T0_0(); |
3414 | 3418 | s->cc_op = CC_OP_LOGICB + ot; |
3415 | - gen_op_mov_reg_T0[ot][reg](); | |
3419 | + gen_op_mov_reg_T0(ot, reg); | |
3416 | 3420 | gen_op_update1_cc(); |
3417 | 3421 | break; |
3418 | 3422 | } else { |
3419 | 3423 | opreg = rm; |
3420 | 3424 | } |
3421 | - gen_op_mov_TN_reg[ot][1][reg](); | |
3425 | + gen_op_mov_TN_reg(ot, 1, reg); | |
3422 | 3426 | gen_op(s, op, ot, opreg); |
3423 | 3427 | break; |
3424 | 3428 | case 1: /* OP Gv, Ev */ |
... | ... | @@ -3428,11 +3432,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3428 | 3432 | rm = (modrm & 7) | REX_B(s); |
3429 | 3433 | if (mod != 3) { |
3430 | 3434 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
3431 | - gen_op_ld_T1_A0[ot + s->mem_index](); | |
3435 | + gen_op_ld_T1_A0(ot + s->mem_index); | |
3432 | 3436 | } else if (op == OP_XORL && rm == reg) { |
3433 | 3437 | goto xor_zero; |
3434 | 3438 | } else { |
3435 | - gen_op_mov_TN_reg[ot][1][rm](); | |
3439 | + gen_op_mov_TN_reg(ot, 1, rm); | |
3436 | 3440 | } |
3437 | 3441 | gen_op(s, op, ot, reg); |
3438 | 3442 | break; |
... | ... | @@ -3514,9 +3518,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3514 | 3518 | if (op == 0) |
3515 | 3519 | s->rip_offset = insn_const_size(ot); |
3516 | 3520 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
3517 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
3521 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
3518 | 3522 | } else { |
3519 | - gen_op_mov_TN_reg[ot][0][rm](); | |
3523 | + gen_op_mov_TN_reg(ot, 0, rm); | |
3520 | 3524 | } |
3521 | 3525 | |
3522 | 3526 | switch(op) { |
... | ... | @@ -3529,17 +3533,17 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3529 | 3533 | case 2: /* not */ |
3530 | 3534 | gen_op_notl_T0(); |
3531 | 3535 | if (mod != 3) { |
3532 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
3536 | + gen_op_st_T0_A0(ot + s->mem_index); | |
3533 | 3537 | } else { |
3534 | - gen_op_mov_reg_T0[ot][rm](); | |
3538 | + gen_op_mov_reg_T0(ot, rm); | |
3535 | 3539 | } |
3536 | 3540 | break; |
3537 | 3541 | case 3: /* neg */ |
3538 | 3542 | gen_op_negl_T0(); |
3539 | 3543 | if (mod != 3) { |
3540 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
3544 | + gen_op_st_T0_A0(ot + s->mem_index); | |
3541 | 3545 | } else { |
3542 | - gen_op_mov_reg_T0[ot][rm](); | |
3546 | + gen_op_mov_reg_T0(ot, rm); | |
3543 | 3547 | } |
3544 | 3548 | gen_op_update_neg_cc(); |
3545 | 3549 | s->cc_op = CC_OP_SUBB + ot; |
... | ... | @@ -3603,7 +3607,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3603 | 3607 | default: |
3604 | 3608 | case OT_LONG: |
3605 | 3609 | gen_jmp_im(pc_start - s->cs_base); |
3606 | - gen_op_divl_EAX_T0(); | |
3610 | +#ifdef MACRO_TEST | |
3611 | + /* XXX: this is just a test */ | |
3612 | + tcg_gen_macro_2(cpu_T[0], cpu_T[0], MACRO_TEST); | |
3613 | +#else | |
3614 | + tcg_gen_helper_0_1(helper_divl_EAX_T0, cpu_T[0]); | |
3615 | +#endif | |
3607 | 3616 | break; |
3608 | 3617 | #ifdef TARGET_X86_64 |
3609 | 3618 | case OT_QUAD: |
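Editor's note: DIV is the first operation routed through the generic TCG helper mechanism rather than a dedicated micro-op: tcg_gen_helper_0_1() emits a call with zero results and one argument (here cpu_T[0]), and the preceding gen_jmp_im() keeps EIP precise in case the helper raises a division exception. A minimal sketch of the same pattern with a hypothetical helper name; only the call shape mirrors the hunk above.

    /* Hypothetical one-argument helper plus its translation-time caller. */
    void helper_frob_T0(target_ulong t0);              /* runs at execution time */

    static void gen_frob_T0(target_ulong cur_eip)
    {
        gen_jmp_im(cur_eip);                           /* precise EIP for faults */
        tcg_gen_helper_0_1(helper_frob_T0, cpu_T[0]);  /* 0 results, 1 argument */
    }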
... | ... | @@ -3626,7 +3635,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3626 | 3635 | default: |
3627 | 3636 | case OT_LONG: |
3628 | 3637 | gen_jmp_im(pc_start - s->cs_base); |
3629 | - gen_op_idivl_EAX_T0(); | |
3638 | + tcg_gen_helper_0_1(helper_idivl_EAX_T0, cpu_T[0]); | |
3630 | 3639 | break; |
3631 | 3640 | #ifdef TARGET_X86_64 |
3632 | 3641 | case OT_QUAD: |
... | ... | @@ -3671,9 +3680,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3671 | 3680 | if (mod != 3) { |
3672 | 3681 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
3673 | 3682 | if (op >= 2 && op != 3 && op != 5) |
3674 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
3683 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
3675 | 3684 | } else { |
3676 | - gen_op_mov_TN_reg[ot][0][rm](); | |
3685 | + gen_op_mov_TN_reg(ot, 0, rm); | |
3677 | 3686 | } |
3678 | 3687 | |
3679 | 3688 | switch(op) { |
... | ... | @@ -3702,9 +3711,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3702 | 3711 | gen_eob(s); |
3703 | 3712 | break; |
3704 | 3713 | case 3: /* lcall Ev */ |
3705 | - gen_op_ld_T1_A0[ot + s->mem_index](); | |
3714 | + gen_op_ld_T1_A0(ot + s->mem_index); | |
3706 | 3715 | gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); |
3707 | - gen_op_ldu_T0_A0[OT_WORD + s->mem_index](); | |
3716 | + gen_op_ldu_T0_A0(OT_WORD + s->mem_index); | |
3708 | 3717 | do_lcall: |
3709 | 3718 | if (s->pe && !s->vm86) { |
3710 | 3719 | if (s->cc_op != CC_OP_DYNAMIC) |
... | ... | @@ -3723,9 +3732,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3723 | 3732 | gen_eob(s); |
3724 | 3733 | break; |
3725 | 3734 | case 5: /* ljmp Ev */ |
3726 | - gen_op_ld_T1_A0[ot + s->mem_index](); | |
3735 | + gen_op_ld_T1_A0(ot + s->mem_index); | |
3727 | 3736 | gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); |
3728 | - gen_op_ldu_T0_A0[OT_WORD + s->mem_index](); | |
3737 | + gen_op_ldu_T0_A0(OT_WORD + s->mem_index); | |
3729 | 3738 | do_ljmp: |
3730 | 3739 | if (s->pe && !s->vm86) { |
3731 | 3740 | if (s->cc_op != CC_OP_DYNAMIC) |
... | ... | @@ -3760,7 +3769,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3760 | 3769 | reg = ((modrm >> 3) & 7) | rex_r; |
3761 | 3770 | |
3762 | 3771 | gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); |
3763 | - gen_op_mov_TN_reg[ot][1][reg](); | |
3772 | + gen_op_mov_TN_reg(ot, 1, reg); | |
3764 | 3773 | gen_op_testl_T0_T1_cc(); |
3765 | 3774 | s->cc_op = CC_OP_LOGICB + ot; |
3766 | 3775 | break; |
... | ... | @@ -3773,7 +3782,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3773 | 3782 | ot = dflag + OT_WORD; |
3774 | 3783 | val = insn_get(s, ot); |
3775 | 3784 | |
3776 | - gen_op_mov_TN_reg[ot][0][OR_EAX](); | |
3785 | + gen_op_mov_TN_reg(ot, 0, OR_EAX); | |
3777 | 3786 | gen_op_movl_T1_im(val); |
3778 | 3787 | gen_op_testl_T0_T1_cc(); |
3779 | 3788 | s->cc_op = CC_OP_LOGICB + ot; |
... | ... | @@ -3819,7 +3828,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3819 | 3828 | val = (int8_t)insn_get(s, OT_BYTE); |
3820 | 3829 | gen_op_movl_T1_im(val); |
3821 | 3830 | } else { |
3822 | - gen_op_mov_TN_reg[ot][1][reg](); | |
3831 | + gen_op_mov_TN_reg(ot, 1, reg); | |
3823 | 3832 | } |
3824 | 3833 | |
3825 | 3834 | #ifdef TARGET_X86_64 |
... | ... | @@ -3832,7 +3841,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3832 | 3841 | } else { |
3833 | 3842 | gen_op_imulw_T0_T1(); |
3834 | 3843 | } |
3835 | - gen_op_mov_reg_T0[ot][reg](); | |
3844 | + gen_op_mov_reg_T0(ot, reg); | |
3836 | 3845 | s->cc_op = CC_OP_MULB + ot; |
3837 | 3846 | break; |
3838 | 3847 | case 0x1c0: |
... | ... | @@ -3846,18 +3855,18 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3846 | 3855 | mod = (modrm >> 6) & 3; |
3847 | 3856 | if (mod == 3) { |
3848 | 3857 | rm = (modrm & 7) | REX_B(s); |
3849 | - gen_op_mov_TN_reg[ot][0][reg](); | |
3850 | - gen_op_mov_TN_reg[ot][1][rm](); | |
3858 | + gen_op_mov_TN_reg(ot, 0, reg); | |
3859 | + gen_op_mov_TN_reg(ot, 1, rm); | |
3851 | 3860 | gen_op_addl_T0_T1(); |
3852 | - gen_op_mov_reg_T1[ot][reg](); | |
3853 | - gen_op_mov_reg_T0[ot][rm](); | |
3861 | + gen_op_mov_reg_T1(ot, reg); | |
3862 | + gen_op_mov_reg_T0(ot, rm); | |
3854 | 3863 | } else { |
3855 | 3864 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
3856 | - gen_op_mov_TN_reg[ot][0][reg](); | |
3857 | - gen_op_ld_T1_A0[ot + s->mem_index](); | |
3865 | + gen_op_mov_TN_reg(ot, 0, reg); | |
3866 | + gen_op_ld_T1_A0(ot + s->mem_index); | |
3858 | 3867 | gen_op_addl_T0_T1(); |
3859 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
3860 | - gen_op_mov_reg_T1[ot][reg](); | |
3868 | + gen_op_st_T0_A0(ot + s->mem_index); | |
3869 | + gen_op_mov_reg_T1(ot, reg); | |
3861 | 3870 | } |
3862 | 3871 | gen_op_update2_cc(); |
3863 | 3872 | s->cc_op = CC_OP_ADDB + ot; |
... | ... | @@ -3871,15 +3880,15 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3871 | 3880 | modrm = ldub_code(s->pc++); |
3872 | 3881 | reg = ((modrm >> 3) & 7) | rex_r; |
3873 | 3882 | mod = (modrm >> 6) & 3; |
3874 | - gen_op_mov_TN_reg[ot][1][reg](); | |
3883 | + gen_op_mov_TN_reg(ot, 1, reg); | |
3875 | 3884 | if (mod == 3) { |
3876 | 3885 | rm = (modrm & 7) | REX_B(s); |
3877 | - gen_op_mov_TN_reg[ot][0][rm](); | |
3886 | + gen_op_mov_TN_reg(ot, 0, rm); | |
3878 | 3887 | gen_op_cmpxchg_T0_T1_EAX_cc[ot](); |
3879 | - gen_op_mov_reg_T0[ot][rm](); | |
3888 | + gen_op_mov_reg_T0(ot, rm); | |
3880 | 3889 | } else { |
3881 | 3890 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
3882 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
3891 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
3883 | 3892 | gen_op_cmpxchg_mem_T0_T1_EAX_cc[ot + s->mem_index](); |
3884 | 3893 | } |
3885 | 3894 | s->cc_op = CC_OP_SUBB + ot; |
... | ... | @@ -3900,7 +3909,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3900 | 3909 | /**************************/ |
3901 | 3910 | /* push/pop */ |
3902 | 3911 | case 0x50 ... 0x57: /* push */ |
3903 | - gen_op_mov_TN_reg[OT_LONG][0][(b & 7) | REX_B(s)](); | |
3912 | + gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s)); | |
3904 | 3913 | gen_push_T0(s); |
3905 | 3914 | break; |
3906 | 3915 | case 0x58 ... 0x5f: /* pop */ |
... | ... | @@ -3912,7 +3921,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3912 | 3921 | gen_pop_T0(s); |
3913 | 3922 | /* NOTE: order is important for pop %sp */ |
3914 | 3923 | gen_pop_update(s); |
3915 | - gen_op_mov_reg_T0[ot][(b & 7) | REX_B(s)](); | |
3924 | + gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s)); | |
3916 | 3925 | break; |
3917 | 3926 | case 0x60: /* pusha */ |
3918 | 3927 | if (CODE64(s)) |
... | ... | @@ -3951,7 +3960,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3951 | 3960 | /* NOTE: order is important for pop %sp */ |
3952 | 3961 | gen_pop_update(s); |
3953 | 3962 | rm = (modrm & 7) | REX_B(s); |
3954 | - gen_op_mov_reg_T0[ot][rm](); | |
3963 | + gen_op_mov_reg_T0(ot, rm); | |
3955 | 3964 | } else { |
3956 | 3965 | /* NOTE: order is important too for MMU exceptions */ |
3957 | 3966 | s->popl_esp_hack = 1 << ot; |
... | ... | @@ -3972,14 +3981,14 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3972 | 3981 | case 0xc9: /* leave */ |
3973 | 3982 | /* XXX: exception not precise (ESP is updated before potential exception) */ |
3974 | 3983 | if (CODE64(s)) { |
3975 | - gen_op_mov_TN_reg[OT_QUAD][0][R_EBP](); | |
3976 | - gen_op_mov_reg_T0[OT_QUAD][R_ESP](); | |
3984 | + gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP); | |
3985 | + gen_op_mov_reg_T0(OT_QUAD, R_ESP); | |
3977 | 3986 | } else if (s->ss32) { |
3978 | - gen_op_mov_TN_reg[OT_LONG][0][R_EBP](); | |
3979 | - gen_op_mov_reg_T0[OT_LONG][R_ESP](); | |
3987 | + gen_op_mov_TN_reg(OT_LONG, 0, R_EBP); | |
3988 | + gen_op_mov_reg_T0(OT_LONG, R_ESP); | |
3980 | 3989 | } else { |
3981 | - gen_op_mov_TN_reg[OT_WORD][0][R_EBP](); | |
3982 | - gen_op_mov_reg_T0[OT_WORD][R_ESP](); | |
3990 | + gen_op_mov_TN_reg(OT_WORD, 0, R_EBP); | |
3991 | + gen_op_mov_reg_T0(OT_WORD, R_ESP); | |
3983 | 3992 | } |
3984 | 3993 | gen_pop_T0(s); |
3985 | 3994 | if (CODE64(s)) { |
... | ... | @@ -3987,7 +3996,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
3987 | 3996 | } else { |
3988 | 3997 | ot = dflag + OT_WORD; |
3989 | 3998 | } |
3990 | - gen_op_mov_reg_T0[ot][R_EBP](); | |
3999 | + gen_op_mov_reg_T0(ot, R_EBP); | |
3991 | 4000 | gen_pop_update(s); |
3992 | 4001 | break; |
3993 | 4002 | case 0x06: /* push es */ |
... | ... | @@ -4066,9 +4075,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4066 | 4075 | val = insn_get(s, ot); |
4067 | 4076 | gen_op_movl_T0_im(val); |
4068 | 4077 | if (mod != 3) |
4069 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
4078 | + gen_op_st_T0_A0(ot + s->mem_index); | |
4070 | 4079 | else |
4071 | - gen_op_mov_reg_T0[ot][(modrm & 7) | REX_B(s)](); | |
4080 | + gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s)); | |
4072 | 4081 | break; |
4073 | 4082 | case 0x8a: |
4074 | 4083 | case 0x8b: /* mov Ev, Gv */ |
... | ... | @@ -4080,7 +4089,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4080 | 4089 | reg = ((modrm >> 3) & 7) | rex_r; |
4081 | 4090 | |
4082 | 4091 | gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); |
4083 | - gen_op_mov_reg_T0[ot][reg](); | |
4092 | + gen_op_mov_reg_T0(ot, reg); | |
4084 | 4093 | break; |
4085 | 4094 | case 0x8e: /* mov seg, Gv */ |
4086 | 4095 | modrm = ldub_code(s->pc++); |
... | ... | @@ -4132,7 +4141,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4132 | 4141 | rm = (modrm & 7) | REX_B(s); |
4133 | 4142 | |
4134 | 4143 | if (mod == 3) { |
4135 | - gen_op_mov_TN_reg[ot][0][rm](); | |
4144 | + gen_op_mov_TN_reg(ot, 0, rm); | |
4136 | 4145 | switch(ot | (b & 8)) { |
4137 | 4146 | case OT_BYTE: |
4138 | 4147 | gen_op_movzbl_T0_T0(); |
... | ... | @@ -4148,15 +4157,15 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4148 | 4157 | gen_op_movswl_T0_T0(); |
4149 | 4158 | break; |
4150 | 4159 | } |
4151 | - gen_op_mov_reg_T0[d_ot][reg](); | |
4160 | + gen_op_mov_reg_T0(d_ot, reg); | |
4152 | 4161 | } else { |
4153 | 4162 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
4154 | 4163 | if (b & 8) { |
4155 | - gen_op_lds_T0_A0[ot + s->mem_index](); | |
4164 | + gen_op_lds_T0_A0(ot + s->mem_index); | |
4156 | 4165 | } else { |
4157 | - gen_op_ldu_T0_A0[ot + s->mem_index](); | |
4166 | + gen_op_ldu_T0_A0(ot + s->mem_index); | |
4158 | 4167 | } |
4159 | - gen_op_mov_reg_T0[d_ot][reg](); | |
4168 | + gen_op_mov_reg_T0(d_ot, reg); | |
4160 | 4169 | } |
4161 | 4170 | } |
4162 | 4171 | break; |
... | ... | @@ -4174,7 +4183,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4174 | 4183 | s->addseg = 0; |
4175 | 4184 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
4176 | 4185 | s->addseg = val; |
4177 | - gen_op_mov_reg_A0[ot - OT_WORD][reg](); | |
4186 | + gen_op_mov_reg_A0(ot - OT_WORD, reg); | |
4178 | 4187 | break; |
4179 | 4188 | |
4180 | 4189 | case 0xa0: /* mov EAX, Ov */ |
... | ... | @@ -4192,10 +4201,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4192 | 4201 | if (s->aflag == 2) { |
4193 | 4202 | offset_addr = ldq_code(s->pc); |
4194 | 4203 | s->pc += 8; |
4195 | - if (offset_addr == (int32_t)offset_addr) | |
4196 | - gen_op_movq_A0_im(offset_addr); | |
4197 | - else | |
4198 | - gen_op_movq_A0_im64(offset_addr >> 32, offset_addr); | |
4204 | + gen_op_movq_A0_im(offset_addr); | |
4199 | 4205 | } else |
4200 | 4206 | #endif |
4201 | 4207 | { |
... | ... | @@ -4208,35 +4214,35 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4208 | 4214 | } |
4209 | 4215 | gen_add_A0_ds_seg(s); |
4210 | 4216 | if ((b & 2) == 0) { |
4211 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
4212 | - gen_op_mov_reg_T0[ot][R_EAX](); | |
4217 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
4218 | + gen_op_mov_reg_T0(ot, R_EAX); | |
4213 | 4219 | } else { |
4214 | - gen_op_mov_TN_reg[ot][0][R_EAX](); | |
4215 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
4220 | + gen_op_mov_TN_reg(ot, 0, R_EAX); | |
4221 | + gen_op_st_T0_A0(ot + s->mem_index); | |
4216 | 4222 | } |
4217 | 4223 | } |
4218 | 4224 | break; |
4219 | 4225 | case 0xd7: /* xlat */ |
4220 | 4226 | #ifdef TARGET_X86_64 |
4221 | 4227 | if (s->aflag == 2) { |
4222 | - gen_op_movq_A0_reg[R_EBX](); | |
4228 | + gen_op_movq_A0_reg(R_EBX); | |
4223 | 4229 | gen_op_addq_A0_AL(); |
4224 | 4230 | } else |
4225 | 4231 | #endif |
4226 | 4232 | { |
4227 | - gen_op_movl_A0_reg[R_EBX](); | |
4233 | + gen_op_movl_A0_reg(R_EBX); | |
4228 | 4234 | gen_op_addl_A0_AL(); |
4229 | 4235 | if (s->aflag == 0) |
4230 | 4236 | gen_op_andl_A0_ffff(); |
4231 | 4237 | } |
4232 | 4238 | gen_add_A0_ds_seg(s); |
4233 | - gen_op_ldu_T0_A0[OT_BYTE + s->mem_index](); | |
4234 | - gen_op_mov_reg_T0[OT_BYTE][R_EAX](); | |
4239 | + gen_op_ldu_T0_A0(OT_BYTE + s->mem_index); | |
4240 | + gen_op_mov_reg_T0(OT_BYTE, R_EAX); | |
4235 | 4241 | break; |
4236 | 4242 | case 0xb0 ... 0xb7: /* mov R, Ib */ |
4237 | 4243 | val = insn_get(s, OT_BYTE); |
4238 | 4244 | gen_op_movl_T0_im(val); |
4239 | - gen_op_mov_reg_T0[OT_BYTE][(b & 7) | REX_B(s)](); | |
4245 | + gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s)); | |
4240 | 4246 | break; |
4241 | 4247 | case 0xb8 ... 0xbf: /* mov R, Iv */ |
4242 | 4248 | #ifdef TARGET_X86_64 |
... | ... | @@ -4247,7 +4253,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4247 | 4253 | s->pc += 8; |
4248 | 4254 | reg = (b & 7) | REX_B(s); |
4249 | 4255 | gen_movtl_T0_im(tmp); |
4250 | - gen_op_mov_reg_T0[OT_QUAD][reg](); | |
4256 | + gen_op_mov_reg_T0(OT_QUAD, reg); | |
4251 | 4257 | } else |
4252 | 4258 | #endif |
4253 | 4259 | { |
... | ... | @@ -4255,7 +4261,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4255 | 4261 | val = insn_get(s, ot); |
4256 | 4262 | reg = (b & 7) | REX_B(s); |
4257 | 4263 | gen_op_movl_T0_im(val); |
4258 | - gen_op_mov_reg_T0[ot][reg](); | |
4264 | + gen_op_mov_reg_T0(ot, reg); | |
4259 | 4265 | } |
4260 | 4266 | break; |
4261 | 4267 | |
... | ... | @@ -4276,21 +4282,21 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4276 | 4282 | if (mod == 3) { |
4277 | 4283 | rm = (modrm & 7) | REX_B(s); |
4278 | 4284 | do_xchg_reg: |
4279 | - gen_op_mov_TN_reg[ot][0][reg](); | |
4280 | - gen_op_mov_TN_reg[ot][1][rm](); | |
4281 | - gen_op_mov_reg_T0[ot][rm](); | |
4282 | - gen_op_mov_reg_T1[ot][reg](); | |
4285 | + gen_op_mov_TN_reg(ot, 0, reg); | |
4286 | + gen_op_mov_TN_reg(ot, 1, rm); | |
4287 | + gen_op_mov_reg_T0(ot, rm); | |
4288 | + gen_op_mov_reg_T1(ot, reg); | |
4283 | 4289 | } else { |
4284 | 4290 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
4285 | - gen_op_mov_TN_reg[ot][0][reg](); | |
4291 | + gen_op_mov_TN_reg(ot, 0, reg); | |
4286 | 4292 | /* for xchg, lock is implicit */ |
4287 | 4293 | if (!(prefixes & PREFIX_LOCK)) |
4288 | 4294 | gen_op_lock(); |
4289 | - gen_op_ld_T1_A0[ot + s->mem_index](); | |
4290 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
4295 | + gen_op_ld_T1_A0(ot + s->mem_index); | |
4296 | + gen_op_st_T0_A0(ot + s->mem_index); | |
4291 | 4297 | if (!(prefixes & PREFIX_LOCK)) |
4292 | 4298 | gen_op_unlock(); |
4293 | - gen_op_mov_reg_T1[ot][reg](); | |
4299 | + gen_op_mov_reg_T1(ot, reg); | |
4294 | 4300 | } |
4295 | 4301 | break; |
4296 | 4302 | case 0xc4: /* les Gv */ |
... | ... | @@ -4319,13 +4325,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4319 | 4325 | if (mod == 3) |
4320 | 4326 | goto illegal_op; |
4321 | 4327 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
4322 | - gen_op_ld_T1_A0[ot + s->mem_index](); | |
4328 | + gen_op_ld_T1_A0(ot + s->mem_index); | |
4323 | 4329 | gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); |
4324 | 4330 | /* load the segment first to handle exceptions properly */ |
4325 | - gen_op_ldu_T0_A0[OT_WORD + s->mem_index](); | |
4331 | + gen_op_ldu_T0_A0(OT_WORD + s->mem_index); | |
4326 | 4332 | gen_movl_seg_T0(s, op, pc_start - s->cs_base); |
4327 | 4333 | /* then put the data */ |
4328 | - gen_op_mov_reg_T1[ot][reg](); | |
4334 | + gen_op_mov_reg_T1(ot, reg); | |
4329 | 4335 | if (s->is_jmp) { |
4330 | 4336 | gen_jmp_im(s->pc - s->cs_base); |
4331 | 4337 | gen_eob(s); |
... | ... | @@ -4405,11 +4411,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4405 | 4411 | |
4406 | 4412 | if (mod != 3) { |
4407 | 4413 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
4408 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
4414 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
4409 | 4415 | } else { |
4410 | - gen_op_mov_TN_reg[ot][0][rm](); | |
4416 | + gen_op_mov_TN_reg(ot, 0, rm); | |
4411 | 4417 | } |
4412 | - gen_op_mov_TN_reg[ot][1][reg](); | |
4418 | + gen_op_mov_TN_reg(ot, 1, reg); | |
4413 | 4419 | |
4414 | 4420 | if (shift) { |
4415 | 4421 | val = ldub_code(s->pc++); |
... | ... | @@ -4437,7 +4443,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4437 | 4443 | s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ |
4438 | 4444 | } |
4439 | 4445 | if (mod == 3) { |
4440 | - gen_op_mov_reg_T0[ot][rm](); | |
4446 | + gen_op_mov_reg_T0(ot, rm); | |
4441 | 4447 | } |
4442 | 4448 | break; |
4443 | 4449 | |
... | ... | @@ -4969,7 +4975,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4969 | 4975 | else |
4970 | 4976 | ot = dflag ? OT_LONG : OT_WORD; |
4971 | 4977 | gen_check_io(s, ot, 1, pc_start - s->cs_base); |
4972 | - gen_op_mov_TN_reg[OT_WORD][0][R_EDX](); | |
4978 | + gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); | |
4973 | 4979 | gen_op_andl_T0_ffff(); |
4974 | 4980 | if (gen_svm_check_io(s, pc_start, |
4975 | 4981 | SVM_IOIO_TYPE_MASK | (1 << (4+ot)) | |
... | ... | @@ -4988,7 +4994,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
4988 | 4994 | else |
4989 | 4995 | ot = dflag ? OT_LONG : OT_WORD; |
4990 | 4996 | gen_check_io(s, ot, 1, pc_start - s->cs_base); |
4991 | - gen_op_mov_TN_reg[OT_WORD][0][R_EDX](); | |
4997 | + gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); | |
4992 | 4998 | gen_op_andl_T0_ffff(); |
4993 | 4999 | if (gen_svm_check_io(s, pc_start, |
4994 | 5000 | (1 << (4+ot)) | svm_is_rep(prefixes) | |
... | ... | @@ -5018,7 +5024,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5018 | 5024 | (1 << (4+ot)))) |
5019 | 5025 | break; |
5020 | 5026 | gen_op_in[ot](); |
5021 | - gen_op_mov_reg_T1[ot][R_EAX](); | |
5027 | + gen_op_mov_reg_T1(ot, R_EAX); | |
5022 | 5028 | break; |
5023 | 5029 | case 0xe6: |
5024 | 5030 | case 0xe7: |
... | ... | @@ -5032,7 +5038,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5032 | 5038 | if (gen_svm_check_io(s, pc_start, svm_is_rep(prefixes) | |
5033 | 5039 | (1 << (4+ot)))) |
5034 | 5040 | break; |
5035 | - gen_op_mov_TN_reg[ot][1][R_EAX](); | |
5041 | + gen_op_mov_TN_reg(ot, 1, R_EAX); | |
5036 | 5042 | gen_op_out[ot](); |
5037 | 5043 | break; |
5038 | 5044 | case 0xec: |
... | ... | @@ -5041,7 +5047,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5041 | 5047 | ot = OT_BYTE; |
5042 | 5048 | else |
5043 | 5049 | ot = dflag ? OT_LONG : OT_WORD; |
5044 | - gen_op_mov_TN_reg[OT_WORD][0][R_EDX](); | |
5050 | + gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); | |
5045 | 5051 | gen_op_andl_T0_ffff(); |
5046 | 5052 | gen_check_io(s, ot, 0, pc_start - s->cs_base); |
5047 | 5053 | if (gen_svm_check_io(s, pc_start, |
... | ... | @@ -5049,7 +5055,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5049 | 5055 | (1 << (4+ot)))) |
5050 | 5056 | break; |
5051 | 5057 | gen_op_in[ot](); |
5052 | - gen_op_mov_reg_T1[ot][R_EAX](); | |
5058 | + gen_op_mov_reg_T1(ot, R_EAX); | |
5053 | 5059 | break; |
5054 | 5060 | case 0xee: |
5055 | 5061 | case 0xef: |
... | ... | @@ -5057,13 +5063,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5057 | 5063 | ot = OT_BYTE; |
5058 | 5064 | else |
5059 | 5065 | ot = dflag ? OT_LONG : OT_WORD; |
5060 | - gen_op_mov_TN_reg[OT_WORD][0][R_EDX](); | |
5066 | + gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); | |
5061 | 5067 | gen_op_andl_T0_ffff(); |
5062 | 5068 | gen_check_io(s, ot, 0, pc_start - s->cs_base); |
5063 | 5069 | if (gen_svm_check_io(s, pc_start, |
5064 | 5070 | svm_is_rep(prefixes) | (1 << (4+ot)))) |
5065 | 5071 | break; |
5066 | - gen_op_mov_TN_reg[ot][1][R_EAX](); | |
5072 | + gen_op_mov_TN_reg(ot, 1, R_EAX); | |
5067 | 5073 | gen_op_out[ot](); |
5068 | 5074 | break; |
5069 | 5075 | |
... | ... | @@ -5101,7 +5107,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5101 | 5107 | } else { |
5102 | 5108 | gen_stack_A0(s); |
5103 | 5109 | /* pop offset */ |
5104 | - gen_op_ld_T0_A0[1 + s->dflag + s->mem_index](); | |
5110 | + gen_op_ld_T0_A0(1 + s->dflag + s->mem_index); | |
5105 | 5111 | if (s->dflag == 0) |
5106 | 5112 | gen_op_andl_T0_ffff(); |
5107 | 5113 | /* NOTE: keeping EIP updated is not a problem in case of |
... | ... | @@ -5109,7 +5115,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5109 | 5115 | gen_op_jmp_T0(); |
5110 | 5116 | /* pop selector */ |
5111 | 5117 | gen_op_addl_A0_im(2 << s->dflag); |
5112 | - gen_op_ld_T0_A0[1 + s->dflag + s->mem_index](); | |
5118 | + gen_op_ld_T0_A0(1 + s->dflag + s->mem_index); | |
5113 | 5119 | gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS])); |
5114 | 5120 | /* add stack offset */ |
5115 | 5121 | gen_stack_update(s, val + (4 << s->dflag)); |
... | ... | @@ -5232,10 +5238,10 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5232 | 5238 | gen_setcc(s, b); |
5233 | 5239 | if (mod != 3) { |
5234 | 5240 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
5235 | - gen_op_ld_T1_A0[ot + s->mem_index](); | |
5241 | + gen_op_ld_T1_A0(ot + s->mem_index); | |
5236 | 5242 | } else { |
5237 | 5243 | rm = (modrm & 7) | REX_B(s); |
5238 | - gen_op_mov_TN_reg[ot][1][rm](); | |
5244 | + gen_op_mov_TN_reg(ot, 1, rm); | |
5239 | 5245 | } |
5240 | 5246 | gen_op_cmov_reg_T1_T0[ot - OT_WORD][reg](); |
5241 | 5247 | break; |
... | ... | @@ -5292,7 +5298,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5292 | 5298 | case 0x9e: /* sahf */ |
5293 | 5299 | if (CODE64(s)) |
5294 | 5300 | goto illegal_op; |
5295 | - gen_op_mov_TN_reg[OT_BYTE][0][R_AH](); | |
5301 | + gen_op_mov_TN_reg(OT_BYTE, 0, R_AH); | |
5296 | 5302 | if (s->cc_op != CC_OP_DYNAMIC) |
5297 | 5303 | gen_op_set_cc_op(s->cc_op); |
5298 | 5304 | gen_op_movb_eflags_T0(); |
... | ... | @@ -5304,7 +5310,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5304 | 5310 | if (s->cc_op != CC_OP_DYNAMIC) |
5305 | 5311 | gen_op_set_cc_op(s->cc_op); |
5306 | 5312 | gen_op_movl_T0_eflags(); |
5307 | - gen_op_mov_reg_T0[OT_BYTE][R_AH](); | |
5313 | + gen_op_mov_reg_T0(OT_BYTE, R_AH); | |
5308 | 5314 | break; |
5309 | 5315 | case 0xf5: /* cmc */ |
5310 | 5316 | if (s->cc_op != CC_OP_DYNAMIC) |
... | ... | @@ -5342,9 +5348,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5342 | 5348 | if (mod != 3) { |
5343 | 5349 | s->rip_offset = 1; |
5344 | 5350 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
5345 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
5351 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
5346 | 5352 | } else { |
5347 | - gen_op_mov_TN_reg[ot][0][rm](); | |
5353 | + gen_op_mov_TN_reg(ot, 0, rm); | |
5348 | 5354 | } |
5349 | 5355 | /* load shift */ |
5350 | 5356 | val = ldub_code(s->pc++); |
... | ... | @@ -5356,9 +5362,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5356 | 5362 | s->cc_op = CC_OP_SARB + ot; |
5357 | 5363 | if (op != 0) { |
5358 | 5364 | if (mod != 3) |
5359 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
5365 | + gen_op_st_T0_A0(ot + s->mem_index); | |
5360 | 5366 | else |
5361 | - gen_op_mov_reg_T0[ot][rm](); | |
5367 | + gen_op_mov_reg_T0(ot, rm); | |
5362 | 5368 | gen_op_update_bt_cc(); |
5363 | 5369 | } |
5364 | 5370 | break; |
... | ... | @@ -5379,22 +5385,22 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5379 | 5385 | reg = ((modrm >> 3) & 7) | rex_r; |
5380 | 5386 | mod = (modrm >> 6) & 3; |
5381 | 5387 | rm = (modrm & 7) | REX_B(s); |
5382 | - gen_op_mov_TN_reg[OT_LONG][1][reg](); | |
5388 | + gen_op_mov_TN_reg(OT_LONG, 1, reg); | |
5383 | 5389 | if (mod != 3) { |
5384 | 5390 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
5385 | 5391 | /* specific case: we need to add a displacement */ |
5386 | 5392 | gen_op_add_bit_A0_T1[ot - OT_WORD](); |
5387 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
5393 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
5388 | 5394 | } else { |
5389 | - gen_op_mov_TN_reg[ot][0][rm](); | |
5395 | + gen_op_mov_TN_reg(ot, 0, rm); | |
5390 | 5396 | } |
5391 | 5397 | gen_op_btx_T0_T1_cc[ot - OT_WORD][op](); |
5392 | 5398 | s->cc_op = CC_OP_SARB + ot; |
5393 | 5399 | if (op != 0) { |
5394 | 5400 | if (mod != 3) |
5395 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
5401 | + gen_op_st_T0_A0(ot + s->mem_index); | |
5396 | 5402 | else |
5397 | - gen_op_mov_reg_T0[ot][rm](); | |
5403 | + gen_op_mov_reg_T0(ot, rm); | |
5398 | 5404 | gen_op_update_bt_cc(); |
5399 | 5405 | } |
5400 | 5406 | break; |
... | ... | @@ -5406,9 +5412,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5406 | 5412 | gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); |
5407 | 5413 | /* NOTE: in order to handle the 0 case, we must load the |
5408 | 5414 | result. It could be optimized with a generated jump */ |
5409 | - gen_op_mov_TN_reg[ot][1][reg](); | |
5415 | + gen_op_mov_TN_reg(ot, 1, reg); | |
5410 | 5416 | gen_op_bsx_T0_cc[ot - OT_WORD][b & 1](); |
5411 | - gen_op_mov_reg_T1[ot][reg](); | |
5417 | + gen_op_mov_reg_T1(ot, reg); | |
5412 | 5418 | s->cc_op = CC_OP_LOGICB + ot; |
5413 | 5419 | break; |
5414 | 5420 | /************************/ |
... | ... | @@ -5569,7 +5575,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5569 | 5575 | mod = (modrm >> 6) & 3; |
5570 | 5576 | if (mod == 3) |
5571 | 5577 | goto illegal_op; |
5572 | - gen_op_mov_TN_reg[ot][0][reg](); | |
5578 | + gen_op_mov_TN_reg(ot, 0, reg); | |
5573 | 5579 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
5574 | 5580 | gen_jmp_im(pc_start - s->cs_base); |
5575 | 5581 | if (ot == OT_WORD) |
... | ... | @@ -5581,16 +5587,27 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5581 | 5587 | reg = (b & 7) | REX_B(s); |
5582 | 5588 | #ifdef TARGET_X86_64 |
5583 | 5589 | if (dflag == 2) { |
5584 | - gen_op_mov_TN_reg[OT_QUAD][0][reg](); | |
5585 | - gen_op_bswapq_T0(); | |
5586 | - gen_op_mov_reg_T0[OT_QUAD][reg](); | |
5590 | + gen_op_mov_TN_reg(OT_QUAD, 0, reg); | |
5591 | + tcg_gen_bswap_i64(cpu_T[0], cpu_T[0]); | |
5592 | + gen_op_mov_reg_T0(OT_QUAD, reg); | |
5587 | 5593 | } else |
5588 | -#endif | |
5589 | 5594 | { |
5590 | - gen_op_mov_TN_reg[OT_LONG][0][reg](); | |
5591 | - gen_op_bswapl_T0(); | |
5592 | - gen_op_mov_reg_T0[OT_LONG][reg](); | |
5595 | + int tmp0; | |
5596 | + gen_op_mov_TN_reg(OT_LONG, 0, reg); | |
5597 | + | |
5598 | + tmp0 = tcg_temp_new(TCG_TYPE_I32); | |
5599 | + tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]); | |
5600 | + tcg_gen_bswap_i32(tmp0, tmp0); | |
5601 | + tcg_gen_extu_i32_i64(cpu_T[0], tmp0); | |
5602 | + gen_op_mov_reg_T0(OT_LONG, reg); | |
5603 | + } | |
5604 | +#else | |
5605 | + { | |
5606 | + gen_op_mov_TN_reg(OT_LONG, 0, reg); | |
5607 | + tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); | |
5608 | + gen_op_mov_reg_T0(OT_LONG, reg); | |
5593 | 5609 | } |
5610 | +#endif | |
5594 | 5611 | break; |
5595 | 5612 | case 0xd6: /* salc */ |
5596 | 5613 | if (CODE64(s)) |
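Editor's note: the BSWAP case above is the first place the translator builds TCG operations by hand. On a 64-bit target the 32-bit form moves the value into an i32 temporary, swaps it there, and zero-extends it back into cpu_T[0]. The same three-step sequence in isolation; gen_bswap32_T0 is a hypothetical name, and temporaries are plain ints at this stage, exactly as in the hunk.

    /* Byte-swap the low 32 bits of cpu_T[0] and zero-extend the result,
       mirroring the TARGET_X86_64 path above. */
    static void gen_bswap32_T0(void)
    {
        int t32 = tcg_temp_new(TCG_TYPE_I32);

        tcg_gen_trunc_i64_i32(t32, cpu_T[0]);   /* keep the low 32 bits */
        tcg_gen_bswap_i32(t32, t32);            /* swap the four bytes */
        tcg_gen_extu_i32_i64(cpu_T[0], t32);    /* widen back, upper half zero */
    }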
... | ... | @@ -5821,12 +5838,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5821 | 5838 | break; |
5822 | 5839 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
5823 | 5840 | gen_op_movl_T0_env(offsetof(CPUX86State, gdt.limit)); |
5824 | - gen_op_st_T0_A0[OT_WORD + s->mem_index](); | |
5841 | + gen_op_st_T0_A0(OT_WORD + s->mem_index); | |
5825 | 5842 | gen_add_A0_im(s, 2); |
5826 | 5843 | gen_op_movtl_T0_env(offsetof(CPUX86State, gdt.base)); |
5827 | 5844 | if (!s->dflag) |
5828 | 5845 | gen_op_andl_T0_im(0xffffff); |
5829 | - gen_op_st_T0_A0[CODE64(s) + OT_LONG + s->mem_index](); | |
5846 | + gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index); | |
5830 | 5847 | break; |
5831 | 5848 | case 1: |
5832 | 5849 | if (mod == 3) { |
... | ... | @@ -5840,12 +5857,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5840 | 5857 | gen_jmp_im(pc_start - s->cs_base); |
5841 | 5858 | #ifdef TARGET_X86_64 |
5842 | 5859 | if (s->aflag == 2) { |
5843 | - gen_op_movq_A0_reg[R_EBX](); | |
5860 | + gen_op_movq_A0_reg(R_EBX); | |
5844 | 5861 | gen_op_addq_A0_AL(); |
5845 | 5862 | } else |
5846 | 5863 | #endif |
5847 | 5864 | { |
5848 | - gen_op_movl_A0_reg[R_EBX](); | |
5865 | + gen_op_movl_A0_reg(R_EBX); | |
5849 | 5866 | gen_op_addl_A0_AL(); |
5850 | 5867 | if (s->aflag == 0) |
5851 | 5868 | gen_op_andl_A0_ffff(); |
... | ... | @@ -5875,12 +5892,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5875 | 5892 | break; |
5876 | 5893 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
5877 | 5894 | gen_op_movl_T0_env(offsetof(CPUX86State, idt.limit)); |
5878 | - gen_op_st_T0_A0[OT_WORD + s->mem_index](); | |
5895 | + gen_op_st_T0_A0(OT_WORD + s->mem_index); | |
5879 | 5896 | gen_add_A0_im(s, 2); |
5880 | 5897 | gen_op_movtl_T0_env(offsetof(CPUX86State, idt.base)); |
5881 | 5898 | if (!s->dflag) |
5882 | 5899 | gen_op_andl_T0_im(0xffffff); |
5883 | - gen_op_st_T0_A0[CODE64(s) + OT_LONG + s->mem_index](); | |
5900 | + gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index); | |
5884 | 5901 | } |
5885 | 5902 | break; |
5886 | 5903 | case 2: /* lgdt */ |
... | ... | @@ -5943,9 +5960,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5943 | 5960 | op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE)) |
5944 | 5961 | break; |
5945 | 5962 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
5946 | - gen_op_ld_T1_A0[OT_WORD + s->mem_index](); | |
5963 | + gen_op_ld_T1_A0(OT_WORD + s->mem_index); | |
5947 | 5964 | gen_add_A0_im(s, 2); |
5948 | - gen_op_ld_T0_A0[CODE64(s) + OT_LONG + s->mem_index](); | |
5965 | + gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index); | |
5949 | 5966 | if (!s->dflag) |
5950 | 5967 | gen_op_andl_T0_im(0xffffff); |
5951 | 5968 | if (op == 2) { |
... | ... | @@ -6029,19 +6046,19 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
6029 | 6046 | rm = (modrm & 7) | REX_B(s); |
6030 | 6047 | |
6031 | 6048 | if (mod == 3) { |
6032 | - gen_op_mov_TN_reg[OT_LONG][0][rm](); | |
6049 | + gen_op_mov_TN_reg(OT_LONG, 0, rm); | |
6033 | 6050 | /* sign extend */ |
6034 | 6051 | if (d_ot == OT_QUAD) |
6035 | 6052 | gen_op_movslq_T0_T0(); |
6036 | - gen_op_mov_reg_T0[d_ot][reg](); | |
6053 | + gen_op_mov_reg_T0(d_ot, reg); | |
6037 | 6054 | } else { |
6038 | 6055 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
6039 | 6056 | if (d_ot == OT_QUAD) { |
6040 | - gen_op_lds_T0_A0[OT_LONG + s->mem_index](); | |
6057 | + gen_op_lds_T0_A0(OT_LONG + s->mem_index); | |
6041 | 6058 | } else { |
6042 | - gen_op_ld_T0_A0[OT_LONG + s->mem_index](); | |
6059 | + gen_op_ld_T0_A0(OT_LONG + s->mem_index); | |
6043 | 6060 | } |
6044 | - gen_op_mov_reg_T0[d_ot][reg](); | |
6061 | + gen_op_mov_reg_T0(d_ot, reg); | |
6045 | 6062 | } |
6046 | 6063 | } else |
6047 | 6064 | #endif |
... | ... | @@ -6055,18 +6072,18 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
6055 | 6072 | rm = modrm & 7; |
6056 | 6073 | if (mod != 3) { |
6057 | 6074 | gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); |
6058 | - gen_op_ld_T0_A0[ot + s->mem_index](); | |
6075 | + gen_op_ld_T0_A0(ot + s->mem_index); | |
6059 | 6076 | } else { |
6060 | - gen_op_mov_TN_reg[ot][0][rm](); | |
6077 | + gen_op_mov_TN_reg(ot, 0, rm); | |
6061 | 6078 | } |
6062 | 6079 | if (s->cc_op != CC_OP_DYNAMIC) |
6063 | 6080 | gen_op_set_cc_op(s->cc_op); |
6064 | 6081 | gen_op_arpl(); |
6065 | 6082 | s->cc_op = CC_OP_EFLAGS; |
6066 | 6083 | if (mod != 3) { |
6067 | - gen_op_st_T0_A0[ot + s->mem_index](); | |
6084 | + gen_op_st_T0_A0(ot + s->mem_index); | |
6068 | 6085 | } else { |
6069 | - gen_op_mov_reg_T0[ot][rm](); | |
6086 | + gen_op_mov_reg_T0(ot, rm); | |
6070 | 6087 | } |
6071 | 6088 | gen_op_arpl_update(); |
6072 | 6089 | } |
... | ... | @@ -6079,7 +6096,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
6079 | 6096 | modrm = ldub_code(s->pc++); |
6080 | 6097 | reg = ((modrm >> 3) & 7) | rex_r; |
6081 | 6098 | gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); |
6082 | - gen_op_mov_TN_reg[ot][1][reg](); | |
6099 | + gen_op_mov_TN_reg(ot, 1, reg); | |
6083 | 6100 | if (s->cc_op != CC_OP_DYNAMIC) |
6084 | 6101 | gen_op_set_cc_op(s->cc_op); |
6085 | 6102 | if (b == 0x102) |
... | ... | @@ -6087,7 +6104,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
6087 | 6104 | else |
6088 | 6105 | gen_op_lsl(); |
6089 | 6106 | s->cc_op = CC_OP_EFLAGS; |
6090 | - gen_op_mov_reg_T1[ot][reg](); | |
6107 | + gen_op_mov_reg_T1(ot, reg); | |
6091 | 6108 | break; |
6092 | 6109 | case 0x118: |
6093 | 6110 | modrm = ldub_code(s->pc++); |
... | ... | @@ -6134,7 +6151,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
6134 | 6151 | case 8: |
6135 | 6152 | if (b & 2) { |
6136 | 6153 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0 + reg); |
6137 | - gen_op_mov_TN_reg[ot][0][rm](); | |
6154 | + gen_op_mov_TN_reg(ot, 0, rm); | |
6138 | 6155 | gen_op_movl_crN_T0(reg); |
6139 | 6156 | gen_jmp_im(s->pc - s->cs_base); |
6140 | 6157 | gen_eob(s); |
... | ... | @@ -6146,7 +6163,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
6146 | 6163 | else |
6147 | 6164 | #endif |
6148 | 6165 | gen_op_movtl_T0_env(offsetof(CPUX86State,cr[reg])); |
6149 | - gen_op_mov_reg_T0[ot][rm](); | |
6166 | + gen_op_mov_reg_T0(ot, rm); | |
6150 | 6167 | } |
6151 | 6168 | break; |
6152 | 6169 | default: |
... | ... | @@ -6173,14 +6190,14 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
6173 | 6190 | goto illegal_op; |
6174 | 6191 | if (b & 2) { |
6175 | 6192 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); |
6176 | - gen_op_mov_TN_reg[ot][0][rm](); | |
6193 | + gen_op_mov_TN_reg(ot, 0, rm); | |
6177 | 6194 | gen_op_movl_drN_T0(reg); |
6178 | 6195 | gen_jmp_im(s->pc - s->cs_base); |
6179 | 6196 | gen_eob(s); |
6180 | 6197 | } else { |
6181 | 6198 | gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); |
6182 | 6199 | gen_op_movtl_T0_env(offsetof(CPUX86State,dr[reg])); |
6183 | - gen_op_mov_reg_T0[ot][rm](); | |
6200 | + gen_op_mov_reg_T0(ot, rm); | |
6184 | 6201 | } |
6185 | 6202 | } |
6186 | 6203 | break; |
... | ... | @@ -6246,11 +6263,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
6246 | 6263 | goto illegal_op; |
6247 | 6264 | gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); |
6248 | 6265 | if (op == 2) { |
6249 | - gen_op_ld_T0_A0[OT_LONG + s->mem_index](); | |
6266 | + gen_op_ld_T0_A0(OT_LONG + s->mem_index); | |
6250 | 6267 | gen_op_movl_env_T0(offsetof(CPUX86State, mxcsr)); |
6251 | 6268 | } else { |
6252 | 6269 | gen_op_movl_T0_env(offsetof(CPUX86State, mxcsr)); |
6253 | - gen_op_st_T0_A0[OT_LONG + s->mem_index](); | |
6270 | + gen_op_st_T0_A0(OT_LONG + s->mem_index); | |
6254 | 6271 | } |
6255 | 6272 | break; |
6256 | 6273 | case 5: /* lfence */ |
... | ... | @@ -6647,6 +6664,17 @@ static uint16_t opc_simpler[NB_OPS] = { |
6647 | 6664 | #endif |
6648 | 6665 | }; |
6649 | 6666 | |
6667 | +static void tcg_macro_func(TCGContext *s, int macro_id, const int *dead_args) | |
6668 | +{ | |
6669 | + switch(macro_id) { | |
6670 | +#ifdef MACRO_TEST | |
6671 | + case MACRO_TEST: | |
6672 | + tcg_gen_helper_0_1(helper_divl_EAX_T0, cpu_T[0]); | |
6673 | + break; | |
6674 | +#endif | |
6675 | + } | |
6676 | +} | |
6677 | + | |
6650 | 6678 | void optimize_flags_init(void) |
6651 | 6679 | { |
6652 | 6680 | int i; |
... | ... | @@ -6655,6 +6683,25 @@ void optimize_flags_init(void) |
6655 | 6683 | if (opc_simpler[i] == 0) |
6656 | 6684 | opc_simpler[i] = i; |
6657 | 6685 | } |
6686 | + | |
6687 | + tcg_set_macro_func(&tcg_ctx, tcg_macro_func); | |
6688 | + | |
6689 | + cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env"); | |
6690 | +#if TARGET_LONG_BITS > HOST_LONG_BITS | |
6691 | + cpu_T[0] = tcg_global_mem_new(TCG_TYPE_TL, | |
6692 | + TCG_AREG0, offsetof(CPUState, t0), "T0"); | |
6693 | + cpu_T[1] = tcg_global_mem_new(TCG_TYPE_TL, | |
6694 | + TCG_AREG0, offsetof(CPUState, t1), "T1"); | |
6695 | + cpu_A0 = tcg_global_mem_new(TCG_TYPE_TL, | |
6696 | + TCG_AREG0, offsetof(CPUState, t2), "A0"); | |
6697 | +#else | |
6698 | + cpu_T[0] = tcg_global_reg_new(TCG_TYPE_TL, TCG_AREG1, "T0"); | |
6699 | + cpu_T[1] = tcg_global_reg_new(TCG_TYPE_TL, TCG_AREG2, "T1"); | |
6700 | + cpu_A0 = tcg_global_reg_new(TCG_TYPE_TL, TCG_AREG3, "A0"); | |
6701 | +#endif | |
6702 | + /* the helpers are only registered to print debug info */ | |
6703 | + TCG_HELPER(helper_divl_EAX_T0); | |
6704 | + TCG_HELPER(helper_idivl_EAX_T0); | |
6658 | 6705 | } |
6659 | 6706 | |
6660 | 6707 | /* CPU flags computation optimization: we move backward thru the |
... | ... | @@ -6746,10 +6793,9 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
6746 | 6793 | printf("ERROR addseg\n"); |
6747 | 6794 | #endif |
6748 | 6795 | |
6749 | - gen_opc_ptr = gen_opc_buf; | |
6796 | + cpu_tmp0 = tcg_temp_new(TCG_TYPE_TL); | |
6797 | + | |
6750 | 6798 | gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; |
6751 | - gen_opparam_ptr = gen_opparam_buf; | |
6752 | - nb_gen_labels = 0; | |
6753 | 6799 | |
6754 | 6800 | dc->is_jmp = DISAS_NEXT; |
6755 | 6801 | pc_ptr = pc_start; |
... | ... | @@ -6824,9 +6870,9 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
6824 | 6870 | disas_flags = !dc->code32; |
6825 | 6871 | target_disas(logfile, pc_start, pc_ptr - pc_start, disas_flags); |
6826 | 6872 | fprintf(logfile, "\n"); |
6827 | - if (loglevel & CPU_LOG_TB_OP) { | |
6828 | - fprintf(logfile, "OP:\n"); | |
6829 | - dump_ops(gen_opc_buf, gen_opparam_buf); | |
6873 | + if (loglevel & CPU_LOG_TB_OP_OPT) { | |
6874 | + fprintf(logfile, "OP before opt:\n"); | |
6875 | + tcg_dump_ops(&tcg_ctx, logfile); | |
6830 | 6876 | fprintf(logfile, "\n"); |
6831 | 6877 | } |
6832 | 6878 | } |
... | ... | @@ -6835,13 +6881,6 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
6835 | 6881 | /* optimize flag computations */ |
6836 | 6882 | optimize_flags(gen_opc_buf, gen_opc_ptr - gen_opc_buf); |
6837 | 6883 | |
6838 | -#ifdef DEBUG_DISAS | |
6839 | - if (loglevel & CPU_LOG_TB_OP_OPT) { | |
6840 | - fprintf(logfile, "AFTER FLAGS OPT:\n"); | |
6841 | - dump_ops(gen_opc_buf, gen_opparam_buf); | |
6842 | - fprintf(logfile, "\n"); | |
6843 | - } | |
6844 | -#endif | |
6845 | 6884 | if (!search_pc) |
6846 | 6885 | tb->size = pc_ptr - pc_start; |
6847 | 6886 | return 0; | ... | ... |
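
Note on the target-i386/translate.c changes above: call sites stop indexing the dyngen-generated function-pointer tables directly (gen_op_st_T0_A0[idx]()) and instead go through parameterized helpers (gen_op_st_T0_A0(idx)), and optimize_flags_init() now registers env, T0, T1 and A0 as TCG globals (fixed host registers when the target word fits the host, otherwise memory slots in CPUState). A minimal sketch of what such a helper can look like follows; the typedef and table names are illustrative assumptions, not the exact code introduced by this commit. The point is only that the dispatch moves behind one function, so callers no longer care whether the op still comes from dyngen or is emitted directly through TCG.

typedef void (GenOpFunc)(void);

/* hypothetical dispatch table, indexed by operand size plus memory index
   and filled with the existing per-variant dyngen ops */
static GenOpFunc *ld_T0_A0_table[3 * 4];

/* callers now write gen_op_ld_T0_A0(ot + s->mem_index) instead of
   gen_op_ld_T0_A0[ot + s->mem_index]() */
static inline void gen_op_ld_T0_A0(int idx)
{
    ld_T0_A0_table[idx]();
}
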
target-m68k/op.c
... | ... | @@ -482,7 +482,7 @@ OP(set_sr) |
482 | 482 | FORCE_RET(); |
483 | 483 | } |
484 | 484 | |
485 | -OP(jmp) | |
485 | +OP(jmp_im) | |
486 | 486 | { |
487 | 487 | GOTO_LABEL_PARAM(1); |
488 | 488 | } |
... | ... | @@ -522,22 +522,6 @@ OP(jmp_T0) |
522 | 522 | FORCE_RET(); |
523 | 523 | } |
524 | 524 | |
525 | -void OPPROTO op_goto_tb0(void) | |
526 | -{ | |
527 | - GOTO_TB(op_goto_tb0, PARAM1, 0); | |
528 | -} | |
529 | - | |
530 | -void OPPROTO op_goto_tb1(void) | |
531 | -{ | |
532 | - GOTO_TB(op_goto_tb1, PARAM1, 1); | |
533 | -} | |
534 | - | |
535 | -OP(exit_tb) | |
536 | -{ | |
537 | - EXIT_TB(); | |
538 | -} | |
539 | - | |
540 | - | |
541 | 525 | /* Floating point. */ |
542 | 526 | OP(f64_to_i32) |
543 | 527 | { | ... | ... |
target-m68k/translate.c
... | ... | @@ -28,6 +28,7 @@ |
28 | 28 | #include "cpu.h" |
29 | 29 | #include "exec-all.h" |
30 | 30 | #include "disas.h" |
31 | +#include "tcg-op.h" | |
31 | 32 | #include "m68k-qreg.h" |
32 | 33 | |
33 | 34 | //#define DEBUG_DISPATCH 1 |
... | ... | @@ -67,20 +68,9 @@ typedef struct DisasContext { |
67 | 68 | static void *gen_throws_exception; |
68 | 69 | #define gen_last_qop NULL |
69 | 70 | |
70 | -static uint16_t *gen_opc_ptr; | |
71 | -static uint32_t *gen_opparam_ptr; | |
72 | 71 | extern FILE *logfile; |
73 | 72 | extern int loglevel; |
74 | 73 | |
75 | -enum { | |
76 | -#define DEF(s, n, copy_size) INDEX_op_ ## s, | |
77 | -#include "opc.h" | |
78 | -#undef DEF | |
79 | - NB_OPS, | |
80 | -}; | |
81 | - | |
82 | -#include "gen-op.h" | |
83 | - | |
84 | 74 | #if defined(CONFIG_USER_ONLY) |
85 | 75 | #define gen_st(s, name, addr, val) gen_op_st##name##_raw(addr, val) |
86 | 76 | #define gen_ld(s, name, val, addr) gen_op_ld##name##_raw(val, addr) |
... | ... | @@ -622,7 +612,7 @@ static void gen_jmpcc(DisasContext *s, int cond, int l1) |
622 | 612 | gen_flush_flags(s); |
623 | 613 | switch (cond) { |
624 | 614 | case 0: /* T */ |
625 | - gen_op_jmp(l1); | |
615 | + gen_op_jmp_im(l1); | |
626 | 616 | break; |
627 | 617 | case 1: /* F */ |
628 | 618 | break; |
... | ... | @@ -702,7 +692,7 @@ static void gen_jmpcc(DisasContext *s, int cond, int l1) |
702 | 692 | gen_op_xor32(tmp, tmp, QREG_CC_DEST); |
703 | 693 | gen_op_and32(tmp, tmp, gen_im32(CCF_V)); |
704 | 694 | gen_op_jmp_nz32(tmp, l2); |
705 | - gen_op_jmp(l1); | |
695 | + gen_op_jmp_im(l1); | |
706 | 696 | gen_set_label(l2); |
707 | 697 | } |
708 | 698 | break; |
... | ... | @@ -791,14 +781,12 @@ static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest) |
791 | 781 | gen_exception(s, dest, EXCP_DEBUG); |
792 | 782 | } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) || |
793 | 783 | (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { |
794 | - gen_op_goto_tb(0, n, (long)tb); | |
784 | + tcg_gen_goto_tb(n); | |
795 | 785 | gen_op_mov32(QREG_PC, gen_im32(dest)); |
796 | - gen_op_mov32(QREG_T0, gen_im32((long)tb + n)); | |
797 | - gen_op_exit_tb(); | |
786 | + tcg_gen_exit_tb((long)tb + n); | |
798 | 787 | } else { |
799 | 788 | gen_jmp(s, gen_im32(dest)); |
800 | - gen_op_mov32(QREG_T0, gen_im32(0)); | |
801 | - gen_op_exit_tb(); | |
789 | + tcg_gen_exit_tb(0); | |
802 | 790 | } |
803 | 791 | s->is_jmp = DISAS_TB_JUMP; |
804 | 792 | } |
... | ... | @@ -3073,7 +3061,7 @@ static void expand_op_addx_cc(qOP *qop) |
3073 | 3061 | gen_op_add32(arg0, arg0, gen_im32(1)); |
3074 | 3062 | gen_op_mov32(QREG_CC_OP, gen_im32(CC_OP_ADDX)); |
3075 | 3063 | gen_op_set_leu32(QREG_CC_X, arg0, arg1); |
3076 | - gen_op_jmp(l2); | |
3064 | + gen_op_jmp_im(l2); | |
3077 | 3065 | gen_set_label(l1); |
3078 | 3066 | gen_op_mov32(QREG_CC_OP, gen_im32(CC_OP_ADD)); |
3079 | 3067 | gen_op_set_ltu32(QREG_CC_X, arg0, arg1); |
... | ... | @@ -3093,7 +3081,7 @@ static void expand_op_subx_cc(qOP *qop) |
3093 | 3081 | gen_op_set_leu32(QREG_CC_X, arg0, arg1); |
3094 | 3082 | gen_op_sub32(arg0, arg0, gen_im32(1)); |
3095 | 3083 | gen_op_mov32(QREG_CC_OP, gen_im32(CC_OP_SUBX)); |
3096 | - gen_op_jmp(l2); | |
3084 | + gen_op_jmp_im(l2); | |
3097 | 3085 | gen_set_label(l1); |
3098 | 3086 | gen_op_set_ltu32(QREG_CC_X, arg0, arg1); |
3099 | 3087 | gen_op_mov32(QREG_CC_OP, gen_im32(CC_OP_SUB)); |
... | ... | @@ -3162,9 +3150,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3162 | 3150 | |
3163 | 3151 | dc->tb = tb; |
3164 | 3152 | |
3165 | - gen_opc_ptr = gen_opc_buf; | |
3166 | 3153 | gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; |
3167 | - gen_opparam_ptr = gen_opparam_buf; | |
3168 | 3154 | |
3169 | 3155 | dc->env = env; |
3170 | 3156 | dc->is_jmp = DISAS_NEXT; |
... | ... | @@ -3174,7 +3160,6 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3174 | 3160 | dc->fpcr = env->fpcr; |
3175 | 3161 | dc->user = (env->sr & SR_S) == 0; |
3176 | 3162 | dc->is_mem = 0; |
3177 | - nb_gen_labels = 0; | |
3178 | 3163 | lj = -1; |
3179 | 3164 | do { |
3180 | 3165 | free_qreg = 0; |
... | ... | @@ -3232,8 +3217,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3232 | 3217 | case DISAS_UPDATE: |
3233 | 3218 | gen_flush_cc_op(dc); |
3234 | 3219 | /* indicate that the hash table must be used to find the next TB */ |
3235 | - gen_op_mov32(QREG_T0, gen_im32(0)); | |
3236 | - gen_op_exit_tb(); | |
3220 | + tcg_gen_exit_tb(0); | |
3237 | 3221 | break; |
3238 | 3222 | case DISAS_TB_JUMP: |
3239 | 3223 | /* nothing more to generate */ |
... | ... | @@ -3248,11 +3232,6 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3248 | 3232 | fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start)); |
3249 | 3233 | target_disas(logfile, pc_start, dc->pc - pc_start, 0); |
3250 | 3234 | fprintf(logfile, "\n"); |
3251 | - if (loglevel & (CPU_LOG_TB_OP)) { | |
3252 | - fprintf(logfile, "OP:\n"); | |
3253 | - dump_ops(gen_opc_buf, gen_opparam_buf); | |
3254 | - fprintf(logfile, "\n"); | |
3255 | - } | |
3256 | 3235 | } |
3257 | 3236 | #endif |
3258 | 3237 | if (search_pc) { | ... | ... |
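
Note on the TB chaining changes above (and the matching MIPS changes below): the per-target op_goto_tb0/op_goto_tb1 and op_exit_tb micro-ops disappear because TCG provides tcg_gen_goto_tb(n) and tcg_gen_exit_tb(val) directly. A non-zero value of (long)tb + n tells the execution loop which jump slot of the finishing TB to patch for direct chaining; 0 means the next TB must be looked up in the hash table. A minimal sketch of the idiom, modeled on the gen_jmp_tb()/gen_goto_tb() hunks in this commit and assuming the usual cpu.h/exec-all.h/tcg-op.h environment; gen_update_pc() is a placeholder for whatever each target uses to store the new program counter.

static void gen_chain_to(TranslationBlock *tb, int n, target_ulong dest)
{
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        /* destination on the same page: emit a patchable direct jump */
        tcg_gen_goto_tb(n);                 /* n selects jump slot 0 or 1 */
        gen_update_pc(dest);                /* placeholder, target specific */
        tcg_gen_exit_tb((long)tb + n);      /* non-zero: cpu_exec patches slot n */
    } else {
        /* cross-page jump: no chaining, fall back to the TB hash lookup */
        gen_update_pc(dest);
        tcg_gen_exit_tb(0);
    }
}
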
target-mips/op.c
... | ... | @@ -1093,18 +1093,6 @@ OP_COND(lez, (target_long)T0 <= 0); |
1093 | 1093 | OP_COND(ltz, (target_long)T0 < 0); |
1094 | 1094 | |
1095 | 1095 | /* Branches */ |
1096 | -void OPPROTO op_goto_tb0(void) | |
1097 | -{ | |
1098 | - GOTO_TB(op_goto_tb0, PARAM1, 0); | |
1099 | - FORCE_RET(); | |
1100 | -} | |
1101 | - | |
1102 | -void OPPROTO op_goto_tb1(void) | |
1103 | -{ | |
1104 | - GOTO_TB(op_goto_tb1, PARAM1, 1); | |
1105 | - FORCE_RET(); | |
1106 | -} | |
1107 | - | |
1108 | 1096 | /* Branch to register */ |
1109 | 1097 | void op_save_breg_target (void) |
1110 | 1098 | { |
... | ... | @@ -3252,12 +3240,6 @@ void op_raise_exception_err (void) |
3252 | 3240 | FORCE_RET(); |
3253 | 3241 | } |
3254 | 3242 | |
3255 | -void op_exit_tb (void) | |
3256 | -{ | |
3257 | - EXIT_TB(); | |
3258 | - FORCE_RET(); | |
3259 | -} | |
3260 | - | |
3261 | 3243 | void op_wait (void) |
3262 | 3244 | { |
3263 | 3245 | env->halted = 1; | ... | ... |
target-mips/translate.c
... | ... | @@ -29,29 +29,12 @@ |
29 | 29 | #include "cpu.h" |
30 | 30 | #include "exec-all.h" |
31 | 31 | #include "disas.h" |
32 | +#include "tcg-op.h" | |
32 | 33 | |
33 | 34 | //#define MIPS_DEBUG_DISAS |
34 | 35 | //#define MIPS_DEBUG_SIGN_EXTENSIONS |
35 | 36 | //#define MIPS_SINGLE_STEP |
36 | 37 | |
37 | -#ifdef USE_DIRECT_JUMP | |
38 | -#define TBPARAM(x) | |
39 | -#else | |
40 | -#define TBPARAM(x) (long)(x) | |
41 | -#endif | |
42 | - | |
43 | -enum { | |
44 | -#define DEF(s, n, copy_size) INDEX_op_ ## s, | |
45 | -#include "opc.h" | |
46 | -#undef DEF | |
47 | - NB_OPS, | |
48 | -}; | |
49 | - | |
50 | -static uint16_t *gen_opc_ptr; | |
51 | -static uint32_t *gen_opparam_ptr; | |
52 | - | |
53 | -#include "gen-op.h" | |
54 | - | |
55 | 38 | /* MIPS major opcodes */ |
56 | 39 | #define MASK_OP_MAJOR(op) (op & (0x3F << 26)) |
57 | 40 | |
... | ... | @@ -1777,17 +1760,13 @@ static always_inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong des |
1777 | 1760 | TranslationBlock *tb; |
1778 | 1761 | tb = ctx->tb; |
1779 | 1762 | if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { |
1780 | - if (n == 0) | |
1781 | - gen_op_goto_tb0(TBPARAM(tb)); | |
1782 | - else | |
1783 | - gen_op_goto_tb1(TBPARAM(tb)); | |
1763 | + tcg_gen_goto_tb(n); | |
1784 | 1764 | gen_save_pc(dest); |
1785 | - gen_op_set_T0((long)tb + n); | |
1765 | + tcg_gen_exit_tb((long)tb + n); | |
1786 | 1766 | } else { |
1787 | 1767 | gen_save_pc(dest); |
1788 | - gen_op_reset_T0(); | |
1768 | + tcg_gen_exit_tb(0); | |
1789 | 1769 | } |
1790 | - gen_op_exit_tb(); | |
1791 | 1770 | } |
1792 | 1771 | |
1793 | 1772 | /* Branches (before delay slot) */ |
... | ... | @@ -6642,8 +6621,7 @@ static void decode_opc (CPUState *env, DisasContext *ctx) |
6642 | 6621 | /* unconditional branch to register */ |
6643 | 6622 | MIPS_DEBUG("branch to register"); |
6644 | 6623 | gen_op_breg(); |
6645 | - gen_op_reset_T0(); | |
6646 | - gen_op_exit_tb(); | |
6624 | + tcg_gen_exit_tb(0); | |
6647 | 6625 | break; |
6648 | 6626 | default: |
6649 | 6627 | MIPS_DEBUG("unknown branch"); |
... | ... | @@ -6665,10 +6643,7 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb, |
6665 | 6643 | fprintf (logfile, "search pc %d\n", search_pc); |
6666 | 6644 | |
6667 | 6645 | pc_start = tb->pc; |
6668 | - gen_opc_ptr = gen_opc_buf; | |
6669 | 6646 | gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; |
6670 | - gen_opparam_ptr = gen_opparam_buf; | |
6671 | - nb_gen_labels = 0; | |
6672 | 6647 | ctx.pc = pc_start; |
6673 | 6648 | ctx.saved_pc = -1; |
6674 | 6649 | ctx.tb = tb; |
... | ... | @@ -6748,8 +6723,7 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb, |
6748 | 6723 | break; |
6749 | 6724 | case BS_EXCP: |
6750 | 6725 | gen_op_interrupt_restart(); |
6751 | - gen_op_reset_T0(); | |
6752 | - gen_op_exit_tb(); | |
6726 | + tcg_gen_exit_tb(0); | |
6753 | 6727 | break; |
6754 | 6728 | case BS_BRANCH: |
6755 | 6729 | default: |
... | ... | @@ -6777,11 +6751,6 @@ done_generating: |
6777 | 6751 | target_disas(logfile, pc_start, ctx.pc - pc_start, 0); |
6778 | 6752 | fprintf(logfile, "\n"); |
6779 | 6753 | } |
6780 | - if (loglevel & CPU_LOG_TB_OP) { | |
6781 | - fprintf(logfile, "OP:\n"); | |
6782 | - dump_ops(gen_opc_buf, gen_opparam_buf); | |
6783 | - fprintf(logfile, "\n"); | |
6784 | - } | |
6785 | 6754 | if (loglevel & CPU_LOG_TB_CPU) { |
6786 | 6755 | fprintf(logfile, "---------------- %d %08x\n", ctx.bstate, ctx.hflags); |
6787 | 6756 | } | ... | ... |