Commit 810260a8f334d6faa2749a3729f180dff8bae76b

Authored by malc
1 parent e0e6c8c0

Preliminary PPC64/Linux host support

ppc64.ld from Heikki Lindholm's patch
http://marc.info/?l=qemu-devel&m=114086179024634&w=2

Issues:
x86_64 triple faults shortly after decompressing the kernel
No immediate versions of most 64-bit operations
More...

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4932 c046a42c-6fe2-441c-8c8c-71466251a162
Makefile.target
... ... @@ -342,6 +342,10 @@ ifeq ($(ARCH),ppc)
342 342 LDFLAGS+=-Wl,-T,$(SRC_PATH)/$(ARCH).ld
343 343 endif
344 344  
  345 +ifeq ($(ARCH),ppc64)
  346 +LDFLAGS+=-Wl,-T,$(SRC_PATH)/$(ARCH).ld
  347 +endif
  348 +
345 349 ifeq ($(ARCH),s390)
346 350 LDFLAGS+=-Wl,-T,$(SRC_PATH)/$(ARCH).ld
347 351 endif
... ...
configure
... ... @@ -637,6 +637,24 @@ if test "$cpu" = "x86_64" \
637 637 hostlongbits="64"
638 638 fi
639 639  
  640 +# ppc specific hostlongbits selection
  641 +if test "$cpu" = "powerpc" ; then
  642 + cat > $TMPC <<EOF
  643 +int main(void){return sizeof(long);}
  644 +EOF
  645 +
  646 + if $cc $ARCH_CFLAGS -o $TMPE $TMPC 2> /dev/null; then
  647 + $TMPE
  648 + case $? in
  649 + 4) hostlongbits="32";;
  650 + 8) hostlongbits="64";;
  651 + *) echo "Couldn't determine bits per long value";;
  652 + esac
  653 + else
  654 + echo hostlongbits test failed
  655 + fi
  656 +fi
  657 +
640 658 # check gcc options support
641 659 cat > $TMPC <<EOF
642 660 int main(void) {
... ... @@ -995,8 +1013,13 @@ case "$cpu" in
995 1013 echo "#define HOST_MIPS64 1" >> $config_h
996 1014 ;;
997 1015 powerpc)
998   - echo "ARCH=ppc" >> $config_mak
999   - echo "#define HOST_PPC 1" >> $config_h
  1016 + if test "$hostlongbits" = "32"; then
  1017 + echo "ARCH=ppc" >> $config_mak
  1018 + echo "#define HOST_PPC 1" >> $config_h
  1019 + else
  1020 + echo "ARCH=ppc64" >> $config_mak
  1021 + echo "#define HOST_PPC64 1" >> $config_h
  1022 + fi
1000 1023 ;;
1001 1024 s390)
1002 1025 echo "ARCH=s390" >> $config_mak
... ...
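
Note on the configure change above: the hostlongbits probe works by compiling and running a one-line program whose exit status is sizeof(long), so the status is 4 on a 32-bit PowerPC userland and 8 on a 64-bit one. A minimal standalone sketch of the same probe (the file name and compiler invocation are illustrative, not part of the patch):

/* sizeof_long.c: the process exit status is the size of 'long' in bytes. */
int main(void)
{
    return sizeof(long);
}

Building it with "cc -o sizeof_long sizeof_long.c" and running "./sizeof_long; echo $?" prints 4 or 8, which is exactly the value the case statement in configure switches on.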
dyngen-exec.h
... ... @@ -38,7 +38,7 @@ typedef unsigned int uint32_t;
38 38 // Linux/Sparc64 defines uint64_t
39 39 #if !(defined (__sparc_v9__) && defined(__linux__)) && !(defined(__APPLE__) && defined(__x86_64__))
40 40 /* XXX may be done for all 64 bits targets ? */
41   -#if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__)
  41 +#if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__) || defined(__powerpc64__)
42 42 typedef unsigned long uint64_t;
43 43 #else
44 44 typedef unsigned long long uint64_t;
... ... @@ -55,7 +55,7 @@ typedef signed short int16_t;
55 55 typedef signed int int32_t;
56 56 // Linux/Sparc64 defines int64_t
57 57 #if !(defined (__sparc_v9__) && defined(__linux__)) && !(defined(__APPLE__) && defined(__x86_64__))
58   -#if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__)
  58 +#if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__) || defined(__powerpc64__)
59 59 typedef signed long int64_t;
60 60 #else
61 61 typedef signed long long int64_t;
... ...
dyngen.c
... ... @@ -68,6 +68,13 @@
68 68 #define elf_check_arch(x) ((x) == EM_PPC)
69 69 #define ELF_USES_RELOCA
70 70  
  71 +#elif defined(HOST_PPC64)
  72 +
  73 +#define ELF_CLASS ELFCLASS64
  74 +#define ELF_ARCH EM_PPC64
  75 +#define elf_check_arch(x) ((x) == EM_PPC64)
  76 +#define ELF_USES_RELOCA
  77 +
71 78 #elif defined(HOST_S390)
72 79  
73 80 #define ELF_CLASS ELFCLASS32
... ... @@ -1551,6 +1558,8 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
1551 1558 }
1552 1559 #elif defined(HOST_ARM)
1553 1560 error("dyngen targets not supported on ARM");
  1561 +#elif defined(HOST_PPC64)
  1562 + error("dyngen targets not supported on PPC64");
1554 1563 #else
1555 1564 #error unsupported CPU
1556 1565 #endif
... ... @@ -2592,6 +2601,8 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
2592 2601 }
2593 2602 #elif defined(HOST_ARM)
2594 2603 error("dyngen targets not supported on ARM");
  2604 +#elif defined(HOST_PPC64)
  2605 + error("dyngen targets not supported on PPC64");
2595 2606 #else
2596 2607 #error unsupported CPU
2597 2608 #endif
... ...
exec-all.h
... ... @@ -191,6 +191,10 @@ extern int code_gen_max_blocks;
191 191 #if defined(USE_DIRECT_JUMP)
192 192  
193 193 #if defined(__powerpc__)
  194 +#if defined(__powerpc64__)
  195 +extern void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
  196 +#define tb_set_jmp_target1 ppc_tb_set_jmp_target
  197 +#else
194 198 static inline void flush_icache_range(unsigned long start, unsigned long stop);
195 199 static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
196 200 {
... ... @@ -223,6 +227,7 @@ static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr
223 227 /* flush icache */
224 228 flush_icache_range(jmp_addr, jmp_addr + patch_size);
225 229 }
  230 +#endif
226 231 #elif defined(__i386__) || defined(__x86_64__)
227 232 static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
228 233 {
... ...
ppc64.ld 0 → 100644
  1 +/* Script for -z combreloc: combine and sort reloc sections */
  2 +OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc",
  3 + "elf64-powerpc")
  4 +OUTPUT_ARCH(powerpc:common64)
  5 +ENTRY(_start)
  6 +SEARCH_DIR("/usr/powerpc64-unknown-linux-gnu/lib64");
  7 +SEARCH_DIR("/usr/lib/binutils/powerpc64-unknown-linux-gnu/2.16.164");
  8 +SEARCH_DIR("/usr/local/lib64"); SEARCH_DIR("/lib64"); SEARCH_DIR("/usr/lib64");
  9 +SEARCH_DIR("/usr/powerpc64-unknown-linux-gnu/lib");
  10 +SEARCH_DIR("/usr/lib/binutils/powerpc64-unknown-linux-gnu/2.16.1");
  11 +SEARCH_DIR("/usr/local/lib"); SEARCH_DIR("/lib"); SEARCH_DIR("/usr/lib");
  12 +/* Do we need any of these for elf?
  13 + __DYNAMIC = 0; */
  14 +SECTIONS
  15 +{
  16 + /* Read-only sections, merged into text segment: */
  17 + PROVIDE (__executable_start = 0x60000000); . = 0x60000000 + SIZEOF_HEADERS;
  18 + .interp : { *(.interp) }
  19 + .hash : { *(.hash) }
  20 + .dynsym : { *(.dynsym) }
  21 + .dynstr : { *(.dynstr) }
  22 + .gnu.version : { *(.gnu.version) }
  23 + .gnu.version_d : { *(.gnu.version_d) }
  24 + .gnu.version_r : { *(.gnu.version_r) }
  25 + .rel.dyn :
  26 + {
  27 + *(.rel.init)
  28 + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
  29 + *(.rel.fini)
  30 + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
  31 + *(.rel.data.rel.ro*)
  32 + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
  33 + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
  34 + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
  35 + *(.rel.ctors)
  36 + *(.rel.dtors)
  37 + *(.rel.got)
  38 + *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*)
  39 + *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*)
  40 + *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*)
  41 + *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*)
  42 + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
  43 + }
  44 + .rela.dyn :
  45 + {
  46 + *(.rela.init)
  47 + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
  48 + *(.rela.fini)
  49 + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
  50 + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
  51 + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
  52 + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
  53 + *(.rela.ctors)
  54 + *(.rela.dtors)
  55 + *(.rela.got)
  56 + *(.rela.toc)
  57 + *(.rela.opd)
  58 + *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*)
  59 + *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*)
  60 + *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*)
  61 + *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*)
  62 + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
  63 + }
  64 + .rel.plt : { *(.rel.plt) }
  65 + .rela.plt : { *(.rela.plt) }
  66 + .rela.tocbss : { *(.rela.tocbss) }
  67 + .init :
  68 + {
  69 + KEEP (*(.init))
  70 + } =0x60000000
  71 + .text :
  72 + {
  73 + *(.text .stub .text.* .gnu.linkonce.t.*)
  74 + KEEP (*(.text.*personality*))
  75 + /* .gnu.warning sections are handled specially by elf32.em. */
  76 + *(.gnu.warning)
  77 + *(.sfpr .glink)
  78 + } =0x60000000
  79 + .fini :
  80 + {
  81 + KEEP (*(.fini))
  82 + } =0x60000000
  83 + PROVIDE (__etext = .);
  84 + PROVIDE (_etext = .);
  85 + PROVIDE (etext = .);
  86 + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
  87 + .rodata1 : { *(.rodata1) }
  88 + .sdata2 : { *(.sdata2 .sdata2.* .gnu.linkonce.s2.*) }
  89 + .sbss2 : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) }
  90 + .eh_frame_hdr : { *(.eh_frame_hdr) }
  91 + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
  92 + .gcc_except_table : ONLY_IF_RO { KEEP (*(.gcc_except_table))
  93 +*(.gcc_except_table.*) }
  94 + /* Adjust the address for the data segment. We want to adjust up to
  95 + the same address within the page on the next page up. */
  96 + . = ALIGN (0x10000) - ((0x10000 - .) & (0x10000 - 1)); . =
  97 +DATA_SEGMENT_ALIGN (0x10000, 0x1000);
  98 + /* Exception handling */
  99 + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
  100 + .gcc_except_table : ONLY_IF_RW { KEEP (*(.gcc_except_table))
  101 +*(.gcc_except_table.*) }
  102 + /* Thread Local Storage sections */
  103 + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
  104 + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
  105 + /* Ensure the __preinit_array_start label is properly aligned. We
  106 + could instead move the label definition inside the section, but
  107 + the linker would then create the section even if it turns out to
  108 + be empty, which isn't pretty. */
  109 + . = ALIGN(64 / 8);
  110 + PROVIDE (__preinit_array_start = .);
  111 + .preinit_array : { KEEP (*(.preinit_array)) }
  112 + PROVIDE (__preinit_array_end = .);
  113 + PROVIDE (__init_array_start = .);
  114 + .init_array : { KEEP (*(.init_array)) }
  115 + PROVIDE (__init_array_end = .);
  116 + PROVIDE (__fini_array_start = .);
  117 + .fini_array : { KEEP (*(.fini_array)) }
  118 + PROVIDE (__fini_array_end = .);
  119 + .ctors :
  120 + {
  121 + /* gcc uses crtbegin.o to find the start of
  122 + the constructors, so we make sure it is
  123 + first. Because this is a wildcard, it
  124 + doesn't matter if the user does not
  125 + actually link against crtbegin.o; the
  126 + linker won't look for a file to match a
  127 + wildcard. The wildcard also means that it
  128 + doesn't matter which directory crtbegin.o
  129 + is in. */
  130 + KEEP (*crtbegin*.o(.ctors))
  131 + /* We don't want to include the .ctor section from
  132 + from the crtend.o file until after the sorted ctors.
  133 + The .ctor section from the crtend file contains the
  134 + end of ctors marker and it must be last */
  135 + KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors))
  136 + KEEP (*(SORT(.ctors.*)))
  137 + KEEP (*(.ctors))
  138 + }
  139 + .dtors :
  140 + {
  141 + KEEP (*crtbegin*.o(.dtors))
  142 + KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors))
  143 + KEEP (*(SORT(.dtors.*)))
  144 + KEEP (*(.dtors))
  145 + }
  146 + .jcr : { KEEP (*(.jcr)) }
  147 + .data.rel.ro : { *(.data.rel.ro.local) *(.data.rel.ro*) }
  148 + .dynamic : { *(.dynamic) }
  149 + . = DATA_SEGMENT_RELRO_END (0, .);
  150 + .data :
  151 + {
  152 + *(.data .data.* .gnu.linkonce.d.*)
  153 + KEEP (*(.gnu.linkonce.d.*personality*))
  154 + SORT(CONSTRUCTORS)
  155 + }
  156 + .data1 : { *(.data1) }
  157 + .toc1 ALIGN(8) : { *(.toc1) }
  158 + .opd ALIGN(8) : { KEEP (*(.opd)) }
  159 + .got ALIGN(8) : { *(.got .toc) }
  160 + /* We want the small data sections together, so single-instruction offsets
  161 + can access them all, and initialized data all before uninitialized, so
  162 + we can shorten the on-disk segment size. */
  163 + .sdata :
  164 + {
  165 + *(.sdata .sdata.* .gnu.linkonce.s.*)
  166 + }
  167 + _edata = .;
  168 + PROVIDE (edata = .);
  169 + __bss_start = .;
  170 + .tocbss ALIGN(8) : { *(.tocbss)}
  171 + .sbss :
  172 + {
  173 + PROVIDE (__sbss_start = .);
  174 + PROVIDE (___sbss_start = .);
  175 + *(.dynsbss)
  176 + *(.sbss .sbss.* .gnu.linkonce.sb.*)
  177 + *(.scommon)
  178 + PROVIDE (__sbss_end = .);
  179 + PROVIDE (___sbss_end = .);
  180 + }
  181 + .plt : { *(.plt) }
  182 + .bss :
  183 + {
  184 + *(.dynbss)
  185 + *(.bss .bss.* .gnu.linkonce.b.*)
  186 + *(COMMON)
  187 + /* Align here to ensure that the .bss section occupies space up to
  188 + _end. Align after .bss to ensure correct alignment even if the
  189 + .bss section disappears because there are no input sections. */
  190 + . = ALIGN(64 / 8);
  191 + }
  192 + . = ALIGN(64 / 8);
  193 + _end = .;
  194 + PROVIDE (end = .);
  195 + . = DATA_SEGMENT_END (.);
  196 + /* Stabs debugging sections. */
  197 + .stab 0 : { *(.stab) }
  198 + .stabstr 0 : { *(.stabstr) }
  199 + .stab.excl 0 : { *(.stab.excl) }
  200 + .stab.exclstr 0 : { *(.stab.exclstr) }
  201 + .stab.index 0 : { *(.stab.index) }
  202 + .stab.indexstr 0 : { *(.stab.indexstr) }
  203 + .comment 0 : { *(.comment) }
  204 + /* DWARF debug sections.
  205 + Symbols in the DWARF debugging sections are relative to the beginning
  206 + of the section so we begin them at 0. */
  207 + /* DWARF 1 */
  208 + .debug 0 : { *(.debug) }
  209 + .line 0 : { *(.line) }
  210 + /* GNU DWARF 1 extensions */
  211 + .debug_srcinfo 0 : { *(.debug_srcinfo) }
  212 + .debug_sfnames 0 : { *(.debug_sfnames) }
  213 + /* DWARF 1.1 and DWARF 2 */
  214 + .debug_aranges 0 : { *(.debug_aranges) }
  215 + .debug_pubnames 0 : { *(.debug_pubnames) }
  216 + /* DWARF 2 */
  217 + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
  218 + .debug_abbrev 0 : { *(.debug_abbrev) }
  219 + .debug_line 0 : { *(.debug_line) }
  220 + .debug_frame 0 : { *(.debug_frame) }
  221 + .debug_str 0 : { *(.debug_str) }
  222 + .debug_loc 0 : { *(.debug_loc) }
  223 + .debug_macinfo 0 : { *(.debug_macinfo) }
  224 + /* SGI/MIPS DWARF 2 extensions */
  225 + .debug_weaknames 0 : { *(.debug_weaknames) }
  226 + .debug_funcnames 0 : { *(.debug_funcnames) }
  227 + .debug_typenames 0 : { *(.debug_typenames) }
  228 + .debug_varnames 0 : { *(.debug_varnames) }
  229 + /DISCARD/ : { *(.note.GNU-stack) }
  230 +}
... ...
tcg/ppc64/tcg-target.c 0 → 100644
  1 +/*
  2 + * Tiny Code Generator for QEMU
  3 + *
  4 + * Copyright (c) 2008 Fabrice Bellard
  5 + *
  6 + * Permission is hereby granted, free of charge, to any person obtaining a copy
  7 + * of this software and associated documentation files (the "Software"), to deal
  8 + * in the Software without restriction, including without limitation the rights
  9 + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 + * copies of the Software, and to permit persons to whom the Software is
  11 + * furnished to do so, subject to the following conditions:
  12 + *
  13 + * The above copyright notice and this permission notice shall be included in
  14 + * all copies or substantial portions of the Software.
  15 + *
  16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 + * THE SOFTWARE.
  23 + */
  24 +
  25 +static uint8_t *tb_ret_addr;
  26 +
  27 +#define FAST_PATH
  28 +
  29 +#if TARGET_PHYS_ADDR_BITS == 32
  30 +#define LD_ADDEND LWZ
  31 +#else
  32 +#define LD_ADDEND LD
  33 +#endif
  34 +
  35 +#if TARGET_LONG_BITS == 32
  36 +#define LD_ADDR LWZU
  37 +#else
  38 +#define LD_ADDR LDU
  39 +#endif
  40 +
  41 +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
  42 + "r0",
  43 + "r1",
  44 + "rp",
  45 + "r3",
  46 + "r4",
  47 + "r5",
  48 + "r6",
  49 + "r7",
  50 + "r8",
  51 + "r9",
  52 + "r10",
  53 + "r11",
  54 + "r12",
  55 + "r13",
  56 + "r14",
  57 + "r15",
  58 + "r16",
  59 + "r17",
  60 + "r18",
  61 + "r19",
  62 + "r20",
  63 + "r21",
  64 + "r22",
  65 + "r23",
  66 + "r24",
  67 + "r25",
  68 + "r26",
  69 + "r27",
  70 + "r28",
  71 + "r29",
  72 + "r30",
  73 + "r31"
  74 +};
  75 +
  76 +static const int tcg_target_reg_alloc_order[] = {
  77 + TCG_REG_R14,
  78 + TCG_REG_R15,
  79 + TCG_REG_R16,
  80 + TCG_REG_R17,
  81 + TCG_REG_R18,
  82 + TCG_REG_R19,
  83 + TCG_REG_R20,
  84 + TCG_REG_R21,
  85 + TCG_REG_R22,
  86 + TCG_REG_R23,
  87 + TCG_REG_R28,
  88 + TCG_REG_R29,
  89 + TCG_REG_R30,
  90 + TCG_REG_R31,
  91 + TCG_REG_R3,
  92 + TCG_REG_R4,
  93 + TCG_REG_R5,
  94 + TCG_REG_R6,
  95 + TCG_REG_R7,
  96 + TCG_REG_R8,
  97 + TCG_REG_R9,
  98 + TCG_REG_R10,
  99 + TCG_REG_R11,
  100 + TCG_REG_R12,
  101 + TCG_REG_R13,
  102 + TCG_REG_R0,
  103 + TCG_REG_R1,
  104 + TCG_REG_R2,
  105 + TCG_REG_R24,
  106 + TCG_REG_R25,
  107 + TCG_REG_R26,
  108 + TCG_REG_R27
  109 +};
  110 +
  111 +static const int tcg_target_call_iarg_regs[] = {
  112 + TCG_REG_R3,
  113 + TCG_REG_R4,
  114 + TCG_REG_R5,
  115 + TCG_REG_R6,
  116 + TCG_REG_R7,
  117 + TCG_REG_R8,
  118 + TCG_REG_R9,
  119 + TCG_REG_R10
  120 +};
  121 +
  122 +static const int tcg_target_call_oarg_regs[2] = {
  123 + TCG_REG_R3
  124 +};
  125 +
  126 +static const int tcg_target_callee_save_regs[] = {
  127 + TCG_REG_R14,
  128 + TCG_REG_R15,
  129 + TCG_REG_R16,
  130 + TCG_REG_R17,
  131 + TCG_REG_R18,
  132 + TCG_REG_R19,
  133 + TCG_REG_R20,
  134 + TCG_REG_R21,
  135 + TCG_REG_R22,
  136 + TCG_REG_R23,
  137 + TCG_REG_R28,
  138 + TCG_REG_R29,
  139 + TCG_REG_R30,
  140 + TCG_REG_R31
  141 +};
  142 +
  143 +static uint32_t reloc_pc24_val (void *pc, tcg_target_long target)
  144 +{
  145 + tcg_target_long disp;
  146 +
  147 + disp = target - (tcg_target_long) pc;
  148 + if ((disp << 38) >> 38 != disp)
  149 + tcg_abort ();
  150 +
  151 + return disp & 0x3fffffc;
  152 +}
  153 +
  154 +static void reloc_pc24 (void *pc, tcg_target_long target)
  155 +{
  156 + *(uint32_t *) pc = (*(uint32_t *) pc & ~0x3fffffc)
  157 + | reloc_pc24_val (pc, target);
  158 +}
  159 +
  160 +static uint16_t reloc_pc14_val (void *pc, tcg_target_long target)
  161 +{
  162 + tcg_target_long disp;
  163 +
  164 + disp = target - (tcg_target_long) pc;
  165 + if (disp != (int16_t) disp)
  166 + tcg_abort ();
  167 +
  168 + return disp & 0xfffc;
  169 +}
  170 +
  171 +static void reloc_pc14 (void *pc, tcg_target_long target)
  172 +{
  173 + *(uint32_t *) pc = (*(uint32_t *) pc & ~0xfffc)
  174 + | reloc_pc14_val (pc, target);
  175 +}
  176 +
  177 +static void patch_reloc (uint8_t *code_ptr, int type,
  178 + tcg_target_long value, tcg_target_long addend)
  179 +{
  180 + value += addend;
  181 + switch (type) {
  182 + case R_PPC_REL14:
  183 + reloc_pc14 (code_ptr, value);
  184 + break;
  185 + case R_PPC_REL24:
  186 + reloc_pc24 (code_ptr, value);
  187 + break;
  188 + default:
  189 + tcg_abort ();
  190 + }
  191 +}
  192 +
  193 +/* maximum number of register used for input function arguments */
  194 +static int tcg_target_get_call_iarg_regs_count (int flags)
  195 +{
  196 + return sizeof (tcg_target_call_iarg_regs) / sizeof (tcg_target_call_iarg_regs[0]);
  197 +}
  198 +
  199 +/* parse target specific constraints */
  200 +static int target_parse_constraint (TCGArgConstraint *ct, const char **pct_str)
  201 +{
  202 + const char *ct_str;
  203 +
  204 + ct_str = *pct_str;
  205 + switch (ct_str[0]) {
  206 + case 'A': case 'B': case 'C': case 'D':
  207 + ct->ct |= TCG_CT_REG;
  208 + tcg_regset_set_reg (ct->u.regs, 3 + ct_str[0] - 'A');
  209 + break;
  210 + case 'r':
  211 + ct->ct |= TCG_CT_REG;
  212 + tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
  213 + break;
  214 + case 'L': /* qemu_ld constraint */
  215 + ct->ct |= TCG_CT_REG;
  216 + tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
  217 + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
  218 + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
  219 + break;
  220 + case 'K': /* qemu_st[8..32] constraint */
  221 + ct->ct |= TCG_CT_REG;
  222 + tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
  223 + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
  224 + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
  225 + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R5);
  226 +#if TARGET_LONG_BITS == 64
  227 + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R6);
  228 +#endif
  229 + break;
  230 + case 'M': /* qemu_st64 constraint */
  231 + ct->ct |= TCG_CT_REG;
  232 + tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
  233 + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
  234 + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
  235 + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R5);
  236 + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R6);
  237 + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R7);
  238 + break;
  239 + default:
  240 + return -1;
  241 + }
  242 + ct_str++;
  243 + *pct_str = ct_str;
  244 + return 0;
  245 +}
  246 +
  247 +/* test if a constant matches the constraint */
  248 +static int tcg_target_const_match (tcg_target_long val,
  249 + const TCGArgConstraint *arg_ct)
  250 +{
  251 + int ct;
  252 +
  253 + ct = arg_ct->ct;
  254 + if (ct & TCG_CT_CONST)
  255 + return 1;
  256 + return 0;
  257 +}
  258 +
  259 +#define OPCD(opc) ((opc)<<26)
  260 +#define XO19(opc) (OPCD(19)|((opc)<<1))
  261 +#define XO30(opc) (OPCD(30)|((opc)<<2))
  262 +#define XO31(opc) (OPCD(31)|((opc)<<1))
  263 +#define XO58(opc) (OPCD(58)|(opc))
  264 +#define XO62(opc) (OPCD(62)|(opc))
  265 +
  266 +#define B OPCD( 18)
  267 +#define BC OPCD( 16)
  268 +#define LBZ OPCD( 34)
  269 +#define LHZ OPCD( 40)
  270 +#define LHA OPCD( 42)
  271 +#define LWZ OPCD( 32)
  272 +#define STB OPCD( 38)
  273 +#define STH OPCD( 44)
  274 +#define STW OPCD( 36)
  275 +
  276 +#define STD XO62( 0)
  277 +#define STDU XO62( 1)
  278 +#define STDX XO31(149)
  279 +
  280 +#define LD XO58( 0)
  281 +#define LDX XO31( 21)
  282 +#define LDU XO58( 1)
  283 +#define LWA XO58( 10)
  284 +#define LWAX XO31(341)
  285 +
  286 +#define ADDI OPCD( 14)
  287 +#define ADDIS OPCD( 15)
  288 +#define ORI OPCD( 24)
  289 +#define ORIS OPCD( 25)
  290 +#define XORI OPCD( 26)
  291 +#define XORIS OPCD( 27)
  292 +#define ANDI OPCD( 28)
  293 +#define ANDIS OPCD( 29)
  294 +#define MULLI OPCD( 7)
  295 +#define CMPLI OPCD( 10)
  296 +#define CMPI OPCD( 11)
  297 +
  298 +#define LWZU OPCD( 33)
  299 +#define STWU OPCD( 37)
  300 +
  301 +#define RLWINM OPCD( 21)
  302 +
  303 +#define RLDICL XO30( 0)
  304 +#define RLDICR XO30( 1)
  305 +
  306 +#define BCLR XO19( 16)
  307 +#define BCCTR XO19(528)
  308 +#define CRAND XO19(257)
  309 +#define CRANDC XO19(129)
  310 +#define CRNAND XO19(225)
  311 +#define CROR XO19(449)
  312 +
  313 +#define EXTSB XO31(954)
  314 +#define EXTSH XO31(922)
  315 +#define EXTSW XO31(986)
  316 +#define ADD XO31(266)
  317 +#define ADDE XO31(138)
  318 +#define ADDC XO31( 10)
  319 +#define AND XO31( 28)
  320 +#define SUBF XO31( 40)
  321 +#define SUBFC XO31( 8)
  322 +#define SUBFE XO31(136)
  323 +#define OR XO31(444)
  324 +#define XOR XO31(316)
  325 +#define MULLW XO31(235)
  326 +#define MULHWU XO31( 11)
  327 +#define DIVW XO31(491)
  328 +#define DIVWU XO31(459)
  329 +#define CMP XO31( 0)
  330 +#define CMPL XO31( 32)
  331 +#define LHBRX XO31(790)
  332 +#define LWBRX XO31(534)
  333 +#define STHBRX XO31(918)
  334 +#define STWBRX XO31(662)
  335 +#define MFSPR XO31(339)
  336 +#define MTSPR XO31(467)
  337 +#define SRAWI XO31(824)
  338 +#define NEG XO31(104)
  339 +
  340 +#define MULLD XO31(233)
  341 +#define MULHD XO31( 73)
  342 +#define MULHDU XO31( 9)
  343 +#define DIVD XO31(489)
  344 +#define DIVDU XO31(457)
  345 +
  346 +#define LBZX XO31( 87)
  347 +#define LHZX XO31(276)
  348 +#define LHAX XO31(343)
  349 +#define LWZX XO31( 23)
  350 +#define STBX XO31(215)
  351 +#define STHX XO31(407)
  352 +#define STWX XO31(151)
  353 +
  354 +#define SPR(a,b) ((((a)<<5)|(b))<<11)
  355 +#define LR SPR(8, 0)
  356 +#define CTR SPR(9, 0)
  357 +
  358 +#define SLW XO31( 24)
  359 +#define SRW XO31(536)
  360 +#define SRAW XO31(792)
  361 +
  362 +#define SLD XO31( 27)
  363 +#define SRD XO31(539)
  364 +#define SRAD XO31(794)
  365 +
  366 +#define LMW OPCD( 46)
  367 +#define STMW OPCD( 47)
  368 +
  369 +#define TW XO31( 4)
  370 +#define TRAP (TW | TO (31))
  371 +
  372 +#define RT(r) ((r)<<21)
  373 +#define RS(r) ((r)<<21)
  374 +#define RA(r) ((r)<<16)
  375 +#define RB(r) ((r)<<11)
  376 +#define TO(t) ((t)<<21)
  377 +#define SH(s) ((s)<<11)
  378 +#define MB(b) ((b)<<6)
  379 +#define ME(e) ((e)<<1)
  380 +#define BO(o) ((o)<<21)
  381 +#define MB64(b) ((b)<<5)
  382 +
  383 +#define LK 1
  384 +
  385 +#define TAB(t,a,b) (RT(t) | RA(a) | RB(b))
  386 +#define SAB(s,a,b) (RS(s) | RA(a) | RB(b))
  387 +
  388 +#define BF(n) ((n)<<23)
  389 +#define BI(n, c) (((c)+((n)*4))<<16)
  390 +#define BT(n, c) (((c)+((n)*4))<<21)
  391 +#define BA(n, c) (((c)+((n)*4))<<16)
  392 +#define BB(n, c) (((c)+((n)*4))<<11)
  393 +
  394 +#define BO_COND_TRUE BO (12)
  395 +#define BO_COND_FALSE BO ( 4)
  396 +#define BO_ALWAYS BO (20)
  397 +
  398 +enum {
  399 + CR_LT,
  400 + CR_GT,
  401 + CR_EQ,
  402 + CR_SO
  403 +};
  404 +
  405 +static const uint32_t tcg_to_bc[10] = {
  406 + [TCG_COND_EQ] = BC | BI (7, CR_EQ) | BO_COND_TRUE,
  407 + [TCG_COND_NE] = BC | BI (7, CR_EQ) | BO_COND_FALSE,
  408 + [TCG_COND_LT] = BC | BI (7, CR_LT) | BO_COND_TRUE,
  409 + [TCG_COND_GE] = BC | BI (7, CR_LT) | BO_COND_FALSE,
  410 + [TCG_COND_LE] = BC | BI (7, CR_GT) | BO_COND_FALSE,
  411 + [TCG_COND_GT] = BC | BI (7, CR_GT) | BO_COND_TRUE,
  412 + [TCG_COND_LTU] = BC | BI (7, CR_LT) | BO_COND_TRUE,
  413 + [TCG_COND_GEU] = BC | BI (7, CR_LT) | BO_COND_FALSE,
  414 + [TCG_COND_LEU] = BC | BI (7, CR_GT) | BO_COND_FALSE,
  415 + [TCG_COND_GTU] = BC | BI (7, CR_GT) | BO_COND_TRUE,
  416 +};
  417 +
  418 +static void tcg_out_mov (TCGContext *s, int ret, int arg)
  419 +{
  420 + tcg_out32 (s, OR | SAB (arg, ret, arg));
  421 +}
  422 +
  423 +static void tcg_out_rld (TCGContext *s, int op, int ra, int rs, int sh, int mb)
  424 +{
  425 + sh = SH (sh & 0x1f) | (((sh >> 5) & 1) << 1);
  426 + mb = MB64 ((mb >> 5) | ((mb << 1) & 0x3f));
  427 + tcg_out32 (s, op | RA (ra) | RS (rs) | sh | mb);
  428 +}
  429 +
  430 +static void tcg_out_movi32 (TCGContext *s, int ret, int32_t arg)
  431 +{
  432 + if (arg == (int16_t) arg)
  433 + tcg_out32 (s, ADDI | RT (ret) | RA (0) | (arg & 0xffff));
  434 + else {
  435 + tcg_out32 (s, ADDIS | RT (ret) | RA (0) | ((arg >> 16) & 0xffff));
  436 + if (arg & 0xffff)
  437 + tcg_out32 (s, ORI | RS (ret) | RA (ret) | (arg & 0xffff));
  438 + }
  439 +}
  440 +
  441 +static void tcg_out_movi (TCGContext *s, TCGType type,
  442 + int ret, tcg_target_long arg)
  443 +{
  444 + int32_t arg32 = arg;
  445 +
  446 + if (type == TCG_TYPE_I32 || arg == arg32) {
  447 + tcg_out_movi32 (s, ret, arg32);
  448 + }
  449 + else {
  450 + if ((uint64_t) arg >> 32) {
  451 + tcg_out_movi32 (s, ret, (arg >> 32) + (arg32 < 0));
  452 + tcg_out_rld (s, RLDICR, ret, ret, 32, 31);
  453 + if (arg32) {
  454 + tcg_out_movi32 (s, 0, arg32);
  455 + tcg_out32 (s, ADD | TAB (ret, ret, 0));
  456 + }
  457 + }
  458 + else {
  459 + tcg_out_movi32 (s, ret, arg32);
  460 + }
  461 + }
  462 +}
  463 +
  464 +static void tcg_out_call (TCGContext *s, tcg_target_long arg, int const_arg)
  465 +{
  466 + int reg;
  467 +
  468 + if (const_arg) {
  469 + reg = 2;
  470 + tcg_out_movi (s, TCG_TYPE_I64, reg, arg);
  471 + }
  472 + else reg = arg;
  473 +
  474 + tcg_out32 (s, LD | RT (0) | RA (reg));
  475 + tcg_out32 (s, MTSPR | RA (0) | CTR);
  476 + tcg_out32 (s, LD | RT (11) | RA (reg) | 16);
  477 + tcg_out32 (s, LD | RT (2) | RA (reg) | 8);
  478 + tcg_out32 (s, BCCTR | BO_ALWAYS | LK);
  479 +}
  480 +
  481 +static void tcg_out_ldst (TCGContext *s, int ret, int addr,
  482 + int offset, int op1, int op2)
  483 +{
  484 + if (offset == (int16_t) offset)
  485 + tcg_out32 (s, op1 | RT (ret) | RA (addr) | (offset & 0xffff));
  486 + else {
  487 + tcg_out_movi (s, TCG_TYPE_I64, 0, offset);
  488 + tcg_out32 (s, op2 | RT (ret) | RA (addr) | RB (0));
  489 + }
  490 +}
  491 +
  492 +static void tcg_out_b (TCGContext *s, int mask, tcg_target_long target)
  493 +{
  494 + tcg_target_long disp;
  495 +
  496 + disp = target - (tcg_target_long) s->code_ptr;
  497 + if ((disp << 38) >> 38 == disp)
  498 + tcg_out32 (s, B | (disp & 0x3fffffc) | mask);
  499 + else {
  500 + tcg_out_movi (s, TCG_TYPE_I64, 0, (tcg_target_long) target);
  501 + tcg_out32 (s, MTSPR | RS (0) | CTR);
  502 + tcg_out32 (s, BCCTR | BO_ALWAYS | mask);
  503 + }
  504 +}
  505 +
  506 +#if defined (CONFIG_SOFTMMU)
  507 +extern void __ldb_mmu(void);
  508 +extern void __ldw_mmu(void);
  509 +extern void __ldl_mmu(void);
  510 +extern void __ldq_mmu(void);
  511 +
  512 +extern void __stb_mmu(void);
  513 +extern void __stw_mmu(void);
  514 +extern void __stl_mmu(void);
  515 +extern void __stq_mmu(void);
  516 +
  517 +static void *qemu_ld_helpers[4] = {
  518 + __ldb_mmu,
  519 + __ldw_mmu,
  520 + __ldl_mmu,
  521 + __ldq_mmu,
  522 +};
  523 +
  524 +static void *qemu_st_helpers[4] = {
  525 + __stb_mmu,
  526 + __stw_mmu,
  527 + __stl_mmu,
  528 + __stq_mmu,
  529 +};
  530 +#endif
  531 +
  532 +static void tcg_out_tlb_read (TCGContext *s, int r0, int r1, int r2,
  533 + int addr_reg, int s_bits, int offset)
  534 +{
  535 +#if TARGET_LONG_BITS == 32
  536 + tcg_out_rld (s, RLDICL, addr_reg, addr_reg, 0, 32);
  537 +#endif
  538 +
  539 + tcg_out_rld (s, RLDICL, r0, addr_reg,
  540 + 64 - TARGET_PAGE_BITS,
  541 + 64 - CPU_TLB_BITS);
  542 + tcg_out_rld (s, RLDICR, r0, r0,
  543 + CPU_TLB_ENTRY_BITS,
  544 + 63 - CPU_TLB_ENTRY_BITS);
  545 +
  546 + tcg_out32 (s, ADD | TAB (r0, r0, TCG_AREG0));
  547 + tcg_out32 (s, LD_ADDR | RT (r1) | RA (r0) | offset);
  548 +
  549 + tcg_out_rld (s, RLDICL, r2, addr_reg,
  550 + 64 - TARGET_PAGE_BITS,
  551 + TARGET_PAGE_BITS - s_bits);
  552 + tcg_out_rld (s, RLDICL, r2, r2, TARGET_PAGE_BITS, 0);
  553 +}
  554 +
  555 +static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
  556 +{
  557 + int addr_reg, data_reg, r0, mem_index, s_bits, bswap;
  558 +#ifdef CONFIG_SOFTMMU
  559 + int r1, r2;
  560 + void *label1_ptr, *label2_ptr;
  561 +#endif
  562 +
  563 + data_reg = *args++;
  564 + addr_reg = *args++;
  565 + mem_index = *args;
  566 + s_bits = opc & 3;
  567 +
  568 +#ifdef CONFIG_SOFTMMU
  569 + r0 = 3;
  570 + r1 = 4;
  571 + r2 = 0;
  572 +
  573 + tcg_out_tlb_read (s, r0, r1, r2, addr_reg, s_bits,
  574 + offsetof (CPUState, tlb_table[mem_index][0].addr_read));
  575 +
  576 + tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1));
  577 +
  578 + label1_ptr = s->code_ptr;
  579 +#ifdef FAST_PATH
  580 + tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
  581 +#endif
  582 +
  583 + /* slow path */
  584 + tcg_out_mov (s, 3, addr_reg);
  585 + tcg_out_movi (s, TCG_TYPE_I64, 4, mem_index);
  586 +
  587 + tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);
  588 +
  589 + switch (opc) {
  590 + case 0|4:
  591 + tcg_out32 (s, EXTSB | RA (data_reg) | RS (3));
  592 + break;
  593 + case 1|4:
  594 + tcg_out32 (s, EXTSH | RA (data_reg) | RS (3));
  595 + break;
  596 + case 2|4:
  597 + tcg_out32 (s, EXTSW | RA (data_reg) | RS (3));
  598 + break;
  599 + case 0:
  600 + case 1:
  601 + case 2:
  602 + case 3:
  603 + if (data_reg != 3)
  604 + tcg_out_mov (s, data_reg, 3);
  605 + break;
  606 + }
  607 + label2_ptr = s->code_ptr;
  608 + tcg_out32 (s, B);
  609 +
  610 + /* label1: fast path */
  611 +#ifdef FAST_PATH
  612 + reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
  613 +#endif
  614 +
  615 + /* r0 now contains &env->tlb_table[mem_index][index].addr_read */
  616 + tcg_out32 (s, (LD_ADDEND
  617 + | RT (r0)
  618 + | RA (r0)
  619 + | (offsetof (CPUTLBEntry, addend)
  620 + - offsetof (CPUTLBEntry, addr_read))
  621 + ));
  622 + /* r0 = env->tlb_table[mem_index][index].addend */
  623 + tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (addr_reg));
  624 + /* r0 = env->tlb_table[mem_index][index].addend + addr */
  625 +
  626 +#else /* !CONFIG_SOFTMMU */
  627 + r0 = addr_reg;
  628 +#endif
  629 +
  630 +#ifdef TARGET_WORDS_BIGENDIAN
  631 + bswap = 0;
  632 +#else
  633 + bswap = 1;
  634 +#endif
  635 + switch (opc) {
  636 + default:
  637 + case 0:
  638 + tcg_out32 (s, LBZ | RT (data_reg) | RA (r0));
  639 + break;
  640 + case 0|4:
  641 + tcg_out32 (s, LBZ | RT (data_reg) | RA (r0));
  642 + tcg_out32 (s, EXTSB | RA (data_reg) | RS (data_reg));
  643 + break;
  644 + case 1:
  645 + if (bswap) tcg_out32 (s, LHBRX | RT (data_reg) | RB (r0));
  646 + else tcg_out32 (s, LHZ | RT (data_reg) | RA (r0));
  647 + break;
  648 + case 1|4:
  649 + if (bswap) {
  650 + tcg_out32 (s, LHBRX | RT (data_reg) | RB (r0));
  651 + tcg_out32 (s, EXTSH | RA (data_reg) | RS (data_reg));
  652 + }
  653 + else tcg_out32 (s, LHA | RT (data_reg) | RA (r0));
  654 + break;
  655 + case 2:
  656 + if (bswap) tcg_out32 (s, LWBRX | RT (data_reg) | RB (r0));
  657 + else tcg_out32 (s, LWZ | RT (data_reg)| RA (r0));
  658 + break;
  659 + case 2|4:
  660 + if (bswap) {
  661 + tcg_out32 (s, LWBRX | RT (data_reg) | RB (r0));
  662 + tcg_out32 (s, EXTSW | RT (data_reg) | RS (data_reg));
  663 + }
  664 + else tcg_out32 (s, LWA | RT (data_reg)| RA (r0));
  665 + break;
  666 + case 3:
  667 + if (bswap) {
  668 + tcg_out32 (s, LWBRX | RT (data_reg) | RB (r0));
  669 + tcg_out32 (s, ADDI | RT (r0) | RA (r0) | 4);
  670 + tcg_out32 (s, LWBRX | RT (r0) | RB (r0));
  671 + tcg_out_rld (s, RLDICR, r0, r0, 32, 31);
  672 + tcg_out32 (s, OR | SAB (r0, data_reg, data_reg));
  673 + }
  674 + else tcg_out32 (s, LD | RT (data_reg) | RA (r0));
  675 + break;
  676 + }
  677 +
  678 +#ifdef CONFIG_SOFTMMU
  679 + reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
  680 +#endif
  681 +}
  682 +
  683 +static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
  684 +{
  685 + int addr_reg, r0, r1, data_reg, mem_index, bswap;
  686 +#ifdef CONFIG_SOFTMMU
  687 + int r2;
  688 + void *label1_ptr, *label2_ptr;
  689 +#endif
  690 +
  691 + data_reg = *args++;
  692 + addr_reg = *args++;
  693 + mem_index = *args;
  694 +
  695 +#ifdef CONFIG_SOFTMMU
  696 + r0 = 3;
  697 + r1 = 4;
  698 + r2 = 0;
  699 +
  700 + tcg_out_tlb_read (s, r0, r1, r2, addr_reg, opc,
  701 + offsetof (CPUState, tlb_table[mem_index][0].addr_write));
  702 +
  703 + tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1));
  704 +
  705 + label1_ptr = s->code_ptr;
  706 +#ifdef FAST_PATH
  707 + tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
  708 +#endif
  709 +
  710 + /* slow path */
  711 + tcg_out_mov (s, 3, addr_reg);
  712 + tcg_out_rld (s, RLDICL, 4, data_reg, 0, 64 - (1 << (3 + opc)));
  713 + tcg_out_movi (s, TCG_TYPE_I64, 5, mem_index);
  714 +
  715 + tcg_out_call (s, (tcg_target_long) qemu_st_helpers[opc], 1);
  716 +
  717 + label2_ptr = s->code_ptr;
  718 + tcg_out32 (s, B);
  719 +
  720 + /* label1: fast path */
  721 +#ifdef FAST_PATH
  722 + reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
  723 +#endif
  724 +
  725 + tcg_out32 (s, (LD_ADDEND
  726 + | RT (r0)
  727 + | RA (r0)
  728 + | (offsetof (CPUTLBEntry, addend)
  729 + - offsetof (CPUTLBEntry, addr_write))
  730 + ));
  731 + /* r0 = env->tlb_table[mem_index][index].addend */
  732 + tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (addr_reg));
  733 + /* r0 = env->tlb_table[mem_index][index].addend + addr */
  734 +
  735 +#else /* !CONFIG_SOFTMMU */
  736 + r1 = 4;
  737 + r0 = addr_reg;
  738 +#endif
  739 +
  740 +#ifdef TARGET_WORDS_BIGENDIAN
  741 + bswap = 0;
  742 +#else
  743 + bswap = 1;
  744 +#endif
  745 + switch (opc) {
  746 + case 0:
  747 + tcg_out32 (s, STB | RS (data_reg) | RA (r0));
  748 + break;
  749 + case 1:
  750 + if (bswap) tcg_out32 (s, STHBRX | RS (data_reg) | RA (0) | RB (r0));
  751 + else tcg_out32 (s, STH | RS (data_reg) | RA (r0));
  752 + break;
  753 + case 2:
  754 + if (bswap) tcg_out32 (s, STWBRX | RS (data_reg) | RA (0) | RB (r0));
  755 + else tcg_out32 (s, STW | RS (data_reg) | RA (r0));
  756 + break;
  757 + case 3:
  758 + if (bswap) {
  759 + tcg_out32 (s, STWBRX | RS (data_reg) | RA (0) | RB (r0));
  760 + tcg_out32 (s, ADDI | RT (r0) | RA (r0) | 4);
  761 + tcg_out_rld (s, RLDICL, 0, data_reg, 32, 0);
  762 + tcg_out32 (s, STWBRX | RS (0) | RA (0) | RB (r0));
  763 + }
  764 + else tcg_out32 (s, STD | RS (data_reg) | RA (r0));
  765 + break;
  766 + }
  767 +
  768 +#ifdef CONFIG_SOFTMMU
  769 + reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
  770 +#endif
  771 +}
  772 +
  773 +void tcg_target_qemu_prologue (TCGContext *s)
  774 +{
  775 + int i, frame_size;
  776 +
  777 + frame_size = 0
  778 + + 8 /* back chain */
  779 + + 8 /* CR */
  780 + + 8 /* LR */
  781 + + 8 /* compiler doubleword */
  782 + + 8 /* link editor doubleword */
  783 + + 8 /* TOC save area */
  784 + + TCG_STATIC_CALL_ARGS_SIZE
  785 + + ARRAY_SIZE (tcg_target_callee_save_regs) * 8
  786 + ;
  787 + frame_size = (frame_size + 15) & ~15;
  788 +
  789 + tcg_out32 (s, MFSPR | RT (0) | LR);
  790 + tcg_out32 (s, STDU | RS (1) | RA (1) | (-frame_size & 0xffff));
  791 + for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
  792 + tcg_out32 (s, (STD
  793 + | RS (tcg_target_callee_save_regs[i])
  794 + | RA (1)
  795 + | (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE)
  796 + )
  797 + );
  798 + tcg_out32 (s, STD | RS (0) | RA (1) | (frame_size + 20));
  799 + tcg_out32 (s, STD | RS (2) | RA (1) | (frame_size + 40));
  800 +
  801 + tcg_out32 (s, MTSPR | RS (3) | CTR);
  802 + tcg_out32 (s, BCCTR | BO_ALWAYS);
  803 + tb_ret_addr = s->code_ptr;
  804 +
  805 + for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
  806 + tcg_out32 (s, (LD
  807 + | RT (tcg_target_callee_save_regs[i])
  808 + | RA (1)
  809 + | (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE)
  810 + )
  811 + );
  812 + tcg_out32 (s, LD | RT (0) | RA (1) | (frame_size + 20));
  813 + tcg_out32 (s, LD | RT (2) | RA (1) | (frame_size + 40));
  814 + tcg_out32 (s, MTSPR | RS (0) | LR);
  815 + tcg_out32 (s, ADDI | RT (1) | RA (1) | frame_size);
  816 + tcg_out32 (s, BCLR | BO_ALWAYS);
  817 +}
  818 +
  819 +static void tcg_out_ld (TCGContext *s, TCGType type, int ret, int arg1,
  820 + tcg_target_long arg2)
  821 +{
  822 + if (type == TCG_TYPE_I32)
  823 + tcg_out_ldst (s, ret, arg1, arg2, LWZ, LWZX);
  824 + else
  825 + tcg_out_ldst (s, ret, arg1, arg2, LD, LDX);
  826 +}
  827 +
  828 +static void tcg_out_st (TCGContext *s, TCGType type, int arg, int arg1,
  829 + tcg_target_long arg2)
  830 +{
  831 + if (type == TCG_TYPE_I32)
  832 + tcg_out_ldst (s, arg, arg1, arg2, STW, STWX);
  833 + else
  834 + tcg_out_ldst (s, arg, arg1, arg2, STD, STDX);
  835 +}
  836 +
  837 +static void ppc_addi32 (TCGContext *s, int rt, int ra, tcg_target_long si)
  838 +{
  839 + if (!si && rt == ra)
  840 + return;
  841 +
  842 + if (si == (int16_t) si)
  843 + tcg_out32 (s, ADDI | RT (rt) | RA (ra) | (si & 0xffff));
  844 + else {
  845 + uint16_t h = ((si >> 16) & 0xffff) + ((uint16_t) si >> 15);
  846 + tcg_out32 (s, ADDIS | RT (rt) | RA (ra) | h);
  847 + tcg_out32 (s, ADDI | RT (rt) | RA (rt) | (si & 0xffff));
  848 + }
  849 +}
  850 +
  851 +static void ppc_addi64 (TCGContext *s, int rt, int ra, tcg_target_long si)
  852 +{
  853 + tcg_out_movi (s, TCG_TYPE_I64, 0, si);
  854 + tcg_out32 (s, ADD | RT (rt) | RA (ra));
  855 +}
  856 +
  857 +static void tcg_out_addi (TCGContext *s, int reg, tcg_target_long val)
  858 +{
  859 + ppc_addi64 (s, reg, reg, val);
  860 +}
  861 +
  862 +static void tcg_out_cmp (TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
  863 + int const_arg2, int cr)
  864 +{
  865 + int imm;
  866 + uint32_t op;
  867 +
  868 + switch (cond) {
  869 + case TCG_COND_EQ:
  870 + case TCG_COND_NE:
  871 + if (const_arg2) {
  872 + if ((int16_t) arg2 == arg2) {
  873 + op = CMPI;
  874 + imm = 1;
  875 + break;
  876 + }
  877 + else if ((uint16_t) arg2 == arg2) {
  878 + op = CMPLI;
  879 + imm = 1;
  880 + break;
  881 + }
  882 + }
  883 + op = CMPL;
  884 + imm = 0;
  885 + break;
  886 +
  887 + case TCG_COND_LT:
  888 + case TCG_COND_GE:
  889 + case TCG_COND_LE:
  890 + case TCG_COND_GT:
  891 + if (const_arg2) {
  892 + if ((int16_t) arg2 == arg2) {
  893 + op = CMPI;
  894 + imm = 1;
  895 + break;
  896 + }
  897 + }
  898 + op = CMP;
  899 + imm = 0;
  900 + break;
  901 +
  902 + case TCG_COND_LTU:
  903 + case TCG_COND_GEU:
  904 + case TCG_COND_LEU:
  905 + case TCG_COND_GTU:
  906 + if (const_arg2) {
  907 + if ((uint16_t) arg2 == arg2) {
  908 + op = CMPLI;
  909 + imm = 1;
  910 + break;
  911 + }
  912 + }
  913 + op = CMPL;
  914 + imm = 0;
  915 + break;
  916 +
  917 + default:
  918 + tcg_abort ();
  919 + }
  920 + op |= BF (cr);
  921 +
  922 + if (imm)
  923 + tcg_out32 (s, op | RA (arg1) | (arg2 & 0xffff));
  924 + else {
  925 + if (const_arg2) {
  926 + tcg_out_movi (s, TCG_TYPE_I64, 0, arg2);
  927 + tcg_out32 (s, op | RA (arg1) | RB (0));
  928 + }
  929 + else
  930 + tcg_out32 (s, op | RA (arg1) | RB (arg2));
  931 + }
  932 +
  933 +}
  934 +
  935 +static void tcg_out_bc (TCGContext *s, int bc, int label_index)
  936 +{
  937 + TCGLabel *l = &s->labels[label_index];
  938 +
  939 + if (l->has_value)
  940 + tcg_out32 (s, bc | reloc_pc14_val (s->code_ptr, l->u.value));
  941 + else {
  942 + uint16_t val = *(uint16_t *) &s->code_ptr[2];
  943 +
  944 + /* Thanks to Andrzej Zaborowski */
  945 + tcg_out32 (s, bc | (val & 0xfffc));
  946 + tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL14, label_index, 0);
  947 + }
  948 +}
  949 +
  950 +static void tcg_out_brcond (TCGContext *s, int cond,
  951 + TCGArg arg1, TCGArg arg2, int const_arg2,
  952 + int label_index)
  953 +{
  954 + tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7);
  955 + tcg_out_bc (s, tcg_to_bc[cond], label_index);
  956 +}
  957 +
  958 +void ppc_tb_set_jmp_target (unsigned long jmp_addr, unsigned long addr)
  959 +{
  960 + TCGContext s;
  961 + unsigned long patch_size;
  962 +
  963 + s.code_ptr = (uint8_t *) jmp_addr;
  964 + tcg_out_b (&s, 0, addr);
  965 + patch_size = s.code_ptr - (uint8_t *) jmp_addr;
  966 + flush_icache_range (jmp_addr, jmp_addr + patch_size);
  967 +}
  968 +
  969 +static void tcg_out_op (TCGContext *s, int opc, const TCGArg *args,
  970 + const int *const_args)
  971 +{
  972 + switch (opc) {
  973 + case INDEX_op_exit_tb:
  974 + tcg_out_movi (s, TCG_TYPE_I64, TCG_REG_R3, args[0]);
  975 + tcg_out_b (s, 0, (tcg_target_long) tb_ret_addr);
  976 + break;
  977 + case INDEX_op_goto_tb:
  978 + if (s->tb_jmp_offset) {
  979 + /* direct jump method */
  980 +
  981 + s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
  982 + s->code_ptr += 32;
  983 + }
  984 + else {
  985 + tcg_abort ();
  986 + }
  987 + s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
  988 + break;
  989 + case INDEX_op_br:
  990 + {
  991 + TCGLabel *l = &s->labels[args[0]];
  992 +
  993 + if (l->has_value) {
  994 + tcg_out_b (s, 0, l->u.value);
  995 + }
  996 + else {
  997 + uint32_t val = *(uint32_t *) s->code_ptr;
  998 +
  999 + /* Thanks to Andrzej Zaborowski */
  1000 + tcg_out32 (s, B | (val & 0x3fffffc));
  1001 + tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL24, args[0], 0);
  1002 + }
  1003 + }
  1004 + break;
  1005 + case INDEX_op_call:
  1006 + tcg_out_call (s, args[0], const_args[0]);
  1007 + break;
  1008 + case INDEX_op_jmp:
  1009 + if (const_args[0]) {
  1010 + tcg_out_b (s, 0, args[0]);
  1011 + }
  1012 + else {
  1013 + tcg_out32 (s, MTSPR | RS (args[0]) | CTR);
  1014 + tcg_out32 (s, BCCTR | BO_ALWAYS);
  1015 + }
  1016 + break;
  1017 + case INDEX_op_movi_i32:
  1018 + tcg_out_movi (s, TCG_TYPE_I32, args[0], args[1]);
  1019 + break;
  1020 + case INDEX_op_movi_i64:
  1021 + tcg_out_movi (s, TCG_TYPE_I64, args[0], args[1]);
  1022 + break;
  1023 + case INDEX_op_ld8u_i32:
  1024 + case INDEX_op_ld8u_i64:
  1025 + tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
  1026 + break;
  1027 + case INDEX_op_ld8s_i32:
  1028 + case INDEX_op_ld8s_i64:
  1029 + tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
  1030 + tcg_out32 (s, EXTSB | RS (args[0]) | RA (args[0]));
  1031 + break;
  1032 + case INDEX_op_ld16u_i32:
  1033 + case INDEX_op_ld16u_i64:
  1034 + tcg_out_ldst (s, args[0], args[1], args[2], LHZ, LHZX);
  1035 + break;
  1036 + case INDEX_op_ld16s_i32:
  1037 + case INDEX_op_ld16s_i64:
  1038 + tcg_out_ldst (s, args[0], args[1], args[2], LHA, LHAX);
  1039 + break;
  1040 + case INDEX_op_ld_i32:
  1041 + case INDEX_op_ld32u_i64:
  1042 + tcg_out_ldst (s, args[0], args[1], args[2], LWZ, LWZX);
  1043 + break;
  1044 + case INDEX_op_ld32s_i64:
  1045 + tcg_out_ldst (s, args[0], args[1], args[2], LWA, LWAX);
  1046 + break;
  1047 + case INDEX_op_ld_i64:
  1048 + tcg_out_ldst (s, args[0], args[1], args[2], LD, LDX);
  1049 + break;
  1050 + case INDEX_op_st8_i32:
  1051 + case INDEX_op_st8_i64:
  1052 + tcg_out_ldst (s, args[0], args[1], args[2], STB, STBX);
  1053 + break;
  1054 + case INDEX_op_st16_i32:
  1055 + case INDEX_op_st16_i64:
  1056 + tcg_out_ldst (s, args[0], args[1], args[2], STH, STHX);
  1057 + break;
  1058 + case INDEX_op_st_i32:
  1059 + case INDEX_op_st32_i64:
  1060 + tcg_out_ldst (s, args[0], args[1], args[2], STW, STWX);
  1061 + break;
  1062 + case INDEX_op_st_i64:
  1063 + tcg_out_ldst (s, args[0], args[1], args[2], STD, STDX);
  1064 + break;
  1065 +
  1066 + case INDEX_op_add_i32:
  1067 + if (const_args[2])
  1068 + ppc_addi32 (s, args[0], args[1], args[2]);
  1069 + else
  1070 + tcg_out32 (s, ADD | TAB (args[0], args[1], args[2]));
  1071 + break;
  1072 + case INDEX_op_sub_i32:
  1073 + if (const_args[2])
  1074 + ppc_addi32 (s, args[0], args[1], -args[2]);
  1075 + else
  1076 + tcg_out32 (s, SUBF | TAB (args[0], args[2], args[1]));
  1077 + break;
  1078 +
  1079 + case INDEX_op_and_i32:
  1080 + if (const_args[2]) {
  1081 + if (!args[2])
  1082 + tcg_out_movi (s, TCG_TYPE_I32, args[0], 0);
  1083 + else {
  1084 + if ((args[2] & 0xffff) == args[2])
  1085 + tcg_out32 (s, ANDI | RS (args[1]) | RA (args[0]) | args[2]);
  1086 + else if ((args[2] & 0xffff0000) == args[2])
  1087 + tcg_out32 (s, ANDIS | RS (args[1]) | RA (args[0])
  1088 + | ((args[2] >> 16) & 0xffff));
  1089 + else if (args[2] == 0xffffffff) {
  1090 + if (args[0] != args[1])
  1091 + tcg_out_mov (s, args[0], args[1]);
  1092 + }
  1093 + else {
  1094 + tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]);
  1095 + tcg_out32 (s, AND | SAB (args[1], args[0], 0));
  1096 + }
  1097 + }
  1098 + }
  1099 + else
  1100 + tcg_out32 (s, AND | SAB (args[1], args[0], args[2]));
  1101 + break;
  1102 + case INDEX_op_or_i32:
  1103 + if (const_args[2]) {
  1104 + if (args[2]) {
  1105 + if (args[2] & 0xffff) {
  1106 + tcg_out32 (s, ORI | RS (args[1]) | RA (args[0])
  1107 + | (args[2] & 0xffff));
  1108 + if (args[2] >> 16)
  1109 + tcg_out32 (s, ORIS | RS (args[0]) | RA (args[0])
  1110 + | ((args[2] >> 16) & 0xffff));
  1111 + }
  1112 + else {
  1113 + tcg_out32 (s, ORIS | RS (args[1]) | RA (args[0])
  1114 + | ((args[2] >> 16) & 0xffff));
  1115 + }
  1116 + }
  1117 + else {
  1118 + if (args[0] != args[1])
  1119 + tcg_out_mov (s, args[0], args[1]);
  1120 + }
  1121 + }
  1122 + else
  1123 + tcg_out32 (s, OR | SAB (args[1], args[0], args[2]));
  1124 + break;
  1125 + case INDEX_op_xor_i32:
  1126 + if (const_args[2]) {
  1127 + if (args[2]) {
  1128 + if ((args[2] & 0xffff) == args[2])
  1129 + tcg_out32 (s, XORI | RS (args[1]) | RA (args[0])
  1130 + | (args[2] & 0xffff));
  1131 + else if ((args[2] & 0xffff0000) == args[2])
  1132 + tcg_out32 (s, XORIS | RS (args[1]) | RA (args[0])
  1133 + | ((args[2] >> 16) & 0xffff));
  1134 + else {
  1135 + tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]);
  1136 + tcg_out32 (s, XOR | SAB (args[1], args[0], 0));
  1137 + }
  1138 + }
  1139 + else {
  1140 + if (args[0] != args[1])
  1141 + tcg_out_mov (s, args[0], args[1]);
  1142 + }
  1143 + }
  1144 + else
  1145 + tcg_out32 (s, XOR | SAB (args[1], args[0], args[2]));
  1146 + break;
  1147 +
  1148 + case INDEX_op_mul_i32:
  1149 + if (const_args[2]) {
  1150 + if (args[2] == (int16_t) args[2])
  1151 + tcg_out32 (s, MULLI | RT (args[0]) | RA (args[1])
  1152 + | (args[2] & 0xffff));
  1153 + else {
  1154 + tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]);
  1155 + tcg_out32 (s, MULLW | TAB (args[0], args[1], 0));
  1156 + }
  1157 + }
  1158 + else
  1159 + tcg_out32 (s, MULLW | TAB (args[0], args[1], args[2]));
  1160 + break;
  1161 +
  1162 + case INDEX_op_div_i32:
  1163 + tcg_out32 (s, DIVW | TAB (args[0], args[1], args[2]));
  1164 + break;
  1165 +
  1166 + case INDEX_op_divu_i32:
  1167 + tcg_out32 (s, DIVWU | TAB (args[0], args[1], args[2]));
  1168 + break;
  1169 +
  1170 + case INDEX_op_rem_i32:
  1171 + tcg_out32 (s, DIVW | TAB (0, args[1], args[2]));
  1172 + tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
  1173 + tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
  1174 + break;
  1175 +
  1176 + case INDEX_op_remu_i32:
  1177 + tcg_out32 (s, DIVWU | TAB (0, args[1], args[2]));
  1178 + tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
  1179 + tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
  1180 + break;
  1181 +
  1182 + case INDEX_op_shl_i32:
  1183 + if (const_args[2]) {
  1184 + if (args[2])
  1185 + tcg_out32 (s, (RLWINM
  1186 + | RA (args[0])
  1187 + | RS (args[1])
  1188 + | SH (args[2])
  1189 + | MB (0)
  1190 + | ME (31 - args[2])
  1191 + )
  1192 + );
  1193 + else
  1194 + tcg_out_mov (s, args[0], args[1]);
  1195 + }
  1196 + else
  1197 + tcg_out32 (s, SLW | SAB (args[1], args[0], args[2]));
  1198 + break;
  1199 + case INDEX_op_shr_i32:
  1200 + if (const_args[2]) {
  1201 + if (args[2])
  1202 + tcg_out32 (s, (RLWINM
  1203 + | RA (args[0])
  1204 + | RS (args[1])
  1205 + | SH (32 - args[2])
  1206 + | MB (args[2])
  1207 + | ME (31)
  1208 + )
  1209 + );
  1210 + else
  1211 + tcg_out_mov (s, args[0], args[1]);
  1212 + }
  1213 + else
  1214 + tcg_out32 (s, SRW | SAB (args[1], args[0], args[2]));
  1215 + break;
  1216 + case INDEX_op_sar_i32:
  1217 + if (const_args[2])
  1218 + tcg_out32 (s, SRAWI | RS (args[1]) | RA (args[0]) | SH (args[2]));
  1219 + else
  1220 + tcg_out32 (s, SRAW | SAB (args[1], args[0], args[2]));
  1221 + break;
  1222 +
  1223 + case INDEX_op_brcond_i32:
  1224 + case INDEX_op_brcond_i64:
  1225 + tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3]);
  1226 + break;
  1227 +
  1228 + case INDEX_op_neg_i32:
  1229 + tcg_out32 (s, TRAP);
  1230 + break;
  1231 + case INDEX_op_neg_i64:
  1232 + tcg_out32 (s, NEG | RT (args[0]) | RA (args[1]));
  1233 + break;
  1234 +
  1235 + case INDEX_op_add_i64:
  1236 + tcg_out32 (s, ADD | TAB (args[0], args[1], args[2]));
  1237 + break;
  1238 + case INDEX_op_sub_i64:
  1239 + tcg_out32 (s, SUBF | TAB (args[0], args[2], args[1]));
  1240 + break;
  1241 +
  1242 + case INDEX_op_and_i64:
  1243 + tcg_out32 (s, AND | SAB (args[1], args[0], args[2]));
  1244 + break;
  1245 + case INDEX_op_or_i64:
  1246 + tcg_out32 (s, OR | SAB (args[1], args[0], args[2]));
  1247 + break;
  1248 + case INDEX_op_xor_i64:
  1249 + tcg_out32 (s, XOR | SAB (args[1], args[0], args[2]));
  1250 + break;
  1251 +
  1252 + case INDEX_op_shl_i64:
  1253 + tcg_out32 (s, SLD | SAB (args[1], args[0], args[2]));
  1254 + break;
  1255 + case INDEX_op_shr_i64:
  1256 + tcg_out32 (s, SRD | SAB (args[1], args[0], args[2]));
  1257 + break;
  1258 + case INDEX_op_sar_i64:
  1259 + tcg_out32 (s, SRAD | SAB (args[1], args[0], args[2]));
  1260 + break;
  1261 +
  1262 + case INDEX_op_mul_i64:
  1263 + tcg_out32 (s, MULLD | TAB (args[0], args[1], args[2]));
  1264 + break;
  1265 + case INDEX_op_div_i64:
  1266 + tcg_out32 (s, DIVD | TAB (args[0], args[1], args[2]));
  1267 + break;
  1268 + case INDEX_op_divu_i64:
  1269 + tcg_out32 (s, DIVDU | TAB (args[0], args[1], args[2]));
  1270 + break;
  1271 + case INDEX_op_rem_i64:
  1272 + tcg_out32 (s, DIVD | TAB (0, args[1], args[2]));
  1273 + tcg_out32 (s, MULLD | TAB (0, 0, args[2]));
  1274 + tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
  1275 + break;
  1276 + case INDEX_op_remu_i64:
  1277 + tcg_out32 (s, DIVDU | TAB (0, args[1], args[2]));
  1278 + tcg_out32 (s, MULLD | TAB (0, 0, args[2]));
  1279 + tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
  1280 + break;
  1281 +
  1282 + case INDEX_op_qemu_ld8u:
  1283 + tcg_out_qemu_ld (s, args, 0);
  1284 + break;
  1285 + case INDEX_op_qemu_ld8s:
  1286 + tcg_out_qemu_ld (s, args, 0 | 4);
  1287 + break;
  1288 + case INDEX_op_qemu_ld16u:
  1289 + tcg_out_qemu_ld (s, args, 1);
  1290 + break;
  1291 + case INDEX_op_qemu_ld16s:
  1292 + tcg_out_qemu_ld (s, args, 1 | 4);
  1293 + break;
  1294 + case INDEX_op_qemu_ld32u:
  1295 + tcg_out_qemu_ld (s, args, 2);
  1296 + break;
  1297 + case INDEX_op_qemu_ld32s:
  1298 + tcg_out_qemu_ld (s, args, 2 | 4);
  1299 + break;
  1300 + case INDEX_op_qemu_ld64:
  1301 + tcg_out_qemu_ld (s, args, 3);
  1302 + break;
  1303 + case INDEX_op_qemu_st8:
  1304 + tcg_out_qemu_st (s, args, 0);
  1305 + break;
  1306 + case INDEX_op_qemu_st16:
  1307 + tcg_out_qemu_st (s, args, 1);
  1308 + break;
  1309 + case INDEX_op_qemu_st32:
  1310 + tcg_out_qemu_st (s, args, 2);
  1311 + break;
  1312 + case INDEX_op_qemu_st64:
  1313 + tcg_out_qemu_st (s, args, 3);
  1314 + break;
  1315 +
  1316 + default:
  1317 + tcg_dump_ops (s, stderr);
  1318 + tcg_abort ();
  1319 + }
  1320 +}
  1321 +
  1322 +static const TCGTargetOpDef ppc_op_defs[] = {
  1323 + { INDEX_op_exit_tb, { } },
  1324 + { INDEX_op_goto_tb, { } },
  1325 + { INDEX_op_call, { "ri" } },
  1326 + { INDEX_op_jmp, { "ri" } },
  1327 + { INDEX_op_br, { } },
  1328 +
  1329 + { INDEX_op_mov_i32, { "r", "r" } },
  1330 + { INDEX_op_mov_i64, { "r", "r" } },
  1331 + { INDEX_op_movi_i32, { "r" } },
  1332 + { INDEX_op_movi_i64, { "r" } },
  1333 +
  1334 + { INDEX_op_ld8u_i32, { "r", "r" } },
  1335 + { INDEX_op_ld8s_i32, { "r", "r" } },
  1336 + { INDEX_op_ld16u_i32, { "r", "r" } },
  1337 + { INDEX_op_ld16s_i32, { "r", "r" } },
  1338 + { INDEX_op_ld_i32, { "r", "r" } },
  1339 + { INDEX_op_ld_i64, { "r", "r" } },
  1340 + { INDEX_op_st8_i32, { "r", "r" } },
  1341 + { INDEX_op_st8_i64, { "r", "r" } },
  1342 + { INDEX_op_st16_i32, { "r", "r" } },
  1343 + { INDEX_op_st16_i64, { "r", "r" } },
  1344 + { INDEX_op_st_i32, { "r", "r" } },
  1345 + { INDEX_op_st_i64, { "r", "r" } },
  1346 + { INDEX_op_st32_i64, { "r", "r" } },
  1347 +
  1348 + { INDEX_op_ld8u_i64, { "r", "r" } },
  1349 + { INDEX_op_ld8s_i64, { "r", "r" } },
  1350 + { INDEX_op_ld16u_i64, { "r", "r" } },
  1351 + { INDEX_op_ld16s_i64, { "r", "r" } },
  1352 + { INDEX_op_ld32u_i64, { "r", "r" } },
  1353 + { INDEX_op_ld32s_i64, { "r", "r" } },
  1354 + { INDEX_op_ld_i64, { "r", "r" } },
  1355 +
  1356 + { INDEX_op_add_i32, { "r", "r", "ri" } },
  1357 + { INDEX_op_mul_i32, { "r", "r", "ri" } },
  1358 + { INDEX_op_div_i32, { "r", "r", "r" } },
  1359 + { INDEX_op_divu_i32, { "r", "r", "r" } },
  1360 + { INDEX_op_rem_i32, { "r", "r", "r" } },
  1361 + { INDEX_op_remu_i32, { "r", "r", "r" } },
  1362 + { INDEX_op_sub_i32, { "r", "r", "ri" } },
  1363 + { INDEX_op_and_i32, { "r", "r", "ri" } },
  1364 + { INDEX_op_or_i32, { "r", "r", "ri" } },
  1365 + { INDEX_op_xor_i32, { "r", "r", "ri" } },
  1366 +
  1367 + { INDEX_op_shl_i32, { "r", "r", "ri" } },
  1368 + { INDEX_op_shr_i32, { "r", "r", "ri" } },
  1369 + { INDEX_op_sar_i32, { "r", "r", "ri" } },
  1370 +
  1371 + { INDEX_op_brcond_i32, { "r", "ri" } },
  1372 + { INDEX_op_brcond_i64, { "r", "ri" } },
  1373 +
  1374 + { INDEX_op_neg_i32, { "r", "r" } },
  1375 +
  1376 + { INDEX_op_add_i64, { "r", "r", "r" } },
  1377 + { INDEX_op_sub_i64, { "r", "r", "r" } },
  1378 + { INDEX_op_and_i64, { "r", "r", "r" } },
  1379 + { INDEX_op_or_i64, { "r", "r", "r" } },
  1380 + { INDEX_op_xor_i64, { "r", "r", "r" } },
  1381 +
  1382 + { INDEX_op_shl_i64, { "r", "r", "r" } },
  1383 + { INDEX_op_shr_i64, { "r", "r", "r" } },
  1384 + { INDEX_op_sar_i64, { "r", "r", "r" } },
  1385 +
  1386 + { INDEX_op_mul_i64, { "r", "r", "ri" } },
  1387 + { INDEX_op_div_i64, { "r", "r", "r" } },
  1388 + { INDEX_op_divu_i64, { "r", "r", "r" } },
  1389 + { INDEX_op_rem_i64, { "r", "r", "r" } },
  1390 + { INDEX_op_remu_i64, { "r", "r", "r" } },
  1391 +
  1392 + { INDEX_op_neg_i64, { "r", "r" } },
  1393 +
  1394 + { INDEX_op_qemu_ld8u, { "r", "L" } },
  1395 + { INDEX_op_qemu_ld8s, { "r", "L" } },
  1396 + { INDEX_op_qemu_ld16u, { "r", "L" } },
  1397 + { INDEX_op_qemu_ld16s, { "r", "L" } },
  1398 + { INDEX_op_qemu_ld32u, { "r", "L" } },
  1399 + { INDEX_op_qemu_ld32s, { "r", "L" } },
  1400 + { INDEX_op_qemu_ld64, { "r", "r", "L" } },
  1401 +
  1402 + { INDEX_op_qemu_st8, { "K", "K" } },
  1403 + { INDEX_op_qemu_st16, { "K", "K" } },
  1404 + { INDEX_op_qemu_st32, { "K", "K" } },
  1405 + { INDEX_op_qemu_st64, { "M", "M", "M" } },
  1406 +
  1407 + { -1 },
  1408 +};
  1409 +
  1410 +void tcg_target_init (TCGContext *s)
  1411 +{
  1412 + tcg_regset_set32 (tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
  1413 + tcg_regset_set32 (tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
  1414 + tcg_regset_set32 (tcg_target_call_clobber_regs, 0,
  1415 + (1 << TCG_REG_R0) |
  1416 + (1 << TCG_REG_R3) |
  1417 + (1 << TCG_REG_R4) |
  1418 + (1 << TCG_REG_R5) |
  1419 + (1 << TCG_REG_R6) |
  1420 + (1 << TCG_REG_R7) |
  1421 + (1 << TCG_REG_R8) |
  1422 + (1 << TCG_REG_R9) |
  1423 + (1 << TCG_REG_R10) |
  1424 + (1 << TCG_REG_R11) |
  1425 + (1 << TCG_REG_R12)
  1426 + );
  1427 +
  1428 + tcg_regset_clear (s->reserved_regs);
  1429 + tcg_regset_set_reg (s->reserved_regs, TCG_REG_R0);
  1430 + tcg_regset_set_reg (s->reserved_regs, TCG_REG_R1);
  1431 + tcg_regset_set_reg (s->reserved_regs, TCG_REG_R2);
  1432 + tcg_regset_set_reg (s->reserved_regs, TCG_REG_R13);
  1433 +
  1434 + tcg_add_target_add_op_defs (ppc_op_defs);
  1435 +}
... ...
tcg/ppc64/tcg-target.h 0 → 100644
  1 +/*
  2 + * Tiny Code Generator for QEMU
  3 + *
  4 + * Copyright (c) 2008 Fabrice Bellard
  5 + *
  6 + * Permission is hereby granted, free of charge, to any person obtaining a copy
  7 + * of this software and associated documentation files (the "Software"), to deal
  8 + * in the Software without restriction, including without limitation the rights
  9 + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 + * copies of the Software, and to permit persons to whom the Software is
  11 + * furnished to do so, subject to the following conditions:
  12 + *
  13 + * The above copyright notice and this permission notice shall be included in
  14 + * all copies or substantial portions of the Software.
  15 + *
  16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 + * THE SOFTWARE.
  23 + */
  24 +#define TCG_TARGET_PPC64 1
  25 +
  26 +#define TCG_TARGET_REG_BITS 64
  27 +#define TCG_TARGET_WORDS_BIGENDIAN
  28 +#define TCG_TARGET_NB_REGS 32
  29 +
  30 +enum {
  31 + TCG_REG_R0 = 0,
  32 + TCG_REG_R1,
  33 + TCG_REG_R2,
  34 + TCG_REG_R3,
  35 + TCG_REG_R4,
  36 + TCG_REG_R5,
  37 + TCG_REG_R6,
  38 + TCG_REG_R7,
  39 + TCG_REG_R8,
  40 + TCG_REG_R9,
  41 + TCG_REG_R10,
  42 + TCG_REG_R11,
  43 + TCG_REG_R12,
  44 + TCG_REG_R13,
  45 + TCG_REG_R14,
  46 + TCG_REG_R15,
  47 + TCG_REG_R16,
  48 + TCG_REG_R17,
  49 + TCG_REG_R18,
  50 + TCG_REG_R19,
  51 + TCG_REG_R20,
  52 + TCG_REG_R21,
  53 + TCG_REG_R22,
  54 + TCG_REG_R23,
  55 + TCG_REG_R24,
  56 + TCG_REG_R25,
  57 + TCG_REG_R26,
  58 + TCG_REG_R27,
  59 + TCG_REG_R28,
  60 + TCG_REG_R29,
  61 + TCG_REG_R30,
  62 + TCG_REG_R31
  63 +};
  64 +
  65 +/* used for function call generation */
  66 +#define TCG_REG_CALL_STACK TCG_REG_R1
  67 +#define TCG_TARGET_STACK_ALIGN 16
  68 +#define TCG_TARGET_CALL_STACK_OFFSET 8
  69 +
  70 +/* optional instructions */
  71 +#define TCG_TARGET_HAS_neg_i32
  72 +#define TCG_TARGET_HAS_div_i32
  73 +#define TCG_TARGET_HAS_neg_i64
  74 +#define TCG_TARGET_HAS_div_i64
  75 +
  76 +#define TCG_AREG0 TCG_REG_R27
  77 +#define TCG_AREG1 TCG_REG_R24
  78 +#define TCG_AREG2 TCG_REG_R25
  79 +#define TCG_AREG3 TCG_REG_R26
  80 +
  81 +/* taken directly from tcg-dyngen.c */
  82 +#define MIN_CACHE_LINE_SIZE 8 /* conservative value */
  83 +
  84 +static inline void flush_icache_range(unsigned long start, unsigned long stop)
  85 +{
  86 + unsigned long p;
  87 +
  88 + start &= ~(MIN_CACHE_LINE_SIZE - 1);
  89 + stop = (stop + MIN_CACHE_LINE_SIZE - 1) & ~(MIN_CACHE_LINE_SIZE - 1);
  90 +
  91 + for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
  92 + asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
  93 + }
  94 + asm volatile ("sync" : : : "memory");
  95 + for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
  96 + asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
  97 + }
  98 + asm volatile ("sync" : : : "memory");
  99 + asm volatile ("isync" : : : "memory");
  100 +}
... ...
tcg/tcg.h
... ... @@ -414,8 +414,23 @@ uint64_t tcg_helper_remu_i64(uint64_t arg1, uint64_t arg2);
414 414  
415 415 extern uint8_t code_gen_prologue[];
416 416 #ifdef __powerpc__
  417 +#ifdef __powerpc64__
  418 +#define tcg_qemu_tb_exec(tb_ptr) \
  419 + ({ unsigned long p; \
  420 + asm volatile ( \
  421 + "mtctr %1\n\t" \
  422 + "mr 3,%2\n\t" \
  423 + "bctrl\n\t" \
  424 + "mr %0,3\n\t" \
  425 + : "=r" (p) \
  426 + : "r" (code_gen_prologue), "r" (tb_ptr) \
  427 + : "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"); \
  428 + p; \
  429 + })
  430 +#else
417 431 #define tcg_qemu_tb_exec(tb_ptr) \
418 432 ((long REGPARM __attribute__ ((longcall)) (*)(void *))code_gen_prologue)(tb_ptr)
  433 +#endif
419 434 #else
420 435 #define tcg_qemu_tb_exec(tb_ptr) ((long REGPARM (*)(void *))code_gen_prologue)(tb_ptr)
421 436 #endif
... ...
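
Note on the tcg/tcg.h change above: the inline-assembly dispatch, like the indirect call sequence emitted by tcg_out_call() in tcg/ppc64/tcg-target.c, reflects the 64-bit PowerPC ELF ABI, in which a C function pointer refers to a function descriptor rather than to the first instruction of the code. A sketch of the descriptor layout the backend assumes (the struct name is illustrative, not part of the patch):

/* Assumed PPC64 ELF (ELFv1-style) function descriptor layout. */
struct ppc64_func_desc {
    unsigned long entry; /* offset 0:  address of the first instruction     */
    unsigned long toc;   /* offset 8:  TOC base to load into r2             */
    unsigned long env;   /* offset 16: environment pointer to load into r11 */
};

This matches the loads in tcg_out_call(): the word at offset 0 goes into CTR, offset 8 into r2 and offset 16 into r11 before the bctrl. code_gen_prologue is a raw code buffer with no descriptor in front of it, so tcg_qemu_tb_exec cannot simply cast it to a function pointer on ppc64; instead it moves the buffer address into CTR and branches with bctrl, passing tb_ptr in r3 and reading the result back from r3.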