Commit b8076a748d52db5f5258c29fe342b8593a0b9914
1 parent: 7a674b13

ia64 host support (David Mosberger)

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1360 c046a42c-6fe2-441c-8c8c-71466251a162

Showing 13 changed files with 738 additions and 64 deletions.
Makefile.target

@@ -184,7 +184,9 @@ LDFLAGS+=-Wl,-T,$(SRC_PATH)/alpha.ld
 endif

 ifeq ($(ARCH),ia64)
+CFLAGS += -mno-sdata
 OP_CFLAGS=$(CFLAGS)
+LDFLAGS+=-Wl,-G0 -Wl,-T,$(SRC_PATH)/ia64.ld
 endif

 ifeq ($(ARCH),arm)
@@ -382,6 +384,10 @@ vl.o: CFLAGS+=-p
 VL_LDFLAGS+=-p
 endif

+ifeq ($(ARCH),ia64)
+VL_LDFLAGS+=-Wl,-G0 -Wl,-T,$(SRC_PATH)/ia64.ld
+endif
+
 $(QEMU_SYSTEM): $(VL_OBJS) libqemu.a
 	$(CC) $(VL_LDFLAGS) -o $@ $^ $(LIBS) $(SDL_LIBS) $(COCOA_LIBS) $(VL_LIBS)

cpu-exec.c

@@ -573,6 +573,15 @@ int cpu_exec(CPUState *env1)
                 );
             }
         }
+#elif defined(__ia64)
+        struct fptr {
+            void *ip;
+            void *gp;
+        } fp;
+
+        fp.ip = tc_ptr;
+        fp.gp = code_gen_buffer + 2 * (1 << 20);
+        (*(void (*)(void)) &fp)();
 #else
         gen_func();
 #endif
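Note: on ia64 a function pointer is not a raw code address but a two-word descriptor holding the entry point (ip) and the global pointer (gp), which is why the hunk above builds a struct fptr by hand before jumping into translated code. The gp it installs, code_gen_buffer + 2 * (1 << 20), is the same value the dyngen.c change below passes to ia64_apply_fixes() as the base against which GOT offsets are computed. A minimal standalone sketch of the same calling trick (illustration only, not part of the patch):

    /* Sketch: call generated code through a hand-built ia64 function
       descriptor so the callee sees a gp of our choosing. */
    struct fptr {
        void *ip;   /* entry point of the code to run */
        void *gp;   /* global pointer the callee will see in r1 */
    };

    static void call_with_gp(void *entry, void *gp)
    {
        struct fptr fp = { entry, gp };
        (*(void (*)(void)) &fp)();   /* same cast as in the hunk above */
    }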
@@ -1118,6 +1127,40 @@ int cpu_signal_handler(int host_signum, struct siginfo *info,
                              &uc->uc_sigmask, puc);
 }

+#elif defined(__ia64)
+
+#ifndef __ISR_VALID
+  /* This ought to be in <bits/siginfo.h>... */
+# define __ISR_VALID 1
+# define si_flags _sifields._sigfault._si_pad0
+#endif
+
+int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc)
+{
+    struct ucontext *uc = puc;
+    unsigned long ip;
+    int is_write = 0;
+
+    ip = uc->uc_mcontext.sc_ip;
+    switch (host_signum) {
+    case SIGILL:
+    case SIGFPE:
+    case SIGSEGV:
+    case SIGBUS:
+    case SIGTRAP:
+        if (info->si_code && (info->si_flags & __ISR_VALID))
+            /* ISR.W (write-access) is bit 33:  */
+            is_write = (info->si_isr >> 33) & 1;
+        break;
+
+    default:
+        break;
+    }
+    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
+                             is_write,
+                             &uc->uc_sigmask, puc);
+}
+
 #else

 #error host CPU specific signal handler needed
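Note: the new ia64 cpu_signal_handler() recovers the faulting instruction pointer from uc_mcontext.sc_ip and, when the kernel marks the saved interruption status register as valid (__ISR_VALID), reads ISR bit 33 (ISR.W) to tell writes from reads. A tiny worked example of that bit test (hypothetical ISR value, illustration only):

    #include <stdio.h>

    int main(void)
    {
        unsigned long isr = 1UL << 33;                        /* hypothetical ISR: only the W bit set */
        printf("is_write = %d\n", (int)((isr >> 33) & 1));    /* prints 1 */
        return 0;
    }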
disas.c

@@ -143,7 +143,8 @@ void target_disas(FILE *out, target_ulong code, target_ulong size, int flags)
 #elif defined(TARGET_PPC)
     print_insn = print_insn_ppc;
 #else
-    fprintf(out, "Asm output not supported on this arch\n");
+    fprintf(out, "0x" TARGET_FMT_lx
+            ": Asm output not supported on this arch\n", code);
     return;
 #endif

@@ -202,7 +203,8 @@ void disas(FILE *out, void *code, unsigned long size)
 #elif defined(__arm__)
     print_insn = print_insn_arm;
 #else
-    fprintf(out, "Asm output not supported on this arch\n");
+    fprintf(out, "0x%lx: Asm output not supported on this arch\n",
+            (long) code);
     return;
 #endif
     for (pc = (unsigned long)code; pc < (unsigned long)code + size; pc += count) {
@@ -311,7 +313,8 @@ void monitor_disas(target_ulong pc, int nb_insn, int is_physical, int flags)
 #elif defined(TARGET_PPC)
     print_insn = print_insn_ppc;
 #else
-    term_printf("Asm output not supported on this arch\n");
+    term_printf("0x" TARGET_FMT_lx
+                ": Asm output not supported on this arch\n", pc);
     return;
 #endif

dyngen-exec.h

@@ -29,7 +29,7 @@ typedef unsigned char uint8_t;
 typedef unsigned short uint16_t;
 typedef unsigned int uint32_t;
 /* XXX may be done for all 64 bits targets ? */
-#if defined (__x86_64__)
+#if defined (__x86_64__) || defined(__ia64)
 typedef unsigned long uint64_t;
 #else
 typedef unsigned long long uint64_t;
@@ -38,7 +38,7 @@ typedef unsigned long long uint64_t;
 typedef signed char int8_t;
 typedef signed short int16_t;
 typedef signed int int32_t;
-#if defined (__x86_64__)
+#if defined (__x86_64__) || defined(__ia64)
 typedef signed long int64_t;
 #else
 typedef signed long long int64_t;
@@ -148,10 +148,10 @@ extern int printf(const char *, ...);
 #define AREG4 "%d5"
 #endif
 #ifdef __ia64__
-#define AREG0 "r27"
-#define AREG1 "r24"
-#define AREG2 "r25"
-#define AREG3 "r26"
+#define AREG0 "r7"
+#define AREG1 "r4"
+#define AREG2 "r5"
+#define AREG3 "r6"
 #endif

 /* force GCC to generate only one epilog at the end of the function */
@@ -224,6 +224,8 @@ extern int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3;
 #endif
 #ifdef __ia64__
 #define EXIT_TB() asm volatile ("br.ret.sptk.many b0;;")
+#define GOTO_LABEL_PARAM(n) asm volatile ("br.sptk.many " \
+                                          ASM_NAME(__op_gen_label) #n)
 #endif
 #ifdef __sparc__
 #define EXIT_TB() asm volatile ("jmpl %i0 + 8, %g0\n" \
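Note: the AREG change moves QEMU's pinned pointers from the scratch registers r24-r27 to r4-r7, which the ia64 software conventions reserve as preserved (callee-saved) registers, presumably so that calls made out of generated code cannot clobber them. The AREGn strings are consumed elsewhere in QEMU as GCC global register variables; that use is not part of this diff, so the sketch below is an assumption about the surrounding code:

    /* Hedged sketch (not in this diff): pin the emulated CPU state pointer
       to the host register named by AREG0.  "CPUState" stands in for the
       per-target state type. */
    #define AREG0 "r7"
    register struct CPUState *env asm(AREG0);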
dyngen.c

@@ -1203,6 +1203,48 @@ void get_reloc_expr(char *name, int name_size, const char *sym_name)
     }
 }

+#ifdef HOST_IA64
+
+#define PLT_ENTRY_SIZE 16 /* 1 bundle containing "brl" */
+
+struct plt_entry {
+    struct plt_entry *next;
+    const char *name;
+    unsigned long addend;
+} *plt_list;
+
+static int
+get_plt_index (const char *name, unsigned long addend)
+{
+    struct plt_entry *plt, *prev= NULL;
+    int index = 0;
+
+    /* see if we already have an entry for this target: */
+    for (plt = plt_list; plt; ++index, prev = plt, plt = plt->next)
+        if (strcmp(plt->name, name) == 0 && plt->addend == addend)
+            return index;
+
+    /* nope; create a new PLT entry: */
+
+    plt = malloc(sizeof(*plt));
+    if (!plt) {
+        perror("malloc");
+        exit(1);
+    }
+    memset(plt, 0, sizeof(*plt));
+    plt->name = strdup(name);
+    plt->addend = addend;
+
+    /* append to plt-list: */
+    if (prev)
+        prev->next = plt;
+    else
+        plt_list = plt;
+    return index;
+}
+
+#endif
+
 #ifdef HOST_ARM

 int arm_emit_ldr_info(const char *name, unsigned long start_offset,
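Note: get_plt_index() hands out one stable, zero-based slot per distinct (symbol, addend) pair; a repeated call target gets its existing slot back, so the PLT that dyngen emits later contains exactly one 16-byte "brl" bundle per distinct target (see PLT_ENTRY_SIZE above). A usage sketch (the symbol names are made up):

    static void plt_index_demo(void)
    {
        int a = get_plt_index("helper_foo", 0);   /* -> 0: first target seen */
        int b = get_plt_index("helper_bar", 0);   /* -> 1: new target        */
        int c = get_plt_index("helper_foo", 0);   /* -> 0: slot is reused    */
        (void)a; (void)b; (void)c;
    }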
@@ -1392,7 +1434,7 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
         /* 08 00 84 00 */
         if (get32((uint32_t *)p) != 0x00840008)
             error("br.ret.sptk.many b0;; expected at the end of %s", name);
-        copy_size = p - p_start;
+        copy_size = p_end - p_start;
     }
 #elif defined(HOST_SPARC)
     {
@@ -1529,7 +1571,11 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
             }
             fprintf(outfile, ";\n");
         }
+#if defined(HOST_IA64)
+        fprintf(outfile, " extern char %s;\n", name);
+#else
         fprintf(outfile, " extern void %s();\n", name);
+#endif

         for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) {
             host_ulong offset = get_rel_offset(rel);
@@ -1550,9 +1596,18 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
                     continue;
                 }
 #endif
-#ifdef __APPLE__
+#if defined(__APPLE__)
                 /* set __attribute((unused)) on darwin because we wan't to avoid warning when we don't use the symbol */
                 fprintf(outfile, "extern char %s __attribute__((unused));\n", sym_name);
+#elif defined(HOST_IA64)
+                if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B)
+                        /*
+                         * PCREL21 br.call targets generally
+                         * are out of range and need to go
+                         * through an "import stub".
+                         */
+                        fprintf(outfile, " extern char %s;\n",
+                                sym_name);
 #else
                 fprintf(outfile, "extern char %s;\n", sym_name);
 #endif
@@ -1964,25 +2019,78 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
     }
 #elif defined(HOST_IA64)
     {
+        unsigned long sym_idx;
+        long code_offset;
         char name[256];
         int type;
-        int addend;
+        long addend;
+
         for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) {
-            if (rel->r_offset >= start_offset && rel->r_offset < start_offset + copy_size) {
-                sym_name = strtab + symtab[ELF64_R_SYM(rel->r_info)].st_name;
-                get_reloc_expr(name, sizeof(name), sym_name);
-                type = ELF64_R_TYPE(rel->r_info);
-                addend = rel->r_addend;
-                switch(type) {
-                case R_IA64_LTOFF22:
-                    error("must implemnt R_IA64_LTOFF22 relocation");
-                case R_IA64_PCREL21B:
-                    error("must implemnt R_IA64_PCREL21B relocation");
-                default:
-                    error("unsupported ia64 relocation (%d)", type);
-                }
-            }
+            sym_idx = ELF64_R_SYM(rel->r_info);
+            if (rel->r_offset < start_offset
+                || rel->r_offset >= start_offset + copy_size)
+                continue;
+            sym_name = (strtab + symtab[sym_idx].st_name);
+            if (strstart(sym_name, "__op_jmp", &p)) {
+                int n;
+                n = strtol(p, NULL, 10);
+                /* __op_jmp relocations are done at
+                   runtime to do translated block
+                   chaining: the offset of the instruction
+                   needs to be stored */
+                fprintf(outfile, " jmp_offsets[%d] ="
+                        "%ld + (gen_code_ptr - gen_code_buf);\n",
+                        n, rel->r_offset - start_offset);
+                continue;
+            }
+            get_reloc_expr(name, sizeof(name), sym_name);
+            type = ELF64_R_TYPE(rel->r_info);
+            addend = rel->r_addend;
+            code_offset = rel->r_offset - start_offset;
+            switch(type) {
+            case R_IA64_IMM64:
+                fprintf(outfile,
+                        " ia64_imm64(gen_code_ptr + %ld, "
+                        "%s + %ld);\n",
+                        code_offset, name, addend);
+                break;
+            case R_IA64_LTOFF22X:
+            case R_IA64_LTOFF22:
+                fprintf(outfile, " IA64_LTOFF(gen_code_ptr + %ld,"
+                        " %s + %ld, %d);\n",
+                        code_offset, name, addend,
+                        (type == R_IA64_LTOFF22X));
+                break;
+            case R_IA64_LDXMOV:
+                fprintf(outfile,
+                        " ia64_ldxmov(gen_code_ptr + %ld,"
+                        " %s + %ld);\n", code_offset, name, addend);
+                break;
+
+            case R_IA64_PCREL21B:
+                if (strstart(sym_name, "__op_gen_label", NULL)) {
+                    fprintf(outfile,
+                            " ia64_imm21b(gen_code_ptr + %ld,"
+                            " (long) (%s + %ld -\n\t\t"
+                            "((long) gen_code_ptr + %ld)) >> 4);\n",
+                            code_offset, name, addend,
+                            code_offset & ~0xfUL);
+                } else {
+                    fprintf(outfile,
+                            " IA64_PLT(gen_code_ptr + %ld, "
+                            "%d);\t/* %s + %ld */\n",
+                            code_offset,
+                            get_plt_index(sym_name, addend),
+                            sym_name, addend);
+                }
+                break;
+            default:
+                error("unsupported ia64 relocation (0x%x)",
+                      type);
+            }
         }
+        fprintf(outfile, " ia64_nop_b(gen_code_ptr + %d);\n",
+                copy_size - 16 + 2);
     }
 #elif defined(HOST_SPARC)
     {
@@ -2236,6 +2344,63 @@ fprintf(outfile,
 " LDREntry *arm_ldr_ptr = arm_ldr_table;\n"
 " uint32_t *arm_data_ptr = arm_data_table;\n");
 #endif
+#ifdef HOST_IA64
+    {
+        long addend, not_first = 0;
+        unsigned long sym_idx;
+        int index, max_index;
+        const char *sym_name;
+        EXE_RELOC *rel;
+
+        max_index = -1;
+        for (i = 0, rel = relocs;i < nb_relocs; i++, rel++) {
+            sym_idx = ELF64_R_SYM(rel->r_info);
+            sym_name = (strtab + symtab[sym_idx].st_name);
+            if (strstart(sym_name, "__op_gen_label", NULL))
+                continue;
+            if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B)
+                continue;
+
+            addend = rel->r_addend;
+            index = get_plt_index(sym_name, addend);
+            if (index <= max_index)
+                continue;
+            max_index = index;
+            fprintf(outfile, " extern void %s(void);\n", sym_name);
+        }
+
+        fprintf(outfile,
+                " struct ia64_fixup *plt_fixes = NULL, "
+                "*ltoff_fixes = NULL;\n"
+                " static long plt_target[] = {\n\t");
+
+        max_index = -1;
+        for (i = 0, rel = relocs;i < nb_relocs; i++, rel++) {
+            sym_idx = ELF64_R_SYM(rel->r_info);
+            sym_name = (strtab + symtab[sym_idx].st_name);
+            if (strstart(sym_name, "__op_gen_label", NULL))
+                continue;
+            if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B)
+                continue;
+
+            addend = rel->r_addend;
+            index = get_plt_index(sym_name, addend);
+            if (index <= max_index)
+                continue;
+            max_index = index;
+
+            if (not_first)
+                fprintf(outfile, ",\n\t");
+            not_first = 1;
+            if (addend)
+                fprintf(outfile, "(long) &%s + %ld", sym_name, addend);
+            else
+                fprintf(outfile, "(long) &%s", sym_name);
+        }
+        fprintf(outfile, "\n };\n"
+                " unsigned int plt_offset[%u] = { 0 };\n", max_index + 1);
+    }
+#endif

 fprintf(outfile,
 "\n"
@@ -2298,6 +2463,13 @@ fprintf(outfile,
 " }\n"
 " the_end:\n"
 );
+#ifdef HOST_IA64
+    fprintf(outfile,
+            " ia64_apply_fixes(&gen_code_ptr, ltoff_fixes, "
+            "(uint64_t) code_gen_buffer + 2*(1<<20), plt_fixes,\n\t\t\t"
+            "sizeof(plt_target)/sizeof(plt_target[0]),\n\t\t\t"
+            "plt_target, plt_offset);\n");
+#endif

 /* generate some code patching */
 #ifdef HOST_ARM
dyngen.h

@@ -43,6 +43,11 @@ static inline void flush_icache_range(unsigned long start, unsigned long stop)
 #ifdef __ia64__
 static inline void flush_icache_range(unsigned long start, unsigned long stop)
 {
+    while (start < stop) {
+        asm volatile ("fc %0" :: "r"(start));
+        start += 32;
+    }
+    asm volatile (";;sync.i;;srlz.i;;");
 }
 #endif

@@ -204,3 +209,218 @@ static uint8_t *arm_flush_ldr(uint8_t *gen_code_ptr,
 }

 #endif /* __arm__ */
+
+#ifdef __ia64
+
+
+/* Patch instruction with "val" where "mask" has 1 bits. */
+static inline void ia64_patch (uint64_t insn_addr, uint64_t mask, uint64_t val)
+{
+    uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) (insn_addr & -16);
+#   define insn_mask ((1UL << 41) - 1)
+    unsigned long shift;
+
+    b0 = b[0]; b1 = b[1];
+    shift = 5 + 41 * (insn_addr % 16); /* 5 template, 3 x 41-bit insns */
+    if (shift >= 64) {
+        m1 = mask << (shift - 64);
+        v1 = val << (shift - 64);
+    } else {
+        m0 = mask << shift; m1 = mask >> (64 - shift);
+        v0 = val << shift; v1 = val >> (64 - shift);
+        b[0] = (b0 & ~m0) | (v0 & m0);
+    }
+    b[1] = (b1 & ~m1) | (v1 & m1);
+}
+
+static inline void ia64_patch_imm60 (uint64_t insn_addr, uint64_t val)
+{
+    ia64_patch(insn_addr,
+               0x011ffffe000UL,
+               (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
+                | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */));
+    ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18);
+}
+
+static inline void ia64_imm64 (void *insn, uint64_t val)
+{
+    /* Ignore the slot number of the relocation; GCC and Intel
+       toolchains differed for some time on whether IMM64 relocs are
+       against slot 1 (Intel) or slot 2 (GCC). */
+    uint64_t insn_addr = (uint64_t) insn & ~3UL;
+
+    ia64_patch(insn_addr + 2,
+               0x01fffefe000UL,
+               (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
+                | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
+                | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
+                | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */
+                | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */)
+               );
+    ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
+}
+
+static inline void ia64_imm60b (void *insn, uint64_t val)
+{
+    /* Ignore the slot number of the relocation; GCC and Intel
+       toolchains differed for some time on whether IMM64 relocs are
+       against slot 1 (Intel) or slot 2 (GCC). */
+    uint64_t insn_addr = (uint64_t) insn & ~3UL;
+
+    if (val + ((uint64_t) 1 << 59) >= (1UL << 60))
+        fprintf(stderr, "%s: value %ld out of IMM60 range\n",
+                __FUNCTION__, (int64_t) val);
+    ia64_patch_imm60(insn_addr + 2, val);
+}
+
+static inline void ia64_imm22 (void *insn, uint64_t val)
+{
+    if (val + (1 << 21) >= (1 << 22))
+        fprintf(stderr, "%s: value %li out of IMM22 range\n",
+                __FUNCTION__, (int64_t)val);
+    ia64_patch((uint64_t) insn, 0x01fffcfe000UL,
+               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
+                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
+                | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */
+                | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */));
+}
+
+/* Like ia64_imm22(), but also clear bits 20-21.  For addl, this has
+   the effect of turning "addl rX=imm22,rY" into "addl
+   rX=imm22,r0". */
+static inline void ia64_imm22_r0 (void *insn, uint64_t val)
+{
+    if (val + (1 << 21) >= (1 << 22))
+        fprintf(stderr, "%s: value %li out of IMM22 range\n",
+                __FUNCTION__, (int64_t)val);
+    ia64_patch((uint64_t) insn, 0x01fffcfe000UL | (0x3UL << 20),
+               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
+                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
+                | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */
+                | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */));
+}
+
+static inline void ia64_imm21b (void *insn, uint64_t val)
+{
+    if (val + (1 << 20) >= (1 << 21))
+        fprintf(stderr, "%s: value %li out of IMM21b range\n",
+                __FUNCTION__, (int64_t)val);
+    ia64_patch((uint64_t) insn, 0x11ffffe000UL,
+               (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
+                | ((val & 0x0fffffUL) << 13) /* bit 0 -> 13 */));
+}
+
+static inline void ia64_nop_b (void *insn)
+{
+    ia64_patch((uint64_t) insn, (1UL << 41) - 1, 2UL << 37);
+}
+
+static inline void ia64_ldxmov(void *insn, uint64_t val)
+{
+    if (val + (1 << 21) < (1 << 22))
+        ia64_patch((uint64_t) insn, 0x1fff80fe000UL, 8UL << 37);
+}
+
+static inline int ia64_patch_ltoff(void *insn, uint64_t val,
+                                   int relaxable)
+{
+    if (relaxable && (val + (1 << 21) < (1 << 22))) {
+        ia64_imm22_r0(insn, val);
+        return 0;
+    }
+    return 1;
+}
+
+struct ia64_fixup {
+    struct ia64_fixup *next;
+    void *addr;   /* address that needs to be patched */
+    long value;
+};
+
+#define IA64_PLT(insn, plt_index)                       \
+do {                                                    \
+    struct ia64_fixup *fixup = alloca(sizeof(*fixup));  \
+    fixup->next = plt_fixes;                            \
+    plt_fixes = fixup;                                  \
+    fixup->addr = (insn);                               \
+    fixup->value = (plt_index);                         \
+    plt_offset[(plt_index)] = 1;                        \
+} while (0)
+
+#define IA64_LTOFF(insn, val, relaxable)                    \
+do {                                                        \
+    if (ia64_patch_ltoff(insn, val, relaxable)) {           \
+        struct ia64_fixup *fixup = alloca(sizeof(*fixup));  \
+        fixup->next = ltoff_fixes;                          \
+        ltoff_fixes = fixup;                                \
+        fixup->addr = (insn);                               \
+        fixup->value = (val);                               \
+    }                                                       \
+} while (0)
+
+static inline void ia64_apply_fixes (uint8_t **gen_code_pp,
+                                     struct ia64_fixup *ltoff_fixes,
+                                     uint64_t gp,
+                                     struct ia64_fixup *plt_fixes,
+                                     int num_plts,
+                                     unsigned long *plt_target,
+                                     unsigned int *plt_offset)
+{
+    static const uint8_t plt_bundle[] = {
+        0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; movl r1=GP */
+        0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x60,
+
+        0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; brl IP */
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0
+    };
+    uint8_t *gen_code_ptr = *gen_code_pp, *plt_start, *got_start, *vp;
+    struct ia64_fixup *fixup;
+    unsigned int offset = 0;
+    struct fdesc {
+        long ip;
+        long gp;
+    } *fdesc;
+    int i;
+
+    if (plt_fixes) {
+        plt_start = gen_code_ptr;
+
+        for (i = 0; i < num_plts; ++i) {
+            if (plt_offset[i]) {
+                plt_offset[i] = offset;
+                offset += sizeof(plt_bundle);
+
+                fdesc = (struct fdesc *) plt_target[i];
+                memcpy(gen_code_ptr, plt_bundle, sizeof(plt_bundle));
+                ia64_imm64 (gen_code_ptr + 0x02, fdesc->gp);
+                ia64_imm60b(gen_code_ptr + 0x12,
+                            (fdesc->ip - (long) (gen_code_ptr + 0x10)) >> 4);
+                gen_code_ptr += sizeof(plt_bundle);
+            }
+        }
+
+        for (fixup = plt_fixes; fixup; fixup = fixup->next)
+            ia64_imm21b(fixup->addr,
+                        ((long) plt_start + plt_offset[fixup->value]
+                         - ((long) fixup->addr & ~0xf)) >> 4);
+    }
+
+    got_start = gen_code_ptr;
+
+    /* First, create the GOT: */
+    for (fixup = ltoff_fixes; fixup; fixup = fixup->next) {
+        /* first check if we already have this value in the GOT: */
+        for (vp = got_start; vp < gen_code_ptr; ++vp)
+            if (*(uint64_t *) vp == fixup->value)
+                break;
+        if (vp == gen_code_ptr) {
+            /* Nope, we need to put the value in the GOT: */
+            *(uint64_t *) vp = fixup->value;
+            gen_code_ptr += 8;
+        }
+        ia64_imm22(fixup->addr, (long) vp - gp);
+    }
+    *gen_code_pp = gen_code_ptr;
+}
+
+#endif
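Note: all of the patching helpers reduce to ia64_patch(), which works on 128-bit instruction bundles: 5 template bits followed by three 41-bit slots, so a slot's bit offset is 5 + 41 * slot, and slot 1 straddles the bundle's two 64-bit words (which is why the mask may be split into m0/m1). A standalone worked example of that arithmetic (illustration only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        for (int slot = 0; slot < 3; slot++) {
            int shift = 5 + 41 * slot;      /* same formula as ia64_patch() */
            const char *note = "";
            if (shift < 64 && shift + 40 >= 64)
                note = " (straddles both 64-bit words)";
            else if (shift >= 64)
                note = " (entirely in the second word)";
            printf("slot %d: bits %d..%d%s\n", slot, shift, shift + 40, note);
        }
        return 0;   /* prints 5..45, 46..86, 87..127 */
    }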
exec-all.h

@@ -126,6 +126,8 @@ int tlb_set_page(CPUState *env, target_ulong vaddr,

 #if defined(__alpha__)
 #define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
+#elif defined(__ia64)
+#define CODE_GEN_BUFFER_SIZE     (4 * 1024 * 1024) /* range of addl */
 #elif defined(__powerpc__)
 #define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
 #else
@@ -487,6 +489,15 @@ static inline int testandset (int *p)
 }
 #endif

+#ifdef __ia64
+#include <ia64intrin.h>
+
+static inline int testandset (int *p)
+{
+    return __sync_lock_test_and_set (p, 1);
+}
+#endif
+
 typedef int spinlock_t;

 #define SPIN_LOCK_UNLOCKED 0
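Note: __sync_lock_test_and_set(p, 1) atomically stores 1 and returns the previous value, which is exactly the acquire primitive a spinlock needs. QEMU's own spin_lock() helper is not shown in this diff, so the sketch below only illustrates the usual busy-wait pattern built on the new testandset():

    static inline void sketch_spin_lock(spinlock_t *lock)
    {
        while (testandset(lock))
            ;                       /* previous value was 1: someone else holds it */
    }

    static inline void sketch_spin_unlock(spinlock_t *lock)
    {
        *lock = 0;                  /* release */
    }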
exec.c

@@ -58,7 +58,7 @@ int nb_tbs;
 /* any access to the tbs or the page table must use this lock */
 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

-uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
+uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
 uint8_t *code_gen_ptr;

 int phys_ram_size;
ia64.ld
new file mode 100644

+/* Default linker script, for normal executables */
+OUTPUT_FORMAT("elf64-ia64-little", "elf64-ia64-little",
+              "elf64-ia64-little")
+OUTPUT_ARCH(ia64)
+ENTRY(_start)
+SEARCH_DIR("/usr/ia64-linux/lib"); SEARCH_DIR("/usr/local/lib"); SEARCH_DIR("/lib"); SEARCH_DIR("/usr/lib");
+/* Do we need any of these for elf?
+   __DYNAMIC = 0;    */
+SECTIONS
+{
+  /* Read-only sections, merged into text segment: */
+  PROVIDE (__executable_start = 0x60000000); . = 0x60000000 + SIZEOF_HEADERS;
+  .interp : { *(.interp) }
+  .hash : { *(.hash) }
+  .dynsym : { *(.dynsym) }
+  .dynstr : { *(.dynstr) }
+  .gnu.version : { *(.gnu.version) }
+  .gnu.version_d : { *(.gnu.version_d) }
+  .gnu.version_r : { *(.gnu.version_r) }
+  .rel.init : { *(.rel.init) }
+  .rela.init : { *(.rela.init) }
+  .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+  .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+  .rel.fini : { *(.rel.fini) }
+  .rela.fini : { *(.rela.fini) }
+  .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+  .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+  .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+  .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+  .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+  .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+  .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+  .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+  .rel.ctors : { *(.rel.ctors) }
+  .rela.ctors : { *(.rela.ctors) }
+  .rel.dtors : { *(.rel.dtors) }
+  .rela.dtors : { *(.rela.dtors) }
+  .rel.got : { *(.rel.got) }
+  .rela.got : { *(.rela.got) }
+  .rel.sdata : { *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*) }
+  .rela.sdata : { *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*) }
+  .rel.sbss : { *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*) }
+  .rela.sbss : { *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*) }
+  .rel.sdata2 : { *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*) }
+  .rela.sdata2 : { *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*) }
+  .rel.sbss2 : { *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*) }
+  .rela.sbss2 : { *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*) }
+  .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+  .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+  .rel.plt : { *(.rel.plt) }
+  .rela.plt : { *(.rela.plt) }
+  .rela.IA_64.pltoff : { *(.rela.IA_64.pltoff) }
+  .init :
+  {
+    KEEP (*(.init))
+  } =0x00300000010070000002000001000400
+  .plt : { *(.plt) }
+  .text :
+  {
+    *(.text .stub .text.* .gnu.linkonce.t.*)
+    /* .gnu.warning sections are handled specially by elf32.em. */
+    *(.gnu.warning)
+  } =0x00300000010070000002000001000400
+  .fini :
+  {
+    KEEP (*(.fini))
+  } =0x00300000010070000002000001000400
+  PROVIDE (__etext = .);
+  PROVIDE (_etext = .);
+  PROVIDE (etext = .);
+  .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+  .rodata1 : { *(.rodata1) }
+  .sdata2 : { *(.sdata2 .sdata2.* .gnu.linkonce.s2.*) }
+  .sbss2 : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) }
+  .opd : { *(.opd) }
+  .IA_64.unwind_info : { *(.IA_64.unwind_info* .gnu.linkonce.ia64unwi.*) }
+  .IA_64.unwind : { *(.IA_64.unwind* .gnu.linkonce.ia64unw.*) }
+  .eh_frame_hdr : { *(.eh_frame_hdr) }
+  /* Adjust the address for the data segment.  We want to adjust up to
+     the same address within the page on the next page up. */
+  . = ALIGN(0x10000) + (. & (0x10000 - 1));
+  /* Ensure the __preinit_array_start label is properly aligned.  We
+     could instead move the label definition inside the section, but
+     the linker would then create the section even if it turns out to
+     be empty, which isn't pretty. */
+  . = ALIGN(64 / 8);
+  PROVIDE (__preinit_array_start = .);
+  .preinit_array : { *(.preinit_array) }
+  PROVIDE (__preinit_array_end = .);
+  PROVIDE (__init_array_start = .);
+  .init_array : { *(.init_array) }
+  PROVIDE (__init_array_end = .);
+  PROVIDE (__fini_array_start = .);
+  .fini_array : { *(.fini_array) }
+  PROVIDE (__fini_array_end = .);
+  .data :
+  {
+    *(.data .data.* .gnu.linkonce.d.*)
+    SORT(CONSTRUCTORS)
+  }
+  .data1 : { *(.data1) }
+  .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+  .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+  .eh_frame : { KEEP (*(.eh_frame)) }
+  .gcc_except_table : { *(.gcc_except_table) }
+  .dynamic : { *(.dynamic) }
+  .ctors :
+  {
+    /* gcc uses crtbegin.o to find the start of
+       the constructors, so we make sure it is
+       first.  Because this is a wildcard, it
+       doesn't matter if the user does not
+       actually link against crtbegin.o; the
+       linker won't look for a file to match a
+       wildcard.  The wildcard also means that it
+       doesn't matter which directory crtbegin.o
+       is in. */
+    KEEP (*crtbegin*.o(.ctors))
+    /* We don't want to include the .ctor section from
+       from the crtend.o file until after the sorted ctors.
+       The .ctor section from the crtend file contains the
+       end of ctors marker and it must be last */
+    KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors))
+    KEEP (*(SORT(.ctors.*)))
+    KEEP (*(.ctors))
+  }
+  .dtors :
+  {
+    KEEP (*crtbegin*.o(.dtors))
+    KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors))
+    KEEP (*(SORT(.dtors.*)))
+    KEEP (*(.dtors))
+  }
+  .jcr : { KEEP (*(.jcr)) }
+  /* Ensure __gp is outside the range of any normal data.  We need to
+     do this to avoid the linker optimizing the code in op.o and getting
+     it out of sync with the relocs that we read when processing that
+     file.  A better solution might be to ensure that the dynamically
+     generated code and static qemu code share a single gp-value. */
+  __gp = . + 0x200000;
+  .got : { *(.got.plt) *(.got) }
+  .IA_64.pltoff : { *(.IA_64.pltoff) }
+  /* We want the small data sections together, so single-instruction offsets
+     can access them all, and initialized data all before uninitialized, so
+     we can shorten the on-disk segment size. */
+  .sdata :
+  {
+    *(.sdata .sdata.* .gnu.linkonce.s.*)
+  }
+  _edata = .;
+  PROVIDE (edata = .);
+  __bss_start = .;
+  .sbss :
+  {
+    PROVIDE (__sbss_start = .);
+    PROVIDE (___sbss_start = .);
+    *(.dynsbss)
+    *(.sbss .sbss.* .gnu.linkonce.sb.*)
+    *(.scommon)
+    PROVIDE (__sbss_end = .);
+    PROVIDE (___sbss_end = .);
+  }
+  .bss :
+  {
+    . += 0x400000; /* ensure .bss stuff is out of reach of gp */
+    *(.dynbss)
+    *(.bss .bss.* .gnu.linkonce.b.*)
+    *(COMMON)
+    /* Align here to ensure that the .bss section occupies space up to
+       _end.  Align after .bss to ensure correct alignment even if the
+       .bss section disappears because there are no input sections. */
+    . = ALIGN(64 / 8);
+  }
+  . = ALIGN(64 / 8);
+  _end = .;
+  PROVIDE (end = .);
+  /* Stabs debugging sections. */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to the beginning
+     of the section so we begin them at 0. */
+  /* DWARF 1 */
+  .debug 0 : { *(.debug) }
+  .line 0 : { *(.line) }
+  /* GNU DWARF 1 extensions */
+  .debug_srcinfo 0 : { *(.debug_srcinfo) }
+  .debug_sfnames 0 : { *(.debug_sfnames) }
+  /* DWARF 1.1 and DWARF 2 */
+  .debug_aranges 0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  /* DWARF 2 */
+  .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+  .debug_abbrev 0 : { *(.debug_abbrev) }
+  .debug_line 0 : { *(.debug_line) }
+  .debug_frame 0 : { *(.debug_frame) }
+  .debug_str 0 : { *(.debug_str) }
+  .debug_loc 0 : { *(.debug_loc) }
+  .debug_macinfo 0 : { *(.debug_macinfo) }
+  /* SGI/MIPS DWARF 2 extensions */
+  .debug_weaknames 0 : { *(.debug_weaknames) }
+  .debug_funcnames 0 : { *(.debug_funcnames) }
+  .debug_typenames 0 : { *(.debug_typenames) }
+  .debug_varnames 0 : { *(.debug_varnames) }
+  /DISCARD/ : { *(.note.GNU-stack) }
+}
linux-user/mmap.c

@@ -152,7 +152,8 @@ long target_mmap(unsigned long start, unsigned long len, int prot,
                  int flags, int fd, unsigned long offset)
 {
     unsigned long ret, end, host_start, host_end, retaddr, host_offset, host_len;
-#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__)
+#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__) || \
+    defined(__ia64)
     static unsigned long last_start = 0x40000000;
 #endif

@@ -191,7 +192,8 @@ long target_mmap(unsigned long start, unsigned long len, int prot,
         host_start = start & qemu_host_page_mask;

     if (!(flags & MAP_FIXED)) {
-#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__)
+#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__) || \
+    defined(__ia64)
         /* tell the kenel to search at the same place as i386 */
         if (host_start == 0) {
             host_start = last_start;
linux-user/signal.c

@@ -26,13 +26,6 @@
 #include <errno.h>
 #include <sys/ucontext.h>

-#ifdef __ia64__
-#undef uc_mcontext
-#undef uc_sigmask
-#undef uc_stack
-#undef uc_link
-#endif
-
 #include "qemu.h"

 //#define DEBUG_SIGNAL
@@ -557,11 +550,11 @@ typedef struct target_sigaltstack {
 } target_stack_t;

 struct target_ucontext {
-        target_ulong uc_flags;
-        target_ulong uc_link;
-        target_stack_t uc_stack;
-        struct target_sigcontext uc_mcontext;
-        target_sigset_t uc_sigmask;  /* mask last for extensibility */
+        target_ulong tuc_flags;
+        target_ulong tuc_link;
+        target_stack_t tuc_stack;
+        struct target_sigcontext tuc_mcontext;
+        target_sigset_t tuc_sigmask; /* mask last for extensibility */
 };

 struct sigframe
@@ -743,16 +736,18 @@ static void setup_rt_frame(int sig, struct emulated_sigaction *ka,
         goto give_sigsegv;

     /* Create the ucontext. */
-    err |= __put_user(0, &frame->uc.uc_flags);
-    err |= __put_user(0, &frame->uc.uc_link);
-    err |= __put_user(/*current->sas_ss_sp*/ 0, &frame->uc.uc_stack.ss_sp);
+    err |= __put_user(0, &frame->uc.tuc_flags);
+    err |= __put_user(0, &frame->uc.tuc_link);
+    err |= __put_user(/*current->sas_ss_sp*/ 0,
+                      &frame->uc.tuc_stack.ss_sp);
     err |= __put_user(/* sas_ss_flags(regs->esp) */ 0,
-                      &frame->uc.uc_stack.ss_flags);
-    err |= __put_user(/* current->sas_ss_size */ 0, &frame->uc.uc_stack.ss_size);
-    err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
+                      &frame->uc.tuc_stack.ss_flags);
+    err |= __put_user(/* current->sas_ss_size */ 0,
+                      &frame->uc.tuc_stack.ss_size);
+    err |= setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate,
                             env, set->sig[0]);
     for(i = 0; i < TARGET_NSIG_WORDS; i++) {
-        if (__put_user(set->sig[i], &frame->uc.uc_sigmask.sig[i]))
+        if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]))
             goto give_sigsegv;
     }

@@ -880,14 +875,14 @@ long do_rt_sigreturn(CPUX86State *env)
     if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
         goto badframe;
 #endif
-    target_to_host_sigset(&set, &frame->uc.uc_sigmask);
+    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
     sigprocmask(SIG_SETMASK, &set, NULL);

-    if (restore_sigcontext(env, &frame->uc.uc_mcontext, &eax))
+    if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax))
         goto badframe;

 #if 0
-    if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+    if (__copy_from_user(&st, &frame->uc.tuc_stack, sizeof(st)))
         goto badframe;
     /* It is more difficult to avoid calling this function than to
        call it and ignore errors. */
@@ -933,11 +928,11 @@ typedef struct target_sigaltstack {
 } target_stack_t;

 struct target_ucontext {
-    target_ulong uc_flags;
-    target_ulong uc_link;
-    target_stack_t uc_stack;
-    struct target_sigcontext uc_mcontext;
-    target_sigset_t uc_sigmask;  /* mask last for extensibility */
+    target_ulong tuc_flags;
+    target_ulong tuc_link;
+    target_stack_t tuc_stack;
+    struct target_sigcontext tuc_mcontext;
+    target_sigset_t tuc_sigmask; /* mask last for extensibility */
 };

 struct sigframe
@@ -1135,10 +1130,10 @@ static void setup_rt_frame(int usig, struct emulated_sigaction *ka,
     /* Clear all the bits of the ucontext we don't use. */
     err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));

-    err |= setup_sigcontext(&frame->uc.uc_mcontext, /*&frame->fpstate,*/
+    err |= setup_sigcontext(&frame->uc.tuc_mcontext, /*&frame->fpstate,*/
                             env, set->sig[0]);
     for(i = 0; i < TARGET_NSIG_WORDS; i++) {
-        if (__put_user(set->sig[i], &frame->uc.uc_sigmask.sig[i]))
+        if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]))
             return;
     }

@@ -1253,10 +1248,10 @@ long do_rt_sigreturn(CPUState *env)
     if (verify_area(VERIFY_READ, frame, sizeof (*frame)))
         goto badframe;
 #endif
-    target_to_host_sigset(&host_set, &frame->uc.uc_sigmask);
+    target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
     sigprocmask(SIG_SETMASK, &host_set, NULL);

-    if (restore_sigcontext(env, &frame->uc.uc_mcontext))
+    if (restore_sigcontext(env, &frame->uc.tuc_mcontext))
         goto badframe;

 #if 0
qemu-img.c

@@ -165,7 +165,7 @@ static void get_human_readable_size(char *buf, int buf_size, int64_t size)
     int i;

     if (size <= 999) {
-        snprintf(buf, buf_size, "%lld", size);
+        snprintf(buf, buf_size, "%lld", (long long) size);
     } else {
         base = 1024;
         for(i = 0; i < NB_SUFFIXES; i++) {
@@ -176,7 +176,7 @@ static void get_human_readable_size(char *buf, int buf_size, int64_t size)
                 break;
             } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
                 snprintf(buf, buf_size, "%lld%c",
-                         (size + (base >> 1)) / base,
+                         (long long) ((size + (base >> 1)) / base),
                          suffixes[i]);
                 break;
             }
@@ -369,7 +369,7 @@ static int img_create(int argc, char **argv)
         printf(", backing_file=%s",
                base_filename);
     }
-    printf(", size=%lld kB\n", size / 1024);
+    printf(", size=%lld kB\n", (long long) (size / 1024));
     ret = bdrv_create(drv, filename, size / 512, base_filename, encrypted);
     if (ret < 0) {
         if (ret == -ENOTSUP) {
@@ -666,7 +666,7 @@ static int img_info(int argc, char **argv)
            "virtual size: %s (%lld bytes)\n"
            "disk size: %s\n",
            filename, fmt_name, size_buf,
-           total_sectors * 512,
+           (long long) (total_sectors * 512),
            dsize_buf);
     if (bdrv_is_encrypted(bs))
         printf("encrypted: yes\n");
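Note: the qemu-img.c changes are printf-portability fixes that go with the dyngen-exec.h hunk above: on LP64 hosts such as ia64 and x86-64, int64_t is typically plain long, so passing it straight to a %lld conversion is mismatched; an explicit (long long) cast is correct on every host. A minimal standalone illustration:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t size = 5368709120LL;                      /* 5 GiB, needs 64 bits */
        printf("size=%lld bytes\n", (long long) size);    /* safe on both LP64 and ILP32 */
        return 0;
    }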
vl.c

@@ -519,6 +519,15 @@ int64_t cpu_get_real_ticks(void)
     return val;
 }

+#elif defined(__ia64)
+
+int64_t cpu_get_real_ticks(void)
+{
+    int64_t val;
+    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
+    return val;
+}
+
 #else
 #error unsupported CPU
 #endif
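Note: ar.itc is the ia64 interval time counter, a free-running cycle counter, so this gives vl.c the same kind of cheap timestamp that rdtsc provides on x86 hosts. A hedged usage sketch (the helper name below is illustrative, not from the source):

    /* Time an arbitrary callback in ITC ticks. */
    static int64_t time_block(void (*fn)(void))
    {
        int64_t t0 = cpu_get_real_ticks();   /* read ar.itc before... */
        fn();                                /* ...the measured work... */
        return cpu_get_real_ticks() - t0;    /* ...and after */
    }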