Commit c27004ec7888096c982bbc9b17016fcfe7903171

Authored by bellard
1 parent 612458f5

64 bit target support


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1189 c046a42c-6fe2-441c-8c8c-71466251a162
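Note: the core of this change is a rename of the host-pointer load/store helpers in cpu-all.h from *_raw to *_p, with the *_raw names reintroduced as macros that double-cast their argument through long so that a target_ulong address can be wider than a host pointer. Target virtual addresses (pc, cs_base, TLB and softmmu arguments) switch from pointer types to target_ulong, and TARGET_FMT_lx is added for printing them. The sketch below condenses that accessor pattern for illustration only; it is not a copy of the header (the real _p helpers also have the byte-swapping and unaligned variants selected by the endian #ifdefs in the hunks that follow), and the main() usage is a made-up example.

    /* Sketch of the cpu-all.h pattern: "_p" takes a host pointer, "_raw"
       accepts a target_ulong and double-casts it through long so the code
       still compiles when pointers and target_ulong differ in size. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t target_ulong;          /* assume TARGET_LONG_SIZE == 8 */

    /* trivial variants for a host with target byte order and no alignment
       constraints; other hosts get the asm/byte-swap versions in the diff */
    static inline int ldl_p(void *ptr)
    {
        return *(uint32_t *)ptr;
    }

    static inline void stl_p(void *ptr, int v)
    {
        *(uint32_t *)ptr = v;
    }

    /* NOTE: double casts, as in the new cpu-all.h macros */
    #define ldl_raw(p)     ldl_p((uint8_t *)(long)(p))
    #define stl_raw(p, v)  stl_p((uint8_t *)(long)(p), v)

    int main(void)
    {
        uint32_t word = 0;
        target_ulong addr = (target_ulong)(long)&word;  /* host buffer seen as a target_ulong */

        stl_raw(addr, 0x12345678);
        printf("read back 0x%08x\n", ldl_raw(addr));
        return 0;
    }
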
cpu-all.h
@@ -166,17 +166,17 @@ typedef union {
166 * user : user mode access using soft MMU 166 * user : user mode access using soft MMU
167 * kernel : kernel mode access using soft MMU 167 * kernel : kernel mode access using soft MMU
168 */ 168 */
169 -static inline int ldub_raw(void *ptr) 169 +static inline int ldub_p(void *ptr)
170 { 170 {
171 return *(uint8_t *)ptr; 171 return *(uint8_t *)ptr;
172 } 172 }
173 173
174 -static inline int ldsb_raw(void *ptr) 174 +static inline int ldsb_p(void *ptr)
175 { 175 {
176 return *(int8_t *)ptr; 176 return *(int8_t *)ptr;
177 } 177 }
178 178
179 -static inline void stb_raw(void *ptr, int v) 179 +static inline void stb_p(void *ptr, int v)
180 { 180 {
181 *(uint8_t *)ptr = v; 181 *(uint8_t *)ptr = v;
182 } 182 }
@@ -187,7 +187,7 @@ static inline void stb_raw(void *ptr, int v)
187 #if !defined(TARGET_WORDS_BIGENDIAN) && (defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)) 187 #if !defined(TARGET_WORDS_BIGENDIAN) && (defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED))
188 188
189 /* conservative code for little endian unaligned accesses */ 189 /* conservative code for little endian unaligned accesses */
190 -static inline int lduw_raw(void *ptr) 190 +static inline int lduw_p(void *ptr)
191 { 191 {
192 #ifdef __powerpc__ 192 #ifdef __powerpc__
193 int val; 193 int val;
@@ -199,7 +199,7 @@ static inline int lduw_raw(void *ptr)
199 #endif 199 #endif
200 } 200 }
201 201
202 -static inline int ldsw_raw(void *ptr) 202 +static inline int ldsw_p(void *ptr)
203 { 203 {
204 #ifdef __powerpc__ 204 #ifdef __powerpc__
205 int val; 205 int val;
@@ -211,7 +211,7 @@ static inline int ldsw_raw(void *ptr)
211 #endif 211 #endif
212 } 212 }
213 213
214 -static inline int ldl_raw(void *ptr) 214 +static inline int ldl_p(void *ptr)
215 { 215 {
216 #ifdef __powerpc__ 216 #ifdef __powerpc__
217 int val; 217 int val;
@@ -223,16 +223,16 @@ static inline int ldl_raw(void *ptr)
223 #endif 223 #endif
224 } 224 }
225 225
226 -static inline uint64_t ldq_raw(void *ptr) 226 +static inline uint64_t ldq_p(void *ptr)
227 { 227 {
228 uint8_t *p = ptr; 228 uint8_t *p = ptr;
229 uint32_t v1, v2; 229 uint32_t v1, v2;
230 - v1 = ldl_raw(p);  
231 - v2 = ldl_raw(p + 4); 230 + v1 = ldl_p(p);
  231 + v2 = ldl_p(p + 4);
232 return v1 | ((uint64_t)v2 << 32); 232 return v1 | ((uint64_t)v2 << 32);
233 } 233 }
234 234
235 -static inline void stw_raw(void *ptr, int v) 235 +static inline void stw_p(void *ptr, int v)
236 { 236 {
237 #ifdef __powerpc__ 237 #ifdef __powerpc__
238 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr)); 238 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
@@ -243,7 +243,7 @@ static inline void stw_raw(void *ptr, int v)
243 #endif 243 #endif
244 } 244 }
245 245
246 -static inline void stl_raw(void *ptr, int v) 246 +static inline void stl_p(void *ptr, int v)
247 { 247 {
248 #ifdef __powerpc__ 248 #ifdef __powerpc__
249 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr)); 249 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
@@ -256,54 +256,54 @@ static inline void stl_raw(void *ptr, int v)
256 #endif 256 #endif
257 } 257 }
258 258
259 -static inline void stq_raw(void *ptr, uint64_t v) 259 +static inline void stq_p(void *ptr, uint64_t v)
260 { 260 {
261 uint8_t *p = ptr; 261 uint8_t *p = ptr;
262 - stl_raw(p, (uint32_t)v);  
263 - stl_raw(p + 4, v >> 32); 262 + stl_p(p, (uint32_t)v);
  263 + stl_p(p + 4, v >> 32);
264 } 264 }
265 265
266 /* float access */ 266 /* float access */
267 267
268 -static inline float ldfl_raw(void *ptr) 268 +static inline float ldfl_p(void *ptr)
269 { 269 {
270 union { 270 union {
271 float f; 271 float f;
272 uint32_t i; 272 uint32_t i;
273 } u; 273 } u;
274 - u.i = ldl_raw(ptr); 274 + u.i = ldl_p(ptr);
275 return u.f; 275 return u.f;
276 } 276 }
277 277
278 -static inline void stfl_raw(void *ptr, float v) 278 +static inline void stfl_p(void *ptr, float v)
279 { 279 {
280 union { 280 union {
281 float f; 281 float f;
282 uint32_t i; 282 uint32_t i;
283 } u; 283 } u;
284 u.f = v; 284 u.f = v;
285 - stl_raw(ptr, u.i); 285 + stl_p(ptr, u.i);
286 } 286 }
287 287
288 -static inline double ldfq_raw(void *ptr) 288 +static inline double ldfq_p(void *ptr)
289 { 289 {
290 CPU_DoubleU u; 290 CPU_DoubleU u;
291 - u.l.lower = ldl_raw(ptr);  
292 - u.l.upper = ldl_raw(ptr + 4); 291 + u.l.lower = ldl_p(ptr);
  292 + u.l.upper = ldl_p(ptr + 4);
293 return u.d; 293 return u.d;
294 } 294 }
295 295
296 -static inline void stfq_raw(void *ptr, double v) 296 +static inline void stfq_p(void *ptr, double v)
297 { 297 {
298 CPU_DoubleU u; 298 CPU_DoubleU u;
299 u.d = v; 299 u.d = v;
300 - stl_raw(ptr, u.l.lower);  
301 - stl_raw(ptr + 4, u.l.upper); 300 + stl_p(ptr, u.l.lower);
  301 + stl_p(ptr + 4, u.l.upper);
302 } 302 }
303 303
304 #elif defined(TARGET_WORDS_BIGENDIAN) && (!defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)) 304 #elif defined(TARGET_WORDS_BIGENDIAN) && (!defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED))
305 305
306 -static inline int lduw_raw(void *ptr) 306 +static inline int lduw_p(void *ptr)
307 { 307 {
308 #if defined(__i386__) 308 #if defined(__i386__)
309 int val; 309 int val;
@@ -318,7 +318,7 @@ static inline int lduw_raw(void *ptr)
318 #endif 318 #endif
319 } 319 }
320 320
321 -static inline int ldsw_raw(void *ptr) 321 +static inline int ldsw_p(void *ptr)
322 { 322 {
323 #if defined(__i386__) 323 #if defined(__i386__)
324 int val; 324 int val;
@@ -333,7 +333,7 @@ static inline int ldsw_raw(void *ptr)
333 #endif 333 #endif
334 } 334 }
335 335
336 -static inline int ldl_raw(void *ptr) 336 +static inline int ldl_p(void *ptr)
337 { 337 {
338 #if defined(__i386__) || defined(__x86_64__) 338 #if defined(__i386__) || defined(__x86_64__)
339 int val; 339 int val;
@@ -348,15 +348,15 @@ static inline int ldl_raw(void *ptr)
348 #endif 348 #endif
349 } 349 }
350 350
351 -static inline uint64_t ldq_raw(void *ptr) 351 +static inline uint64_t ldq_p(void *ptr)
352 { 352 {
353 uint32_t a,b; 353 uint32_t a,b;
354 - a = ldl_raw(ptr);  
355 - b = ldl_raw(ptr+4); 354 + a = ldl_p(ptr);
  355 + b = ldl_p(ptr+4);
356 return (((uint64_t)a<<32)|b); 356 return (((uint64_t)a<<32)|b);
357 } 357 }
358 358
359 -static inline void stw_raw(void *ptr, int v) 359 +static inline void stw_p(void *ptr, int v)
360 { 360 {
361 #if defined(__i386__) 361 #if defined(__i386__)
362 asm volatile ("xchgb %b0, %h0\n" 362 asm volatile ("xchgb %b0, %h0\n"
@@ -370,7 +370,7 @@ static inline void stw_raw(void *ptr, int v)
370 #endif 370 #endif
371 } 371 }
372 372
373 -static inline void stl_raw(void *ptr, int v) 373 +static inline void stl_p(void *ptr, int v)
374 { 374 {
375 #if defined(__i386__) || defined(__x86_64__) 375 #if defined(__i386__) || defined(__x86_64__)
376 asm volatile ("bswap %0\n" 376 asm volatile ("bswap %0\n"
@@ -386,105 +386,105 @@ static inline void stl_raw(void *ptr, int v)
386 #endif 386 #endif
387 } 387 }
388 388
389 -static inline void stq_raw(void *ptr, uint64_t v) 389 +static inline void stq_p(void *ptr, uint64_t v)
390 { 390 {
391 - stl_raw(ptr, v >> 32);  
392 - stl_raw(ptr + 4, v); 391 + stl_p(ptr, v >> 32);
  392 + stl_p(ptr + 4, v);
393 } 393 }
394 394
395 /* float access */ 395 /* float access */
396 396
397 -static inline float ldfl_raw(void *ptr) 397 +static inline float ldfl_p(void *ptr)
398 { 398 {
399 union { 399 union {
400 float f; 400 float f;
401 uint32_t i; 401 uint32_t i;
402 } u; 402 } u;
403 - u.i = ldl_raw(ptr); 403 + u.i = ldl_p(ptr);
404 return u.f; 404 return u.f;
405 } 405 }
406 406
407 -static inline void stfl_raw(void *ptr, float v) 407 +static inline void stfl_p(void *ptr, float v)
408 { 408 {
409 union { 409 union {
410 float f; 410 float f;
411 uint32_t i; 411 uint32_t i;
412 } u; 412 } u;
413 u.f = v; 413 u.f = v;
414 - stl_raw(ptr, u.i); 414 + stl_p(ptr, u.i);
415 } 415 }
416 416
417 -static inline double ldfq_raw(void *ptr) 417 +static inline double ldfq_p(void *ptr)
418 { 418 {
419 CPU_DoubleU u; 419 CPU_DoubleU u;
420 - u.l.upper = ldl_raw(ptr);  
421 - u.l.lower = ldl_raw(ptr + 4); 420 + u.l.upper = ldl_p(ptr);
  421 + u.l.lower = ldl_p(ptr + 4);
422 return u.d; 422 return u.d;
423 } 423 }
424 424
425 -static inline void stfq_raw(void *ptr, double v) 425 +static inline void stfq_p(void *ptr, double v)
426 { 426 {
427 CPU_DoubleU u; 427 CPU_DoubleU u;
428 u.d = v; 428 u.d = v;
429 - stl_raw(ptr, u.l.upper);  
430 - stl_raw(ptr + 4, u.l.lower); 429 + stl_p(ptr, u.l.upper);
  430 + stl_p(ptr + 4, u.l.lower);
431 } 431 }
432 432
433 #else 433 #else
434 434
435 -static inline int lduw_raw(void *ptr) 435 +static inline int lduw_p(void *ptr)
436 { 436 {
437 return *(uint16_t *)ptr; 437 return *(uint16_t *)ptr;
438 } 438 }
439 439
440 -static inline int ldsw_raw(void *ptr) 440 +static inline int ldsw_p(void *ptr)
441 { 441 {
442 return *(int16_t *)ptr; 442 return *(int16_t *)ptr;
443 } 443 }
444 444
445 -static inline int ldl_raw(void *ptr) 445 +static inline int ldl_p(void *ptr)
446 { 446 {
447 return *(uint32_t *)ptr; 447 return *(uint32_t *)ptr;
448 } 448 }
449 449
450 -static inline uint64_t ldq_raw(void *ptr) 450 +static inline uint64_t ldq_p(void *ptr)
451 { 451 {
452 return *(uint64_t *)ptr; 452 return *(uint64_t *)ptr;
453 } 453 }
454 454
455 -static inline void stw_raw(void *ptr, int v) 455 +static inline void stw_p(void *ptr, int v)
456 { 456 {
457 *(uint16_t *)ptr = v; 457 *(uint16_t *)ptr = v;
458 } 458 }
459 459
460 -static inline void stl_raw(void *ptr, int v) 460 +static inline void stl_p(void *ptr, int v)
461 { 461 {
462 *(uint32_t *)ptr = v; 462 *(uint32_t *)ptr = v;
463 } 463 }
464 464
465 -static inline void stq_raw(void *ptr, uint64_t v) 465 +static inline void stq_p(void *ptr, uint64_t v)
466 { 466 {
467 *(uint64_t *)ptr = v; 467 *(uint64_t *)ptr = v;
468 } 468 }
469 469
470 /* float access */ 470 /* float access */
471 471
472 -static inline float ldfl_raw(void *ptr) 472 +static inline float ldfl_p(void *ptr)
473 { 473 {
474 return *(float *)ptr; 474 return *(float *)ptr;
475 } 475 }
476 476
477 -static inline double ldfq_raw(void *ptr) 477 +static inline double ldfq_p(void *ptr)
478 { 478 {
479 return *(double *)ptr; 479 return *(double *)ptr;
480 } 480 }
481 481
482 -static inline void stfl_raw(void *ptr, float v) 482 +static inline void stfl_p(void *ptr, float v)
483 { 483 {
484 *(float *)ptr = v; 484 *(float *)ptr = v;
485 } 485 }
486 486
487 -static inline void stfq_raw(void *ptr, double v) 487 +static inline void stfq_p(void *ptr, double v)
488 { 488 {
489 *(double *)ptr = v; 489 *(double *)ptr = v;
490 } 490 }
@@ -492,6 +492,24 @@ static inline void stfq_raw(void *ptr, double v)
492 492
493 /* MMU memory access macros */ 493 /* MMU memory access macros */
494 494
  495 +/* NOTE: we use double casts if pointers and target_ulong have
  496 + different sizes */
  497 +#define ldub_raw(p) ldub_p((uint8_t *)(long)(p))
  498 +#define ldsb_raw(p) ldsb_p((uint8_t *)(long)(p))
  499 +#define lduw_raw(p) lduw_p((uint8_t *)(long)(p))
  500 +#define ldsw_raw(p) ldsw_p((uint8_t *)(long)(p))
  501 +#define ldl_raw(p) ldl_p((uint8_t *)(long)(p))
  502 +#define ldq_raw(p) ldq_p((uint8_t *)(long)(p))
  503 +#define ldfl_raw(p) ldfl_p((uint8_t *)(long)(p))
  504 +#define ldfq_raw(p) ldfq_p((uint8_t *)(long)(p))
  505 +#define stb_raw(p, v) stb_p((uint8_t *)(long)(p), v)
  506 +#define stw_raw(p, v) stw_p((uint8_t *)(long)(p), v)
  507 +#define stl_raw(p, v) stl_p((uint8_t *)(long)(p), v)
  508 +#define stq_raw(p, v) stq_p((uint8_t *)(long)(p), v)
  509 +#define stfl_raw(p, v) stfl_p((uint8_t *)(long)(p), v)
  510 +#define stfq_raw(p, v) stfq_p((uint8_t *)(long)(p), v)
  511 +
  512 +
495 #if defined(CONFIG_USER_ONLY) 513 #if defined(CONFIG_USER_ONLY)
496 514
497 /* if user mode, no other memory access functions */ 515 /* if user mode, no other memory access functions */
cpu-defs.h
@@ -49,9 +49,11 @@
49 #if TARGET_LONG_SIZE == 4 49 #if TARGET_LONG_SIZE == 4
50 typedef int32_t target_long; 50 typedef int32_t target_long;
51 typedef uint32_t target_ulong; 51 typedef uint32_t target_ulong;
  52 +#define TARGET_FMT_lx "%08x"
52 #elif TARGET_LONG_SIZE == 8 53 #elif TARGET_LONG_SIZE == 8
53 typedef int64_t target_long; 54 typedef int64_t target_long;
54 typedef uint64_t target_ulong; 55 typedef uint64_t target_ulong;
  56 +#define TARGET_FMT_lx "%016llx"
55 #else 57 #else
56 #error TARGET_LONG_SIZE undefined 58 #error TARGET_LONG_SIZE undefined
57 #endif 59 #endif
cpu-exec.c
@@ -106,15 +106,16 @@ int cpu_exec(CPUState *env1)
106 int code_gen_size, ret, interrupt_request; 106 int code_gen_size, ret, interrupt_request;
107 void (*gen_func)(void); 107 void (*gen_func)(void);
108 TranslationBlock *tb, **ptb; 108 TranslationBlock *tb, **ptb;
109 - uint8_t *tc_ptr, *cs_base, *pc; 109 + target_ulong cs_base, pc;
  110 + uint8_t *tc_ptr;
110 unsigned int flags; 111 unsigned int flags;
111 112
112 /* first we save global registers */ 113 /* first we save global registers */
  114 + saved_env = env;
  115 + env = env1;
113 saved_T0 = T0; 116 saved_T0 = T0;
114 saved_T1 = T1; 117 saved_T1 = T1;
115 saved_T2 = T2; 118 saved_T2 = T2;
116 - saved_env = env;  
117 - env = env1;  
118 #ifdef __sparc__ 119 #ifdef __sparc__
119 /* we also save i7 because longjmp may not restore it */ 120 /* we also save i7 because longjmp may not restore it */
120 asm volatile ("mov %%i7, %0" : "=r" (saved_i7)); 121 asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
@@ -285,7 +286,7 @@ int cpu_exec(CPUState *env1)
285 } 286 }
286 } 287 }
287 #ifdef DEBUG_EXEC 288 #ifdef DEBUG_EXEC
288 - if (loglevel & CPU_LOG_EXEC) { 289 + if ((loglevel & CPU_LOG_EXEC)) {
289 #if defined(TARGET_I386) 290 #if defined(TARGET_I386)
290 /* restore flags in standard format */ 291 /* restore flags in standard format */
291 env->regs[R_EAX] = EAX; 292 env->regs[R_EAX] = EAX;
@@ -323,19 +324,19 @@ int cpu_exec(CPUState *env1)
323 #elif defined(TARGET_ARM) 324 #elif defined(TARGET_ARM)
324 flags = 0; 325 flags = 0;
325 cs_base = 0; 326 cs_base = 0;
326 - pc = (uint8_t *)env->regs[15]; 327 + pc = env->regs[15];
327 #elif defined(TARGET_SPARC) 328 #elif defined(TARGET_SPARC)
328 flags = 0; 329 flags = 0;
329 - cs_base = (uint8_t *)env->npc;  
330 - pc = (uint8_t *) env->pc; 330 + cs_base = env->npc;
  331 + pc = env->pc;
331 #elif defined(TARGET_PPC) 332 #elif defined(TARGET_PPC)
332 flags = 0; 333 flags = 0;
333 cs_base = 0; 334 cs_base = 0;
334 - pc = (uint8_t *)env->nip; 335 + pc = env->nip;
335 #else 336 #else
336 #error unsupported CPU 337 #error unsupported CPU
337 #endif 338 #endif
338 - tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base, 339 + tb = tb_find(&ptb, pc, cs_base,
339 flags); 340 flags);
340 if (!tb) { 341 if (!tb) {
341 TranslationBlock **ptb1; 342 TranslationBlock **ptb1;
@@ -350,7 +351,7 @@ int cpu_exec(CPUState *env1)
350 regs_to_env(); /* XXX: do it just before cpu_gen_code() */ 351 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
351 352
352 /* find translated block using physical mappings */ 353 /* find translated block using physical mappings */
353 - phys_pc = get_phys_addr_code(env, (unsigned long)pc); 354 + phys_pc = get_phys_addr_code(env, pc);
354 phys_page1 = phys_pc & TARGET_PAGE_MASK; 355 phys_page1 = phys_pc & TARGET_PAGE_MASK;
355 phys_page2 = -1; 356 phys_page2 = -1;
356 h = tb_phys_hash_func(phys_pc); 357 h = tb_phys_hash_func(phys_pc);
@@ -359,13 +360,13 @@ int cpu_exec(CPUState *env1)
359 tb = *ptb1; 360 tb = *ptb1;
360 if (!tb) 361 if (!tb)
361 goto not_found; 362 goto not_found;
362 - if (tb->pc == (unsigned long)pc && 363 + if (tb->pc == pc &&
363 tb->page_addr[0] == phys_page1 && 364 tb->page_addr[0] == phys_page1 &&
364 - tb->cs_base == (unsigned long)cs_base && 365 + tb->cs_base == cs_base &&
365 tb->flags == flags) { 366 tb->flags == flags) {
366 /* check next page if needed */ 367 /* check next page if needed */
367 if (tb->page_addr[1] != -1) { 368 if (tb->page_addr[1] != -1) {
368 - virt_page2 = ((unsigned long)pc & TARGET_PAGE_MASK) + 369 + virt_page2 = (pc & TARGET_PAGE_MASK) +
369 TARGET_PAGE_SIZE; 370 TARGET_PAGE_SIZE;
370 phys_page2 = get_phys_addr_code(env, virt_page2); 371 phys_page2 = get_phys_addr_code(env, virt_page2);
371 if (tb->page_addr[1] == phys_page2) 372 if (tb->page_addr[1] == phys_page2)
@@ -378,27 +379,27 @@ int cpu_exec(CPUState *env1)
378 } 379 }
379 not_found: 380 not_found:
380 /* if no translated code available, then translate it now */ 381 /* if no translated code available, then translate it now */
381 - tb = tb_alloc((unsigned long)pc); 382 + tb = tb_alloc(pc);
382 if (!tb) { 383 if (!tb) {
383 /* flush must be done */ 384 /* flush must be done */
384 tb_flush(env); 385 tb_flush(env);
385 /* cannot fail at this point */ 386 /* cannot fail at this point */
386 - tb = tb_alloc((unsigned long)pc); 387 + tb = tb_alloc(pc);
387 /* don't forget to invalidate previous TB info */ 388 /* don't forget to invalidate previous TB info */
388 - ptb = &tb_hash[tb_hash_func((unsigned long)pc)]; 389 + ptb = &tb_hash[tb_hash_func(pc)];
389 T0 = 0; 390 T0 = 0;
390 } 391 }
391 tc_ptr = code_gen_ptr; 392 tc_ptr = code_gen_ptr;
392 tb->tc_ptr = tc_ptr; 393 tb->tc_ptr = tc_ptr;
393 - tb->cs_base = (unsigned long)cs_base; 394 + tb->cs_base = cs_base;
394 tb->flags = flags; 395 tb->flags = flags;
395 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size); 396 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
396 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); 397 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
397 398
398 /* check next page if needed */ 399 /* check next page if needed */
399 - virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK; 400 + virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
400 phys_page2 = -1; 401 phys_page2 = -1;
401 - if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) { 402 + if ((pc & TARGET_PAGE_MASK) != virt_page2) {
402 phys_page2 = get_phys_addr_code(env, virt_page2); 403 phys_page2 = get_phys_addr_code(env, virt_page2);
403 } 404 }
404 tb_link_phys(tb, phys_pc, phys_page2); 405 tb_link_phys(tb, phys_pc, phys_page2);
@@ -408,7 +409,7 @@ int cpu_exec(CPUState *env1)
408 /* as some TB could have been invalidated because 409 /* as some TB could have been invalidated because
409 of memory exceptions while generating the code, we 410 of memory exceptions while generating the code, we
410 must recompute the hash index here */ 411 must recompute the hash index here */
411 - ptb = &tb_hash[tb_hash_func((unsigned long)pc)]; 412 + ptb = &tb_hash[tb_hash_func(pc)];
412 while (*ptb != NULL) 413 while (*ptb != NULL)
413 ptb = &(*ptb)->hash_next; 414 ptb = &(*ptb)->hash_next;
414 T0 = 0; 415 T0 = 0;
@@ -420,24 +421,25 @@ int cpu_exec(CPUState *env1)
420 spin_unlock(&tb_lock); 421 spin_unlock(&tb_lock);
421 } 422 }
422 #ifdef DEBUG_EXEC 423 #ifdef DEBUG_EXEC
423 - if (loglevel & CPU_LOG_EXEC) {  
424 - fprintf(logfile, "Trace 0x%08lx [0x%08lx] %s\n",  
425 - (long)tb->tc_ptr, (long)tb->pc,  
426 - lookup_symbol((void *)tb->pc)); 424 + if ((loglevel & CPU_LOG_EXEC) && (env->hflags & HF_LMA_MASK)) {
  425 + fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
  426 + (long)tb->tc_ptr, tb->pc,
  427 + lookup_symbol(tb->pc));
427 } 428 }
428 #endif 429 #endif
429 #ifdef __sparc__ 430 #ifdef __sparc__
430 T0 = tmp_T0; 431 T0 = tmp_T0;
431 #endif 432 #endif
432 /* see if we can patch the calling TB. */ 433 /* see if we can patch the calling TB. */
433 - if (T0 != 0 434 + {
  435 + if (T0 != 0
434 #if defined(TARGET_I386) && defined(USE_CODE_COPY) 436 #if defined(TARGET_I386) && defined(USE_CODE_COPY)
435 && (tb->cflags & CF_CODE_COPY) == 437 && (tb->cflags & CF_CODE_COPY) ==
436 (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY) 438 (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
437 #endif 439 #endif
438 ) { 440 ) {
439 spin_lock(&tb_lock); 441 spin_lock(&tb_lock);
440 - tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb); 442 + tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
441 #if defined(USE_CODE_COPY) 443 #if defined(USE_CODE_COPY)
442 /* propagates the FP use info */ 444 /* propagates the FP use info */
443 ((TranslationBlock *)(T0 & ~3))->cflags |= 445 ((TranslationBlock *)(T0 & ~3))->cflags |=
@@ -445,6 +447,7 @@ int cpu_exec(CPUState *env1)
445 #endif 447 #endif
446 spin_unlock(&tb_lock); 448 spin_unlock(&tb_lock);
447 } 449 }
  450 + }
448 tc_ptr = tb->tc_ptr; 451 tc_ptr = tb->tc_ptr;
449 env->current_tb = tb; 452 env->current_tb = tb;
450 /* execute the generated code */ 453 /* execute the generated code */
@@ -631,7 +634,7 @@ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
631 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { 634 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
632 selector &= 0xffff; 635 selector &= 0xffff;
633 cpu_x86_load_seg_cache(env, seg_reg, selector, 636 cpu_x86_load_seg_cache(env, seg_reg, selector,
634 - (uint8_t *)(selector << 4), 0xffff, 0); 637 + (selector << 4), 0xffff, 0);
635 } else { 638 } else {
636 load_seg(seg_reg, selector); 639 load_seg(seg_reg, selector);
637 } 640 }
@@ -645,7 +648,7 @@ void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
645 saved_env = env; 648 saved_env = env;
646 env = s; 649 env = s;
647 650
648 - helper_fsave(ptr, data32); 651 + helper_fsave((target_ulong)ptr, data32);
649 652
650 env = saved_env; 653 env = saved_env;
651 } 654 }
@@ -657,7 +660,7 @@ void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
657 saved_env = env; 660 saved_env = env;
658 env = s; 661 env = s;
659 662
660 - helper_frstor(ptr, data32); 663 + helper_frstor((target_ulong)ptr, data32);
661 664
662 env = saved_env; 665 env = saved_env;
663 } 666 }
dis-asm.h
@@ -9,6 +9,7 @@
9 #ifndef DIS_ASM_H 9 #ifndef DIS_ASM_H
10 #define DIS_ASM_H 10 #define DIS_ASM_H
11 11
  12 +#include <stdlib.h>
12 #include <stdio.h> 13 #include <stdio.h>
13 #include <string.h> 14 #include <string.h>
14 #include <inttypes.h> 15 #include <inttypes.h>
@@ -20,6 +21,8 @@ typedef int64_t bfd_signed_vma;
20 typedef uint8_t bfd_byte; 21 typedef uint8_t bfd_byte;
21 #define sprintf_vma(s,x) sprintf (s, "%0" PRIx64, x) 22 #define sprintf_vma(s,x) sprintf (s, "%0" PRIx64, x)
22 23
  24 +#define BFD64
  25 +
23 enum bfd_flavour { 26 enum bfd_flavour {
24 bfd_target_unknown_flavour, 27 bfd_target_unknown_flavour,
25 bfd_target_aout_flavour, 28 bfd_target_aout_flavour,
@@ -28,23 +28,20 @@ buffer_read_memory (memaddr, myaddr, length, info)
28 return 0; 28 return 0;
29 } 29 }
30 30
31 -#if !defined(CONFIG_USER_ONLY)  
32 /* Get LENGTH bytes from info's buffer, at target address memaddr. 31 /* Get LENGTH bytes from info's buffer, at target address memaddr.
33 Transfer them to myaddr. */ 32 Transfer them to myaddr. */
34 static int 33 static int
35 -target_read_memory (memaddr, myaddr, length, info)  
36 - bfd_vma memaddr;  
37 - bfd_byte *myaddr;  
38 - int length;  
39 - struct disassemble_info *info; 34 +target_read_memory (bfd_vma memaddr,
  35 + bfd_byte *myaddr,
  36 + int length,
  37 + struct disassemble_info *info)
40 { 38 {
41 int i; 39 int i;
42 for(i = 0; i < length; i++) { 40 for(i = 0; i < length; i++) {
43 - myaddr[i] = ldub_code((void *)((long)memaddr + i)); 41 + myaddr[i] = ldub_code(memaddr + i);
44 } 42 }
45 return 0; 43 return 0;
46 } 44 }
47 -#endif  
48 45
49 /* Print an error message. We can assume that this is in response to 46 /* Print an error message. We can assume that this is in response to
50 an error return from buffer_read_memory. */ 47 an error return from buffer_read_memory. */
@@ -113,75 +110,107 @@ bfd_vma bfd_getb32 (const bfd_byte *addr)
113 110
114 /* Disassemble this for me please... (debugging). 'flags' is only used 111 /* Disassemble this for me please... (debugging). 'flags' is only used
115 for i386: non zero means 16 bit code */ 112 for i386: non zero means 16 bit code */
116 -void disas(FILE *out, void *code, unsigned long size, int is_host, int flags) 113 +void target_disas(FILE *out, target_ulong code, unsigned long size, int flags)
117 { 114 {
118 - uint8_t *pc; 115 + target_ulong pc;
119 int count; 116 int count;
120 struct disassemble_info disasm_info; 117 struct disassemble_info disasm_info;
121 int (*print_insn)(bfd_vma pc, disassemble_info *info); 118 int (*print_insn)(bfd_vma pc, disassemble_info *info);
122 119
123 INIT_DISASSEMBLE_INFO(disasm_info, out, fprintf); 120 INIT_DISASSEMBLE_INFO(disasm_info, out, fprintf);
124 121
125 -#if !defined(CONFIG_USER_ONLY)  
126 - if (!is_host) {  
127 - disasm_info.read_memory_func = target_read_memory;  
128 - } 122 + disasm_info.read_memory_func = target_read_memory;
  123 + disasm_info.buffer_vma = code;
  124 + disasm_info.buffer_length = size;
  125 +
  126 +#ifdef TARGET_WORDS_BIGENDIAN
  127 + disasm_info.endian = BFD_ENDIAN_BIG;
  128 +#else
  129 + disasm_info.endian = BFD_ENDIAN_LITTLE;
  130 +#endif
  131 +#if defined(TARGET_I386)
  132 + if (flags == 2)
  133 + disasm_info.mach = bfd_mach_x86_64;
  134 + else if (flags == 1)
  135 + disasm_info.mach = bfd_mach_i386_i8086;
  136 + else
  137 + disasm_info.mach = bfd_mach_i386_i386;
  138 + print_insn = print_insn_i386;
  139 +#elif defined(TARGET_ARM)
  140 + print_insn = print_insn_arm;
  141 +#elif defined(TARGET_SPARC)
  142 + print_insn = print_insn_sparc;
  143 +#elif defined(TARGET_PPC)
  144 + print_insn = print_insn_ppc;
  145 +#else
  146 + fprintf(out, "Asm output not supported on this arch\n");
  147 + return;
129 #endif 148 #endif
130 149
  150 + for (pc = code; pc < code + size; pc += count) {
  151 +#if TARGET_LONG_BITS == 64
  152 + fprintf(out, "0x%016llx: ", pc);
  153 +#else
  154 + fprintf(out, "0x%08x: ", pc);
  155 +#endif
  156 + count = print_insn(pc, &disasm_info);
  157 +#if 0
  158 + {
  159 + int i;
  160 + uint8_t b;
  161 + fprintf(out, " {");
  162 + for(i = 0; i < count; i++) {
  163 + target_read_memory(pc + i, &b, 1, &disasm_info);
  164 + fprintf(out, " %02x", b);
  165 + }
  166 + fprintf(out, " }");
  167 + }
  168 +#endif
  169 + fprintf(out, "\n");
  170 + if (count < 0)
  171 + break;
  172 + }
  173 +}
  174 +
  175 +/* Disassemble this for me please... (debugging). */
  176 +void disas(FILE *out, void *code, unsigned long size)
  177 +{
  178 + unsigned long pc;
  179 + int count;
  180 + struct disassemble_info disasm_info;
  181 + int (*print_insn)(bfd_vma pc, disassemble_info *info);
  182 +
  183 + INIT_DISASSEMBLE_INFO(disasm_info, out, fprintf);
  184 +
131 disasm_info.buffer = code; 185 disasm_info.buffer = code;
132 disasm_info.buffer_vma = (unsigned long)code; 186 disasm_info.buffer_vma = (unsigned long)code;
133 disasm_info.buffer_length = size; 187 disasm_info.buffer_length = size;
134 188
135 - if (is_host) {  
136 #ifdef WORDS_BIGENDIAN 189 #ifdef WORDS_BIGENDIAN
137 - disasm_info.endian = BFD_ENDIAN_BIG; 190 + disasm_info.endian = BFD_ENDIAN_BIG;
138 #else 191 #else
139 - disasm_info.endian = BFD_ENDIAN_LITTLE; 192 + disasm_info.endian = BFD_ENDIAN_LITTLE;
140 #endif 193 #endif
141 #if defined(__i386__) 194 #if defined(__i386__)
142 - disasm_info.mach = bfd_mach_i386_i386;  
143 - print_insn = print_insn_i386; 195 + disasm_info.mach = bfd_mach_i386_i386;
  196 + print_insn = print_insn_i386;
144 #elif defined(__x86_64__) 197 #elif defined(__x86_64__)
145 - disasm_info.mach = bfd_mach_x86_64;  
146 - print_insn = print_insn_i386; 198 + disasm_info.mach = bfd_mach_x86_64;
  199 + print_insn = print_insn_i386;
147 #elif defined(__powerpc__) 200 #elif defined(__powerpc__)
148 - print_insn = print_insn_ppc; 201 + print_insn = print_insn_ppc;
149 #elif defined(__alpha__) 202 #elif defined(__alpha__)
150 - print_insn = print_insn_alpha; 203 + print_insn = print_insn_alpha;
151 #elif defined(__sparc__) 204 #elif defined(__sparc__)
152 - print_insn = print_insn_sparc; 205 + print_insn = print_insn_sparc;
153 #elif defined(__arm__) 206 #elif defined(__arm__)
154 - print_insn = print_insn_arm;  
155 -#else  
156 - fprintf(out, "Asm output not supported on this arch\n");  
157 - return;  
158 -#endif  
159 - } else {  
160 -#ifdef TARGET_WORDS_BIGENDIAN  
161 - disasm_info.endian = BFD_ENDIAN_BIG;  
162 -#else  
163 - disasm_info.endian = BFD_ENDIAN_LITTLE;  
164 -#endif  
165 -#if defined(TARGET_I386)  
166 - if (!flags)  
167 - disasm_info.mach = bfd_mach_i386_i386;  
168 - else  
169 - disasm_info.mach = bfd_mach_i386_i8086;  
170 - print_insn = print_insn_i386;  
171 -#elif defined(TARGET_ARM)  
172 - print_insn = print_insn_arm;  
173 -#elif defined(TARGET_SPARC)  
174 - print_insn = print_insn_sparc;  
175 -#elif defined(TARGET_PPC)  
176 - print_insn = print_insn_ppc; 207 + print_insn = print_insn_arm;
177 #else 208 #else
178 - fprintf(out, "Asm output not supported on this arch\n");  
179 - return; 209 + fprintf(out, "Asm output not supported on this arch\n");
  210 + return;
180 #endif 211 #endif
181 - }  
182 -  
183 - for (pc = code; pc < (uint8_t *)code + size; pc += count) {  
184 - fprintf(out, "0x%08lx: ", (long)pc); 212 + for (pc = (unsigned long)code; pc < (unsigned long)code + size; pc += count) {
  213 + fprintf(out, "0x%08lx: ", pc);
185 #ifdef __arm__ 214 #ifdef __arm__
186 /* since data are included in the code, it is better to 215 /* since data are included in the code, it is better to
187 display code data too */ 216 display code data too */
@@ -189,7 +218,7 @@ void disas(FILE *out, void *code, unsigned long size, int is_host, int flags)
189 fprintf(out, "%08x ", (int)bfd_getl32((const bfd_byte *)pc)); 218 fprintf(out, "%08x ", (int)bfd_getl32((const bfd_byte *)pc));
190 } 219 }
191 #endif 220 #endif
192 - count = print_insn((unsigned long)pc, &disasm_info); 221 + count = print_insn(pc, &disasm_info);
193 fprintf(out, "\n"); 222 fprintf(out, "\n");
194 if (count < 0) 223 if (count < 0)
195 break; 224 break;
@@ -197,7 +226,7 @@ void disas(FILE *out, void *code, unsigned long size, int is_host, int flags)
197 } 226 }
198 227
199 /* Look up symbol for debugging purpose. Returns "" if unknown. */ 228 /* Look up symbol for debugging purpose. Returns "" if unknown. */
200 -const char *lookup_symbol(void *orig_addr) 229 +const char *lookup_symbol(target_ulong orig_addr)
201 { 230 {
202 unsigned int i; 231 unsigned int i;
203 /* Hack, because we know this is x86. */ 232 /* Hack, because we know this is x86. */
@@ -214,8 +243,8 @@ const char *lookup_symbol(void *orig_addr)
214 if (ELF_ST_TYPE(sym[i].st_info) != STT_FUNC) 243 if (ELF_ST_TYPE(sym[i].st_info) != STT_FUNC)
215 continue; 244 continue;
216 245
217 - if ((long)orig_addr >= sym[i].st_value  
218 - && (long)orig_addr < sym[i].st_value + sym[i].st_size) 246 + if (orig_addr >= sym[i].st_value
  247 + && orig_addr < sym[i].st_value + sym[i].st_size)
219 return s->disas_strtab + sym[i].st_name; 248 return s->disas_strtab + sym[i].st_name;
220 } 249 }
221 } 250 }
@@ -2,11 +2,12 @@
2 #define _QEMU_DISAS_H 2 #define _QEMU_DISAS_H
3 3
4 /* Disassemble this for me please... (debugging). */ 4 /* Disassemble this for me please... (debugging). */
5 -void disas(FILE *out, void *code, unsigned long size, int is_host, int flags); 5 +void disas(FILE *out, void *code, unsigned long size);
  6 +void target_disas(FILE *out, target_ulong code, unsigned long size, int flags);
6 void monitor_disas(target_ulong pc, int nb_insn, int is_physical, int flags); 7 void monitor_disas(target_ulong pc, int nb_insn, int is_physical, int flags);
7 8
8 /* Look up symbol for debugging purpose. Returns "" if unknown. */ 9 /* Look up symbol for debugging purpose. Returns "" if unknown. */
9 -const char *lookup_symbol(void *orig_addr); 10 +const char *lookup_symbol(target_ulong orig_addr);
10 11
11 /* Filled in by elfload.c. Simplistic, but will do for now. */ 12 /* Filled in by elfload.c. Simplistic, but will do for now. */
12 extern struct syminfo { 13 extern struct syminfo {
exec-all.h
@@ -55,8 +55,10 @@ struct TranslationBlock;
55 55
56 extern uint16_t gen_opc_buf[OPC_BUF_SIZE]; 56 extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
57 extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE]; 57 extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
58 -extern uint32_t gen_opc_pc[OPC_BUF_SIZE];  
59 -extern uint32_t gen_opc_npc[OPC_BUF_SIZE]; 58 +extern long gen_labels[OPC_BUF_SIZE];
  59 +extern int nb_gen_labels;
  60 +extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
  61 +extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
60 extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE]; 62 extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
61 extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE]; 63 extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
62 64
@@ -186,7 +188,7 @@ typedef struct TranslationBlock {
186 struct TranslationBlock *jmp_first; 188 struct TranslationBlock *jmp_first;
187 } TranslationBlock; 189 } TranslationBlock;
188 190
189 -static inline unsigned int tb_hash_func(unsigned long pc) 191 +static inline unsigned int tb_hash_func(target_ulong pc)
190 { 192 {
191 return pc & (CODE_GEN_HASH_SIZE - 1); 193 return pc & (CODE_GEN_HASH_SIZE - 1);
192 } 194 }
@@ -196,7 +198,7 @@ static inline unsigned int tb_phys_hash_func(unsigned long pc)
196 return pc & (CODE_GEN_PHYS_HASH_SIZE - 1); 198 return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
197 } 199 }
198 200
199 -TranslationBlock *tb_alloc(unsigned long pc); 201 +TranslationBlock *tb_alloc(target_ulong pc);
200 void tb_flush(CPUState *env); 202 void tb_flush(CPUState *env);
201 void tb_link(TranslationBlock *tb); 203 void tb_link(TranslationBlock *tb);
202 void tb_link_phys(TranslationBlock *tb, 204 void tb_link_phys(TranslationBlock *tb,
@@ -329,7 +331,7 @@ do {\
329 "b " ASM_NAME(__op_jmp) #n "\n"\ 331 "b " ASM_NAME(__op_jmp) #n "\n"\
330 "1:\n");\ 332 "1:\n");\
331 T0 = (long)(tbparam) + (n);\ 333 T0 = (long)(tbparam) + (n);\
332 - EIP = eip;\ 334 + EIP = (int32_t)eip;\
333 EXIT_TB();\ 335 EXIT_TB();\
334 } while (0) 336 } while (0)
335 337
@@ -341,6 +343,16 @@ do {\
341 #elif defined(__i386__) && defined(USE_DIRECT_JUMP) 343 #elif defined(__i386__) && defined(USE_DIRECT_JUMP)
342 344
343 /* we patch the jump instruction directly */ 345 /* we patch the jump instruction directly */
  346 +#define GOTO_TB(opname, n)\
  347 +do {\
  348 + asm volatile (".section .data\n"\
  349 + ASM_NAME(__op_label) #n "." ASM_NAME(opname) ":\n"\
  350 + ".long 1f\n"\
  351 + ASM_PREVIOUS_SECTION \
  352 + "jmp " ASM_NAME(__op_jmp) #n "\n"\
  353 + "1:\n");\
  354 +} while (0)
  355 +
344 #define JUMP_TB(opname, tbparam, n, eip)\ 356 #define JUMP_TB(opname, tbparam, n, eip)\
345 do {\ 357 do {\
346 asm volatile (".section .data\n"\ 358 asm volatile (".section .data\n"\
@@ -350,7 +362,7 @@ do {\
350 "jmp " ASM_NAME(__op_jmp) #n "\n"\ 362 "jmp " ASM_NAME(__op_jmp) #n "\n"\
351 "1:\n");\ 363 "1:\n");\
352 T0 = (long)(tbparam) + (n);\ 364 T0 = (long)(tbparam) + (n);\
353 - EIP = eip;\ 365 + EIP = (int32_t)eip;\
354 EXIT_TB();\ 366 EXIT_TB();\
355 } while (0) 367 } while (0)
356 368
@@ -370,7 +382,7 @@ do {\
370 goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\ 382 goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
371 label ## n:\ 383 label ## n:\
372 T0 = (long)(tbparam) + (n);\ 384 T0 = (long)(tbparam) + (n);\
373 - EIP = eip;\ 385 + EIP = (int32_t)eip;\
374 dummy_label ## n:\ 386 dummy_label ## n:\
375 EXIT_TB();\ 387 EXIT_TB();\
376 } while (0) 388 } while (0)
@@ -544,7 +556,7 @@ extern int tb_invalidated_flag;
544 556
545 #if !defined(CONFIG_USER_ONLY) 557 #if !defined(CONFIG_USER_ONLY)
546 558
547 -void tlb_fill(unsigned long addr, int is_write, int is_user, 559 +void tlb_fill(target_ulong addr, int is_write, int is_user,
548 void *retaddr); 560 void *retaddr);
549 561
550 #define ACCESS_TYPE 3 562 #define ACCESS_TYPE 3
@@ -560,6 +572,9 @@ void tlb_fill(unsigned long addr, int is_write, int is_user,
560 #define DATA_SIZE 4 572 #define DATA_SIZE 4
561 #include "softmmu_header.h" 573 #include "softmmu_header.h"
562 574
  575 +#define DATA_SIZE 8
  576 +#include "softmmu_header.h"
  577 +
563 #undef ACCESS_TYPE 578 #undef ACCESS_TYPE
564 #undef MEMSUFFIX 579 #undef MEMSUFFIX
565 #undef env 580 #undef env
@@ -578,7 +593,7 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
578 /* XXX: i386 target specific */ 593 /* XXX: i386 target specific */
579 static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr) 594 static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
580 { 595 {
581 - int is_user, index; 596 + int is_user, index, pd;
582 597
583 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 598 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
584 #if defined(TARGET_I386) 599 #if defined(TARGET_I386)
@@ -592,7 +607,11 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
592 #endif 607 #endif
593 if (__builtin_expect(env->tlb_read[is_user][index].address != 608 if (__builtin_expect(env->tlb_read[is_user][index].address !=
594 (addr & TARGET_PAGE_MASK), 0)) { 609 (addr & TARGET_PAGE_MASK), 0)) {
595 - ldub_code((void *)addr); 610 + ldub_code(addr);
  611 + }
  612 + pd = env->tlb_read[is_user][index].address & ~TARGET_PAGE_MASK;
  613 + if (pd > IO_MEM_ROM) {
  614 + cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
596 } 615 }
597 return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base; 616 return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base;
598 } 617 }
@@ -231,6 +231,10 @@ static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
231 { 231 {
232 VirtPageDesc **lp, *p; 232 VirtPageDesc **lp, *p;
233 233
  234 + /* XXX: should not truncate for 64 bit addresses */
  235 +#if TARGET_LONG_BITS > 32
  236 + index &= (L1_SIZE - 1);
  237 +#endif
234 lp = &l1_virt_map[index >> L2_BITS]; 238 lp = &l1_virt_map[index >> L2_BITS];
235 p = *lp; 239 p = *lp;
236 if (!p) { 240 if (!p) {
@@ -597,13 +601,13 @@ static void tb_gen_code(CPUState *env,
597 target_ulong phys_pc, phys_page2, virt_page2; 601 target_ulong phys_pc, phys_page2, virt_page2;
598 int code_gen_size; 602 int code_gen_size;
599 603
600 - phys_pc = get_phys_addr_code(env, (unsigned long)pc);  
601 - tb = tb_alloc((unsigned long)pc); 604 + phys_pc = get_phys_addr_code(env, pc);
  605 + tb = tb_alloc(pc);
602 if (!tb) { 606 if (!tb) {
603 /* flush must be done */ 607 /* flush must be done */
604 tb_flush(env); 608 tb_flush(env);
605 /* cannot fail at this point */ 609 /* cannot fail at this point */
606 - tb = tb_alloc((unsigned long)pc); 610 + tb = tb_alloc(pc);
607 } 611 }
608 tc_ptr = code_gen_ptr; 612 tc_ptr = code_gen_ptr;
609 tb->tc_ptr = tc_ptr; 613 tb->tc_ptr = tc_ptr;
@@ -614,9 +618,9 @@ static void tb_gen_code(CPUState *env,
614 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); 618 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
615 619
616 /* check next page if needed */ 620 /* check next page if needed */
617 - virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK; 621 + virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
618 phys_page2 = -1; 622 phys_page2 = -1;
619 - if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) { 623 + if ((pc & TARGET_PAGE_MASK) != virt_page2) {
620 phys_page2 = get_phys_addr_code(env, virt_page2); 624 phys_page2 = get_phys_addr_code(env, virt_page2);
621 } 625 }
622 tb_link_phys(tb, phys_pc, phys_page2); 626 tb_link_phys(tb, phys_pc, phys_page2);
@@ -884,7 +888,7 @@ static inline void tb_alloc_page(TranslationBlock *tb,
884 888
885 /* Allocate a new translation block. Flush the translation buffer if 889 /* Allocate a new translation block. Flush the translation buffer if
886 too many translation blocks or too much generated code. */ 890 too many translation blocks or too much generated code. */
887 -TranslationBlock *tb_alloc(unsigned long pc) 891 +TranslationBlock *tb_alloc(target_ulong pc)
888 { 892 {
889 TranslationBlock *tb; 893 TranslationBlock *tb;
890 894
@@ -1063,6 +1067,7 @@ static void tb_reset_jump_recursive(TranslationBlock *tb)
1063 tb_reset_jump_recursive2(tb, 1); 1067 tb_reset_jump_recursive2(tb, 1);
1064 } 1068 }
1065 1069
  1070 +#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1066 static void breakpoint_invalidate(CPUState *env, target_ulong pc) 1071 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1067 { 1072 {
1068 target_ulong phys_addr; 1073 target_ulong phys_addr;
@@ -1070,6 +1075,7 @@ static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1070 phys_addr = cpu_get_phys_page_debug(env, pc); 1075 phys_addr = cpu_get_phys_page_debug(env, pc);
1071 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0); 1076 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1072 } 1077 }
  1078 +#endif
1073 1079
1074 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a 1080 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1075 breakpoint is reached */ 1081 breakpoint is reached */
@@ -1872,7 +1878,7 @@ static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1872 #if !defined(CONFIG_USER_ONLY) 1878 #if !defined(CONFIG_USER_ONLY)
1873 tb_invalidate_phys_page_fast(phys_addr, 1); 1879 tb_invalidate_phys_page_fast(phys_addr, 1);
1874 #endif 1880 #endif
1875 - stb_raw((uint8_t *)addr, val); 1881 + stb_p((uint8_t *)(long)addr, val);
1876 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1; 1882 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1877 } 1883 }
1878 1884
@@ -1884,7 +1890,7 @@ static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1884 #if !defined(CONFIG_USER_ONLY) 1890 #if !defined(CONFIG_USER_ONLY)
1885 tb_invalidate_phys_page_fast(phys_addr, 2); 1891 tb_invalidate_phys_page_fast(phys_addr, 2);
1886 #endif 1892 #endif
1887 - stw_raw((uint8_t *)addr, val); 1893 + stw_p((uint8_t *)(long)addr, val);
1888 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1; 1894 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1889 } 1895 }
1890 1896
@@ -1896,7 +1902,7 @@ static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1896 #if !defined(CONFIG_USER_ONLY) 1902 #if !defined(CONFIG_USER_ONLY)
1897 tb_invalidate_phys_page_fast(phys_addr, 4); 1903 tb_invalidate_phys_page_fast(phys_addr, 4);
1898 #endif 1904 #endif
1899 - stl_raw((uint8_t *)addr, val); 1905 + stl_p((uint8_t *)(long)addr, val);
1900 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1; 1906 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1901 } 1907 }
1902 1908
@@ -1914,19 +1920,19 @@ static CPUWriteMemoryFunc *code_mem_write[3] = {
1914 1920
1915 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) 1921 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1916 { 1922 {
1917 - stb_raw((uint8_t *)addr, val); 1923 + stb_p((uint8_t *)(long)addr, val);
1918 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); 1924 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1919 } 1925 }
1920 1926
1921 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) 1927 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1922 { 1928 {
1923 - stw_raw((uint8_t *)addr, val); 1929 + stw_p((uint8_t *)(long)addr, val);
1924 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); 1930 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1925 } 1931 }
1926 1932
1927 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) 1933 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1928 { 1934 {
1929 - stl_raw((uint8_t *)addr, val); 1935 + stl_p((uint8_t *)(long)addr, val);
1930 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); 1936 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1931 } 1937 }
1932 1938
@@ -2046,17 +2052,17 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2046 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 2052 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2047 if (l >= 4 && ((addr & 3) == 0)) { 2053 if (l >= 4 && ((addr & 3) == 0)) {
2048 /* 32 bit read access */ 2054 /* 32 bit read access */
2049 - val = ldl_raw(buf); 2055 + val = ldl_p(buf);
2050 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 2056 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2051 l = 4; 2057 l = 4;
2052 } else if (l >= 2 && ((addr & 1) == 0)) { 2058 } else if (l >= 2 && ((addr & 1) == 0)) {
2053 /* 16 bit read access */ 2059 /* 16 bit read access */
2054 - val = lduw_raw(buf); 2060 + val = lduw_p(buf);
2055 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); 2061 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2056 l = 2; 2062 l = 2;
2057 } else { 2063 } else {
2058 /* 8 bit access */ 2064 /* 8 bit access */
2059 - val = ldub_raw(buf); 2065 + val = ldub_p(buf);
2060 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val); 2066 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2061 l = 1; 2067 l = 1;
2062 } 2068 }
@@ -2079,17 +2085,17 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2079 if (l >= 4 && ((addr & 3) == 0)) { 2085 if (l >= 4 && ((addr & 3) == 0)) {
2080 /* 32 bit read access */ 2086 /* 32 bit read access */
2081 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); 2087 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2082 - stl_raw(buf, val); 2088 + stl_p(buf, val);
2083 l = 4; 2089 l = 4;
2084 } else if (l >= 2 && ((addr & 1) == 0)) { 2090 } else if (l >= 2 && ((addr & 1) == 0)) {
2085 /* 16 bit read access */ 2091 /* 16 bit read access */
2086 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); 2092 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2087 - stw_raw(buf, val); 2093 + stw_p(buf, val);
2088 l = 2; 2094 l = 2;
2089 } else { 2095 } else {
2090 /* 8 bit access */ 2096 /* 8 bit access */
2091 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr); 2097 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2092 - stb_raw(buf, val); 2098 + stb_p(buf, val);
2093 l = 1; 2099 l = 1;
2094 } 2100 }
2095 } else { 2101 } else {
monitor.c
@@ -438,7 +438,7 @@ static void memory_dump(int count, int format, int wsize,
438 } 438 }
439 439
440 while (len > 0) { 440 while (len > 0) {
441 - term_printf("0x%08x:", addr); 441 + term_printf(TARGET_FMT_lx ":", addr);
442 l = len; 442 l = len;
443 if (l > line_size) 443 if (l > line_size)
444 l = line_size; 444 l = line_size;
softmmu_header.h
@@ -82,13 +82,14 @@
82 #endif 82 #endif
83 83
84 84
85 -DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr, 85 +DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
86 int is_user); 86 int is_user);
87 -void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr, DATA_TYPE v, int is_user); 87 +void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE v, int is_user);
88 88
89 -#if (DATA_SIZE <= 4) && defined(__i386__) && (ACCESS_TYPE <= 1) && defined(ASM_SOFTMMU) 89 +#if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \
  90 + (ACCESS_TYPE <= 1) && defined(ASM_SOFTMMU)
90 91
91 -static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr) 92 +static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
92 { 93 {
93 int res; 94 int res;
94 95
@@ -131,7 +132,7 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr)
131 } 132 }
132 133
133 #if DATA_SIZE <= 2 134 #if DATA_SIZE <= 2
134 -static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(void *ptr) 135 +static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
135 { 136 {
136 int res; 137 int res;
137 138
@@ -178,7 +179,7 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(void *ptr)
178 } 179 }
179 #endif 180 #endif
180 181
181 -static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE v) 182 +static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
182 { 183 {
183 asm volatile ("movl %0, %%edx\n" 184 asm volatile ("movl %0, %%edx\n"
184 "movl %0, %%eax\n" 185 "movl %0, %%eax\n"
@@ -232,14 +233,15 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE v)
232 233
233 /* generic load/store macros */ 234 /* generic load/store macros */
234 235
235 -static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr) 236 +static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
236 { 237 {
237 int index; 238 int index;
238 RES_TYPE res; 239 RES_TYPE res;
239 - unsigned long addr, physaddr; 240 + target_ulong addr;
  241 + unsigned long physaddr;
240 int is_user; 242 int is_user;
241 243
242 - addr = (unsigned long)ptr; 244 + addr = ptr;
243 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 245 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
244 is_user = CPU_MEM_INDEX; 246 is_user = CPU_MEM_INDEX;
245 if (__builtin_expect(env->tlb_read[is_user][index].address != 247 if (__builtin_expect(env->tlb_read[is_user][index].address !=
@@ -253,13 +255,14 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr)
253 } 255 }
254 256
255 #if DATA_SIZE <= 2 257 #if DATA_SIZE <= 2
256 -static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(void *ptr) 258 +static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
257 { 259 {
258 int res, index; 260 int res, index;
259 - unsigned long addr, physaddr; 261 + target_ulong addr;
  262 + unsigned long physaddr;
260 int is_user; 263 int is_user;
261 264
262 - addr = (unsigned long)ptr; 265 + addr = ptr;
263 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 266 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
264 is_user = CPU_MEM_INDEX; 267 is_user = CPU_MEM_INDEX;
265 if (__builtin_expect(env->tlb_read[is_user][index].address != 268 if (__builtin_expect(env->tlb_read[is_user][index].address !=
@@ -275,13 +278,14 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(void *ptr)
275 278
276 /* generic store macro */ 279 /* generic store macro */
277 280
278 -static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE v) 281 +static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
279 { 282 {
280 int index; 283 int index;
281 - unsigned long addr, physaddr; 284 + target_ulong addr;
  285 + unsigned long physaddr;
282 int is_user; 286 int is_user;
283 287
284 - addr = (unsigned long)ptr; 288 + addr = ptr;
285 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 289 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
286 is_user = CPU_MEM_INDEX; 290 is_user = CPU_MEM_INDEX;
287 if (__builtin_expect(env->tlb_write[is_user][index].address != 291 if (__builtin_expect(env->tlb_write[is_user][index].address !=
@@ -296,7 +300,7 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE v)
296 #endif 300 #endif
297 301
298 #if DATA_SIZE == 8 302 #if DATA_SIZE == 8
299 -static inline double glue(ldfq, MEMSUFFIX)(void *ptr) 303 +static inline double glue(ldfq, MEMSUFFIX)(target_ulong ptr)
300 { 304 {
301 union { 305 union {
302 double d; 306 double d;
@@ -306,7 +310,7 @@ static inline double glue(ldfq, MEMSUFFIX)(void *ptr)
306 return u.d; 310 return u.d;
307 } 311 }
308 312
309 -static inline void glue(stfq, MEMSUFFIX)(void *ptr, double v) 313 +static inline void glue(stfq, MEMSUFFIX)(target_ulong ptr, double v)
310 { 314 {
311 union { 315 union {
312 double d; 316 double d;
@@ -318,7 +322,7 @@ static inline void glue(stfq, MEMSUFFIX)(void *ptr, double v)
318 #endif /* DATA_SIZE == 8 */ 322 #endif /* DATA_SIZE == 8 */
319 323
320 #if DATA_SIZE == 4 324 #if DATA_SIZE == 4
321 -static inline float glue(ldfl, MEMSUFFIX)(void *ptr) 325 +static inline float glue(ldfl, MEMSUFFIX)(target_ulong ptr)
322 { 326 {
323 union { 327 union {
324 float f; 328 float f;
@@ -328,7 +332,7 @@ static inline float glue(ldfl, MEMSUFFIX)(void *ptr)
328 return u.f; 332 return u.f;
329 } 333 }
330 334
331 -static inline void glue(stfl, MEMSUFFIX)(void *ptr, float v) 335 +static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float v)
332 { 336 {
333 union { 337 union {
334 float f; 338 float f;
softmmu_template.h
@@ -45,11 +45,11 @@
45 #define READ_ACCESS_TYPE 0 45 #define READ_ACCESS_TYPE 0
46 #endif 46 #endif
47 47
48 -static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr, 48 +static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
49 int is_user, 49 int is_user,
50 void *retaddr); 50 void *retaddr);
51 static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr, 51 static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
52 - unsigned long tlb_addr) 52 + target_ulong tlb_addr)
53 { 53 {
54 DATA_TYPE res; 54 DATA_TYPE res;
55 int index; 55 int index;
@@ -70,12 +70,13 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
70 } 70 }
71 71
72 /* handle all cases except unaligned access which span two pages */ 72 /* handle all cases except unaligned access which span two pages */
73 -DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr, 73 +DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
74 int is_user) 74 int is_user)
75 { 75 {
76 DATA_TYPE res; 76 DATA_TYPE res;
77 int index; 77 int index;
78 - unsigned long physaddr, tlb_addr; 78 + target_ulong tlb_addr;
  79 + unsigned long physaddr;
79 void *retaddr; 80 void *retaddr;
80 81
81 /* test if there is match for unaligned or IO access */ 82 /* test if there is match for unaligned or IO access */
@@ -110,13 +111,14 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
110 } 111 }
111 112
112 /* handle all unaligned cases */ 113 /* handle all unaligned cases */
113 -static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr, 114 +static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
114 int is_user, 115 int is_user,
115 void *retaddr) 116 void *retaddr)
116 { 117 {
117 DATA_TYPE res, res1, res2; 118 DATA_TYPE res, res1, res2;
118 int index, shift; 119 int index, shift;
119 - unsigned long physaddr, tlb_addr, addr1, addr2; 120 + unsigned long physaddr;
  121 + target_ulong tlb_addr, addr1, addr2;
120 122
121 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 123 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
122 redo: 124 redo:
@@ -158,14 +160,14 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
158 160
159 #ifndef SOFTMMU_CODE_ACCESS 161 #ifndef SOFTMMU_CODE_ACCESS
160 162
161 -static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr, 163 +static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
162 DATA_TYPE val, 164 DATA_TYPE val,
163 int is_user, 165 int is_user,
164 void *retaddr); 166 void *retaddr);
165 167
166 static inline void glue(io_write, SUFFIX)(unsigned long physaddr, 168 static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
167 DATA_TYPE val, 169 DATA_TYPE val,
168 - unsigned long tlb_addr, 170 + target_ulong tlb_addr,
169 void *retaddr) 171 void *retaddr)
170 { 172 {
171 int index; 173 int index;
@@ -186,11 +188,12 @@ static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
186 #endif /* SHIFT > 2 */ 188 #endif /* SHIFT > 2 */
187 } 189 }
188 190
189 -void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr, 191 +void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
190 DATA_TYPE val, 192 DATA_TYPE val,
191 int is_user) 193 int is_user)
192 { 194 {
193 - unsigned long physaddr, tlb_addr; 195 + unsigned long physaddr;
  196 + target_ulong tlb_addr;
194 void *retaddr; 197 void *retaddr;
195 int index; 198 int index;
196 199
@@ -223,12 +226,13 @@ void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr,
223 } 226 }
224 227
225 /* handles all unaligned cases */ 228 /* handles all unaligned cases */
226 -static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr, 229 +static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
227 DATA_TYPE val, 230 DATA_TYPE val,
228 int is_user, 231 int is_user,
229 void *retaddr) 232 void *retaddr)
230 { 233 {
231 - unsigned long physaddr, tlb_addr; 234 + unsigned long physaddr;
  235 + target_ulong tlb_addr;
232 int index, i; 236 int index, i;
233 237
234 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 238 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -335,6 +335,18 @@ static inline void qemu_get_8s(QEMUFile *f, uint8_t *pv)
335 *pv = qemu_get_byte(f); 335 *pv = qemu_get_byte(f);
336 } 336 }
337 337
  338 +#if TARGET_LONG_BITS == 64
  339 +#define qemu_put_betl qemu_put_be64
  340 +#define qemu_get_betl qemu_get_be64
  341 +#define qemu_put_betls qemu_put_be64s
  342 +#define qemu_get_betls qemu_get_be64s
  343 +#else
  344 +#define qemu_put_betl qemu_put_be32
  345 +#define qemu_get_betl qemu_get_be32
  346 +#define qemu_put_betls qemu_put_be32s
  347 +#define qemu_get_betls qemu_get_be32s
  348 +#endif
  349 +
338 int64_t qemu_ftell(QEMUFile *f); 350 int64_t qemu_ftell(QEMUFile *f);
339 int64_t qemu_fseek(QEMUFile *f, int64_t pos, int whence); 351 int64_t qemu_fseek(QEMUFile *f, int64_t pos, int whence);
340 352
@@ -628,6 +640,10 @@ uint32_t pic_intack_read(CPUState *env);
628 void pic_info(void); 640 void pic_info(void);
629 void irq_info(void); 641 void irq_info(void);
630 642
  643 +/* APIC */
  644 +int apic_init(CPUState *env);
  645 +int apic_get_interrupt(CPUState *env);
  646 +
631 /* i8254.c */ 647 /* i8254.c */
632 648
633 #define PIT_FREQ 1193182 649 #define PIT_FREQ 1193182