Commit b5fc09ae52e3d19e01126715c998eb6587795b56
1 parent: c75a823c
Fix crash due to invalid env->current_tb (Adam Lackorzynski, Paul Brook, me)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4317 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 44 additions and 24 deletions
cpu-exec.c
| ... | ... | @@ -36,6 +36,7 @@ |
| 36 | 36 | #endif |
| 37 | 37 | |
| 38 | 38 | int tb_invalidated_flag; |
| 39 | +static unsigned long next_tb; | |
| 39 | 40 | |
| 40 | 41 | //#define DEBUG_EXEC |
| 41 | 42 | //#define DEBUG_SIGNAL |
| ... | ... | @@ -273,14 +274,12 @@ static inline TranslationBlock *tb_find_fast(void) |
| 273 | 274 | /* as some TB could have been invalidated because |
| 274 | 275 | of memory exceptions while generating the code, we |
| 275 | 276 | must recompute the hash index here */ |
| 276 | - T0 = 0; | |
| 277 | + next_tb = 0; | |
| 277 | 278 | } |
| 278 | 279 | } |
| 279 | 280 | return tb; |
| 280 | 281 | } |
| 281 | 282 | |
| 282 | -#define BREAK_CHAIN T0 = 0 | |
| 283 | - | |
| 284 | 283 | /* main execution loop */ |
| 285 | 284 | |
| 286 | 285 | int cpu_exec(CPUState *env1) |
| ... | ... | @@ -293,7 +292,7 @@ int cpu_exec(CPUState *env1) |
| 293 | 292 | #endif |
| 294 | 293 | #endif |
| 295 | 294 | int ret, interrupt_request; |
| 296 | - long (*gen_func)(void); | |
| 295 | + unsigned long (*gen_func)(void); | |
| 297 | 296 | TranslationBlock *tb; |
| 298 | 297 | uint8_t *tc_ptr; |
| 299 | 298 | |
| ... | ... | @@ -414,7 +413,7 @@ int cpu_exec(CPUState *env1) |
| 414 | 413 | } |
| 415 | 414 | #endif |
| 416 | 415 | |
| 417 | - T0 = 0; /* force lookup of first TB */ | |
| 416 | + next_tb = 0; /* force lookup of first TB */ | |
| 418 | 417 | for(;;) { |
| 419 | 418 | SAVE_GLOBALS(); |
| 420 | 419 | interrupt_request = env->interrupt_request; |
| ... | ... | @@ -443,13 +442,13 @@ int cpu_exec(CPUState *env1) |
| 443 | 442 | svm_check_intercept(SVM_EXIT_SMI); |
| 444 | 443 | env->interrupt_request &= ~CPU_INTERRUPT_SMI; |
| 445 | 444 | do_smm_enter(); |
| 446 | - BREAK_CHAIN; | |
| 445 | + next_tb = 0; | |
| 447 | 446 | } else if ((interrupt_request & CPU_INTERRUPT_NMI) && |
| 448 | 447 | !(env->hflags & HF_NMI_MASK)) { |
| 449 | 448 | env->interrupt_request &= ~CPU_INTERRUPT_NMI; |
| 450 | 449 | env->hflags |= HF_NMI_MASK; |
| 451 | 450 | do_interrupt(EXCP02_NMI, 0, 0, 0, 1); |
| 452 | - BREAK_CHAIN; | |
| 451 | + next_tb = 0; | |
| 453 | 452 | } else if ((interrupt_request & CPU_INTERRUPT_HARD) && |
| 454 | 453 | (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) && |
| 455 | 454 | !(env->hflags & HF_INHIBIT_IRQ_MASK)) { |
| ... | ... | @@ -463,7 +462,7 @@ int cpu_exec(CPUState *env1) |
| 463 | 462 | do_interrupt(intno, 0, 0, 0, 1); |
| 464 | 463 | /* ensure that no TB jump will be modified as |
| 465 | 464 | the program flow was changed */ |
| 466 | - BREAK_CHAIN; | |
| 465 | + next_tb = 0; | |
| 467 | 466 | #if !defined(CONFIG_USER_ONLY) |
| 468 | 467 | } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && |
| 469 | 468 | (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) { |
| ... | ... | @@ -477,7 +476,7 @@ int cpu_exec(CPUState *env1) |
| 477 | 476 | do_interrupt(intno, 0, 0, -1, 1); |
| 478 | 477 | stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), |
| 479 | 478 | ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK); |
| 480 | - BREAK_CHAIN; | |
| 479 | + next_tb = 0; | |
| 481 | 480 | #endif |
| 482 | 481 | } |
| 483 | 482 | #elif defined(TARGET_PPC) |
| ... | ... | @@ -490,7 +489,7 @@ int cpu_exec(CPUState *env1) |
| 490 | 489 | ppc_hw_interrupt(env); |
| 491 | 490 | if (env->pending_interrupts == 0) |
| 492 | 491 | env->interrupt_request &= ~CPU_INTERRUPT_HARD; |
| 493 | - BREAK_CHAIN; | |
| 492 | + next_tb = 0; | |
| 494 | 493 | } |
| 495 | 494 | #elif defined(TARGET_MIPS) |
| 496 | 495 | if ((interrupt_request & CPU_INTERRUPT_HARD) && |
| ... | ... | @@ -503,7 +502,7 @@ int cpu_exec(CPUState *env1) |
| 503 | 502 | env->exception_index = EXCP_EXT_INTERRUPT; |
| 504 | 503 | env->error_code = 0; |
| 505 | 504 | do_interrupt(env); |
| 506 | - BREAK_CHAIN; | |
| 505 | + next_tb = 0; | |
| 507 | 506 | } |
| 508 | 507 | #elif defined(TARGET_SPARC) |
| 509 | 508 | if ((interrupt_request & CPU_INTERRUPT_HARD) && |
| ... | ... | @@ -520,7 +519,7 @@ int cpu_exec(CPUState *env1) |
| 520 | 519 | #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) |
| 521 | 520 | cpu_check_irqs(env); |
| 522 | 521 | #endif |
| 523 | - BREAK_CHAIN; | |
| 522 | + next_tb = 0; | |
| 524 | 523 | } |
| 525 | 524 | } else if (interrupt_request & CPU_INTERRUPT_TIMER) { |
| 526 | 525 | //do_interrupt(0, 0, 0, 0, 0); |
| ... | ... | @@ -531,7 +530,7 @@ int cpu_exec(CPUState *env1) |
| 531 | 530 | && !(env->uncached_cpsr & CPSR_F)) { |
| 532 | 531 | env->exception_index = EXCP_FIQ; |
| 533 | 532 | do_interrupt(env); |
| 534 | - BREAK_CHAIN; | |
| 533 | + next_tb = 0; | |
| 535 | 534 | } |
| 536 | 535 | /* ARMv7-M interrupt return works by loading a magic value |
| 537 | 536 | into the PC. On real hardware the load causes the |
| ... | ... | @@ -547,22 +546,22 @@ int cpu_exec(CPUState *env1) |
| 547 | 546 | || !(env->uncached_cpsr & CPSR_I))) { |
| 548 | 547 | env->exception_index = EXCP_IRQ; |
| 549 | 548 | do_interrupt(env); |
| 550 | - BREAK_CHAIN; | |
| 549 | + next_tb = 0; | |
| 551 | 550 | } |
| 552 | 551 | #elif defined(TARGET_SH4) |
| 553 | 552 | if (interrupt_request & CPU_INTERRUPT_HARD) { |
| 554 | 553 | do_interrupt(env); |
| 555 | - BREAK_CHAIN; | |
| 554 | + next_tb = 0; | |
| 556 | 555 | } |
| 557 | 556 | #elif defined(TARGET_ALPHA) |
| 558 | 557 | if (interrupt_request & CPU_INTERRUPT_HARD) { |
| 559 | 558 | do_interrupt(env); |
| 560 | - BREAK_CHAIN; | |
| 559 | + next_tb = 0; | |
| 561 | 560 | } |
| 562 | 561 | #elif defined(TARGET_CRIS) |
| 563 | 562 | if (interrupt_request & CPU_INTERRUPT_HARD) { |
| 564 | 563 | do_interrupt(env); |
| 565 | - BREAK_CHAIN; | |
| 564 | + next_tb = 0; | |
| 566 | 565 | } |
| 567 | 566 | #elif defined(TARGET_M68K) |
| 568 | 567 | if (interrupt_request & CPU_INTERRUPT_HARD |
| ... | ... | @@ -575,7 +574,7 @@ int cpu_exec(CPUState *env1) |
| 575 | 574 | first signalled. */ |
| 576 | 575 | env->exception_index = env->pending_vector; |
| 577 | 576 | do_interrupt(1); |
| 578 | - BREAK_CHAIN; | |
| 577 | + next_tb = 0; | |
| 579 | 578 | } |
| 580 | 579 | #endif |
| 581 | 580 | /* Don't use the cached interupt_request value, |
| ... | ... | @@ -584,7 +583,7 @@ int cpu_exec(CPUState *env1) |
| 584 | 583 | env->interrupt_request &= ~CPU_INTERRUPT_EXITTB; |
| 585 | 584 | /* ensure that no TB jump will be modified as |
| 586 | 585 | the program flow was changed */ |
| 587 | - BREAK_CHAIN; | |
| 586 | + next_tb = 0; | |
| 588 | 587 | } |
| 589 | 588 | if (interrupt_request & CPU_INTERRUPT_EXIT) { |
| 590 | 589 | env->interrupt_request &= ~CPU_INTERRUPT_EXIT; |
| ... | ... | @@ -640,13 +639,13 @@ int cpu_exec(CPUState *env1) |
| 640 | 639 | spans two pages, we cannot safely do a direct |
| 641 | 640 | jump. */ |
| 642 | 641 | { |
| 643 | - if (T0 != 0 && | |
| 642 | + if (next_tb != 0 && | |
| 644 | 643 | #if USE_KQEMU |
| 645 | 644 | (env->kqemu_enabled != 2) && |
| 646 | 645 | #endif |
| 647 | 646 | tb->page_addr[1] == -1) { |
| 648 | 647 | spin_lock(&tb_lock); |
| 649 | - tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb); | |
| 648 | + tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb); | |
| 650 | 649 | spin_unlock(&tb_lock); |
| 651 | 650 | } |
| 652 | 651 | } |
| ... | ... | @@ -667,7 +666,7 @@ int cpu_exec(CPUState *env1) |
| 667 | 666 | asm volatile ("ble 0(%%sr4,%1)\n" |
| 668 | 667 | "copy %%r31,%%r18\n" |
| 669 | 668 | "copy %%r28,%0\n" |
| 670 | - : "=r" (T0) | |
| 669 | + : "=r" (next_tb) | |
| 671 | 670 | : "r" (gen_func) |
| 672 | 671 | : "r1", "r2", "r3", "r4", "r5", "r6", "r7", |
| 673 | 672 | "r8", "r9", "r10", "r11", "r12", "r13", |
| ... | ... | @@ -690,8 +689,29 @@ int cpu_exec(CPUState *env1) |
| 690 | 689 | fp.ip = tc_ptr; |
| 691 | 690 | fp.gp = code_gen_buffer + 2 * (1 << 20); |
| 692 | 691 | (*(void (*)(void)) &fp)(); |
| 692 | +#elif defined(__i386) | |
| 693 | + asm volatile ("sub $12, %%esp\n\t" | |
| 694 | + "push %%ebp\n\t" | |
| 695 | + "call *%1\n\t" | |
| 696 | + "pop %%ebp\n\t" | |
| 697 | + "add $12, %%esp\n\t" | |
| 698 | + : "=a" (next_tb) | |
| 699 | + : "a" (gen_func) | |
| 700 | + : "ebx", "ecx", "edx", "esi", "edi", "cc", | |
| 701 | + "memory"); | |
| 702 | +#elif defined(__x86_64__) | |
| 703 | + asm volatile ("sub $8, %%rsp\n\t" | |
| 704 | + "push %%rbp\n\t" | |
| 705 | + "call *%1\n\t" | |
| 706 | + "pop %%rbp\n\t" | |
| 707 | + "add $8, %%rsp\n\t" | |
| 708 | + : "=a" (next_tb) | |
| 709 | + : "a" (gen_func) | |
| 710 | + : "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9", | |
| 711 | + "r10", "r11", "r12", "r13", "r14", "r15", "cc", | |
| 712 | + "memory"); | |
| 693 | 713 | #else |
| 694 | - T0 = gen_func(); | |
| 714 | + next_tb = gen_func(); | |
| 695 | 715 | #endif |
| 696 | 716 | env->current_tb = NULL; |
| 697 | 717 | /* reset soft MMU for next block (it can currently |
| ... | ... | @@ -700,7 +720,7 @@ int cpu_exec(CPUState *env1) |
| 700 | 720 | if (env->hflags & HF_SOFTMMU_MASK) { |
| 701 | 721 | env->hflags &= ~HF_SOFTMMU_MASK; |
| 702 | 722 | /* do not allow linking to another block */ |
| 703 | - T0 = 0; | |
| 723 | + next_tb = 0; | |
| 704 | 724 | } |
| 705 | 725 | #endif |
| 706 | 726 | #if defined(USE_KQEMU) | ... | ... |