Commit b5fc09ae52e3d19e01126715c998eb6587795b56

Authored by blueswir1
1 parent c75a823c

Fix crash due to invalid env->current_tb (Adam Lackorzynski, Paul Brook, me)

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4317 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 44 additions and 24 deletions
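
The patch retires the global register variable T0 as the carrier of the TB-chaining cookie and replaces it with a static unsigned long next_tb; on i386 and x86_64 the call into generated code becomes an explicit asm sequence whose result lands in next_tb via %eax/%rax. The cookie packs the previous TranslationBlock pointer together with a jump-slot index in its two low bits, which is exactly how the tb_add_jump() call in the diff below consumes it, and a value of 0 means "do not chain" (hence the interrupt paths set next_tb = 0 where they used to invoke BREAK_CHAIN). A minimal, stand-alone sketch of that encoding, with made-up helper names (encode_next_tb and friends are illustrative, not QEMU API):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Opaque stand-in; the real definition lives in QEMU's exec-all.h. */
    typedef struct TranslationBlock TranslationBlock;

    /* Pack "previous TB + which of its jump slots to patch" the way the
       executor unpacks it below with (next_tb & ~3) and (next_tb & 3). */
    static unsigned long encode_next_tb(TranslationBlock *tb, unsigned slot)
    {
        assert(slot < 4 && ((uintptr_t)tb & 3) == 0); /* TBs are at least 4-byte aligned */
        return (unsigned long)(uintptr_t)tb | slot;
    }

    static TranslationBlock *next_tb_ptr(unsigned long next_tb)
    {
        return (TranslationBlock *)(uintptr_t)(next_tb & ~3UL);
    }

    static unsigned next_tb_slot(unsigned long next_tb)
    {
        return (unsigned)(next_tb & 3);
    }

    int main(void)
    {
        /* Fake, suitably aligned address, purely for the demo. */
        TranslationBlock *tb = (TranslationBlock *)(uintptr_t)0x100000;
        unsigned long next_tb = encode_next_tb(tb, 1);

        printf("tb=%p slot=%u\n", (void *)next_tb_ptr(next_tb), next_tb_slot(next_tb));
        return 0;
    }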
cpu-exec.c
@@ -36,6 +36,7 @@
 #endif
 
 int tb_invalidated_flag;
+static unsigned long next_tb;
 
 //#define DEBUG_EXEC
 //#define DEBUG_SIGNAL
@@ -273,14 +274,12 @@ static inline TranslationBlock *tb_find_fast(void)
             /* as some TB could have been invalidated because
                of memory exceptions while generating the code, we
                must recompute the hash index here */
-            T0 = 0;
+            next_tb = 0;
         }
     }
     return tb;
 }
 
-#define BREAK_CHAIN T0 = 0
-
 /* main execution loop */
 
 int cpu_exec(CPUState *env1)
@@ -293,7 +292,7 @@ int cpu_exec(CPUState *env1)
 #endif
 #endif
     int ret, interrupt_request;
-    long (*gen_func)(void);
+    unsigned long (*gen_func)(void);
     TranslationBlock *tb;
     uint8_t *tc_ptr;
 
@@ -414,7 +413,7 @@ int cpu_exec(CPUState *env1)
             }
 #endif
 
-            T0 = 0; /* force lookup of first TB */
+            next_tb = 0; /* force lookup of first TB */
             for(;;) {
                 SAVE_GLOBALS();
                 interrupt_request = env->interrupt_request;
@@ -443,13 +442,13 @@ int cpu_exec(CPUState *env1)
                         svm_check_intercept(SVM_EXIT_SMI);
                         env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                         do_smm_enter();
-                        BREAK_CHAIN;
+                        next_tb = 0;
                     } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                         !(env->hflags & HF_NMI_MASK)) {
                         env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                         env->hflags |= HF_NMI_MASK;
                         do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
-                        BREAK_CHAIN;
+                        next_tb = 0;
                     } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                         (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                         !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
@@ -463,7 +462,7 @@ int cpu_exec(CPUState *env1)
                         do_interrupt(intno, 0, 0, 0, 1);
                         /* ensure that no TB jump will be modified as
                            the program flow was changed */
-                        BREAK_CHAIN;
+                        next_tb = 0;
 #if !defined(CONFIG_USER_ONLY)
                     } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                         (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
@@ -477,7 +476,7 @@ int cpu_exec(CPUState *env1)
                         do_interrupt(intno, 0, 0, -1, 1);
                         stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                  ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
-                        BREAK_CHAIN;
+                        next_tb = 0;
 #endif
                     }
 #elif defined(TARGET_PPC)
@@ -490,7 +489,7 @@ int cpu_exec(CPUState *env1)
                     ppc_hw_interrupt(env);
                     if (env->pending_interrupts == 0)
                         env->interrupt_request &= ~CPU_INTERRUPT_HARD;
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #elif defined(TARGET_MIPS)
                 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -503,7 +502,7 @@ int cpu_exec(CPUState *env1)
                     env->exception_index = EXCP_EXT_INTERRUPT;
                     env->error_code = 0;
                     do_interrupt(env);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #elif defined(TARGET_SPARC)
                 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -520,7 +519,7 @@ int cpu_exec(CPUState *env1)
 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                         cpu_check_irqs(env);
 #endif
-                        BREAK_CHAIN;
+                        next_tb = 0;
                     }
                 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                     //do_interrupt(0, 0, 0, 0, 0);
@@ -531,7 +530,7 @@ int cpu_exec(CPUState *env1)
                     && !(env->uncached_cpsr & CPSR_F)) {
                     env->exception_index = EXCP_FIQ;
                     do_interrupt(env);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
                 /* ARMv7-M interrupt return works by loading a magic value
                    into the PC.  On real hardware the load causes the
@@ -547,22 +546,22 @@ int cpu_exec(CPUState *env1)
                     || !(env->uncached_cpsr & CPSR_I))) {
                     env->exception_index = EXCP_IRQ;
                     do_interrupt(env);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #elif defined(TARGET_SH4)
                 if (interrupt_request & CPU_INTERRUPT_HARD) {
                     do_interrupt(env);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #elif defined(TARGET_ALPHA)
                 if (interrupt_request & CPU_INTERRUPT_HARD) {
                     do_interrupt(env);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #elif defined(TARGET_CRIS)
                 if (interrupt_request & CPU_INTERRUPT_HARD) {
                     do_interrupt(env);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #elif defined(TARGET_M68K)
                 if (interrupt_request & CPU_INTERRUPT_HARD
@@ -575,7 +574,7 @@ int cpu_exec(CPUState *env1)
                        first signalled.  */
                     env->exception_index = env->pending_vector;
                     do_interrupt(1);
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
 #endif
                /* Don't use the cached interupt_request value,
@@ -584,7 +583,7 @@ int cpu_exec(CPUState *env1)
                     env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                     /* ensure that no TB jump will be modified as
                        the program flow was changed */
-                    BREAK_CHAIN;
+                    next_tb = 0;
                 }
                 if (interrupt_request & CPU_INTERRUPT_EXIT) {
                     env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
@@ -640,13 +639,13 @@ int cpu_exec(CPUState *env1)
                    spans two pages, we cannot safely do a direct
                    jump. */
                 {
-                    if (T0 != 0 &&
+                    if (next_tb != 0 &&
 #if USE_KQEMU
                         (env->kqemu_enabled != 2) &&
 #endif
                         tb->page_addr[1] == -1) {
                     spin_lock(&tb_lock);
-                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
+                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                     spin_unlock(&tb_lock);
                 }
                 }
@@ -667,7 +666,7 @@ int cpu_exec(CPUState *env1)
                 asm volatile ("ble  0(%%sr4,%1)\n"
                               "copy %%r31,%%r18\n"
                               "copy %%r28,%0\n"
-                              : "=r" (T0)
+                              : "=r" (next_tb)
                               : "r" (gen_func)
                               : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
                                 "r8", "r9", "r10", "r11", "r12", "r13",
@@ -690,8 +689,29 @@ int cpu_exec(CPUState *env1)
                 fp.ip = tc_ptr;
                 fp.gp = code_gen_buffer + 2 * (1 << 20);
                 (*(void (*)(void)) &fp)();
+#elif defined(__i386)
+                asm volatile ("sub $12, %%esp\n\t"
+                              "push %%ebp\n\t"
+                              "call *%1\n\t"
+                              "pop %%ebp\n\t"
+                              "add $12, %%esp\n\t"
+                              : "=a" (next_tb)
+                              : "a" (gen_func)
+                              : "ebx", "ecx", "edx", "esi", "edi", "cc",
+                                "memory");
+#elif defined(__x86_64__)
+                asm volatile ("sub $8, %%rsp\n\t"
+                              "push %%rbp\n\t"
+                              "call *%1\n\t"
+                              "pop %%rbp\n\t"
+                              "add $8, %%rsp\n\t"
+                              : "=a" (next_tb)
+                              : "a" (gen_func)
+                              : "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9",
+                                "r10", "r11", "r12", "r13", "r14", "r15", "cc",
+                                "memory");
 #else
-                T0 = gen_func();
+                next_tb = gen_func();
 #endif
                 env->current_tb = NULL;
                 /* reset soft MMU for next block (it can currently
@@ -700,7 +720,7 @@ int cpu_exec(CPUState *env1)
                 if (env->hflags & HF_SOFTMMU_MASK) {
                     env->hflags &= ~HF_SOFTMMU_MASK;
                     /* do not allow linking to another block */
-                    T0 = 0;
+                    next_tb = 0;
                 }
 #endif
 #if defined(USE_KQEMU)
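
For reference, the shape of the new direct-call sequence can be exercised on its own. The snippet below is an illustrative stand-alone program, not QEMU code (fake_generated_code and its 0x1234 return value are invented for the demo); it mirrors the __x86_64__ branch added above, keeping %rsp 16-byte aligned and %rbp intact around the indirect call and reading the "next TB | jump slot" result out of %rax into next_tb, with a plain C call as the fallback just like the diff's #else branch.

    #include <stdio.h>

    /* Stand-in for a block of generated code; in QEMU, gen_func points into
       the translation cache.  The constant is arbitrary demo data. */
    static unsigned long fake_generated_code(void)
    {
        return 0x1234;          /* pretend "next TB pointer | jump slot" */
    }

    int main(void)
    {
        unsigned long (*gen_func)(void) = fake_generated_code;
        unsigned long next_tb;

    #if defined(__x86_64__)
        /* Same sequence as the diff's __x86_64__ branch: the sub/push keep the
           stack 16-byte aligned at the call and preserve %rbp, and the "a"
           constraints route both the function pointer and the result through
           %rax. */
        asm volatile ("sub $8, %%rsp\n\t"
                      "push %%rbp\n\t"
                      "call *%1\n\t"
                      "pop %%rbp\n\t"
                      "add $8, %%rsp\n\t"
                      : "=a" (next_tb)
                      : "a" (gen_func)
                      : "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9",
                        "r10", "r11", "r12", "r13", "r14", "r15", "cc",
                        "memory");
    #else
        next_tb = gen_func();   /* portable path, as in the diff's #else branch */
    #endif

        printf("next_tb = 0x%lx\n", next_tb);
        return 0;
    }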