Commit d720b93d0bcfe1beb729245b9ed1e5f071a24bd5

Authored by bellard
1 parent eeab3a55

precise self modifying code support


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@745 c046a42c-6fe2-441c-8c8c-71466251a162
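"Precise" here means the guest may store to an instruction located close to, or even inside, the translation block that executes the store, and the new bytes must take effect before the next instruction runs. A hypothetical user-space test of the behaviour being supported (not part of this commit; assumes an x86 host or guest where mov eax,imm32 / ret encode as shown):

/* smc_test.c - hypothetical guest-side check, not from this commit */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    /* machine code for: mov eax, 1; ret */
    static const unsigned char tmpl[] = { 0xb8, 0x01, 0x00, 0x00, 0x00, 0xc3 };
    unsigned char *buf;
    int (*func)(void);

    buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;
    memcpy(buf, tmpl, sizeof(tmpl));
    func = (int (*)(void))buf;

    printf("before patch: %d\n", func());   /* prints 1 */
    buf[1] = 0x2a;       /* rewrite the immediate operand to 42 */
    printf("after patch:  %d\n", func());   /* must print 42, not a stale 1 */
    return 0;
}

Before this commit the write-protection machinery could invalidate translated code on such a store, but not when the store came from the very TB being invalidated; the changes below handle that case by regenerating a one-instruction TB and restarting execution.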
cpu-all.h
@@ -681,8 +681,7 @@ extern uint8_t *phys_ram_dirty;
 #define IO_MEM_CODE (3 << IO_MEM_SHIFT) /* used internally, never use directly */
 #define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */
 
-/* NOTE: vaddr is only used internally. Never use it except if you know what you do */
-typedef void CPUWriteMemoryFunc(uint32_t addr, uint32_t value, uint32_t vaddr);
+typedef void CPUWriteMemoryFunc(uint32_t addr, uint32_t value);
 typedef uint32_t CPUReadMemoryFunc(uint32_t addr);
 
 void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
exec.c
@@ -168,7 +168,6 @@ static inline PageDesc *page_find(unsigned int index)
 
 #if !defined(CONFIG_USER_ONLY)
 static void tlb_protect_code(CPUState *env, uint32_t addr);
-static void tlb_unprotect_code(CPUState *env, uint32_t addr);
 static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr);
 
 static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
@@ -533,30 +532,78 @@ static void build_page_bitmap(PageDesc *p)
     }
 }
 
+#ifdef TARGET_HAS_PRECISE_SMC
+
+static void tb_gen_code(CPUState *env,
+                        target_ulong pc, target_ulong cs_base, int flags,
+                        int cflags)
+{
+    TranslationBlock *tb;
+    uint8_t *tc_ptr;
+    target_ulong phys_pc, phys_page2, virt_page2;
+    int code_gen_size;
+
+    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
+    tb = tb_alloc((unsigned long)pc);
+    if (!tb) {
+        /* flush must be done */
+        tb_flush(env);
+        /* cannot fail at this point */
+        tb = tb_alloc((unsigned long)pc);
+    }
+    tc_ptr = code_gen_ptr;
+    tb->tc_ptr = tc_ptr;
+    tb->cs_base = cs_base;
+    tb->flags = flags;
+    tb->cflags = cflags;
+    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
+    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
+
+    /* check next page if needed */
+    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
+    phys_page2 = -1;
+    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
+        phys_page2 = get_phys_addr_code(env, virt_page2);
+    }
+    tb_link_phys(tb, phys_pc, phys_page2);
+}
+#endif
+
 /* invalidate all TBs which intersect with the target physical page
    starting in range [start;end[. NOTE: start and end must refer to
-   the same physical page. 'vaddr' is a virtual address referencing
-   the physical page of code. It is only used an a hint if there is no
-   code left. */
-static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
-                                          target_ulong vaddr)
-{
-    int n;
+   the same physical page. 'is_cpu_write_access' should be true if called
+   from a real cpu write access: the virtual CPU will exit the current
+   TB if code is modified inside this TB. */
+void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
+                                   int is_cpu_write_access)
+{
+    int n, current_tb_modified, current_tb_not_found, current_flags;
+#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
+    CPUState *env = cpu_single_env;
+#endif
     PageDesc *p;
-    TranslationBlock *tb, *tb_next;
+    TranslationBlock *tb, *tb_next, *current_tb;
     target_ulong tb_start, tb_end;
+    target_ulong current_pc, current_cs_base;
 
     p = page_find(start >> TARGET_PAGE_BITS);
     if (!p)
         return;
     if (!p->code_bitmap &&
-        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
+        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
+        is_cpu_write_access) {
         /* build code bitmap */
        build_page_bitmap(p);
    }
 
     /* we remove all the TBs in the range [start, end[ */
     /* XXX: see if in some cases it could be faster to invalidate all the code */
+    current_tb_not_found = is_cpu_write_access;
+    current_tb_modified = 0;
+    current_tb = NULL; /* avoid warning */
+    current_pc = 0; /* avoid warning */
+    current_cs_base = 0; /* avoid warning */
+    current_flags = 0; /* avoid warning */
     tb = p->first_tb;
     while (tb != NULL) {
         n = (long)tb & 3;
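The code_gen_ptr update in tb_gen_code above rounds the allocation pointer up with the usual power-of-two idiom so each TB's host code starts CODE_GEN_ALIGN-aligned. The idiom in isolation:

#include <assert.h>

/* round x up to the next multiple of align (align must be a power of two) */
static unsigned long align_up(unsigned long x, unsigned long align)
{
    return (x + align - 1) & ~(align - 1);
}

int main(void)
{
    assert(align_up(13, 16) == 16);  /* partially filled slot is padded */
    assert(align_up(32, 16) == 32);  /* aligned values are unchanged */
    return 0;
}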
@@ -573,6 +620,36 @@ static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
         }
         if (!(tb_end <= start || tb_start >= end)) {
+#ifdef TARGET_HAS_PRECISE_SMC
+            if (current_tb_not_found) {
+                current_tb_not_found = 0;
+                current_tb = NULL;
+                if (env->mem_write_pc) {
+                    /* now we have a real cpu fault */
+                    current_tb = tb_find_pc(env->mem_write_pc);
+                }
+            }
+            if (current_tb == tb &&
+                !(current_tb->cflags & CF_SINGLE_INSN)) {
+                /* If we are modifying the current TB, we must stop
+                   its execution. We could be more precise by checking
+                   that the modification is after the current PC, but it
+                   would require a specialized function to partially
+                   restore the CPU state */
+
+                current_tb_modified = 1;
+                cpu_restore_state(current_tb, env,
+                                  env->mem_write_pc, NULL);
+#if defined(TARGET_I386)
+                current_flags = env->hflags;
+                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
+                current_cs_base = (target_ulong)env->segs[R_CS].base;
+                current_pc = current_cs_base + env->eip;
+#else
+#error unsupported CPU
+#endif
+            }
+#endif /* TARGET_HAS_PRECISE_SMC */
             tb_phys_invalidate(tb, -1);
         }
         tb = tb_next;
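The detection above hinges on env->mem_write_pc: a host PC inside the translated-code buffer uniquely identifies the TB being executed. A sketch of the lookup tb_find_pc() performs, assuming (as in the exec.c of this era) that TBs are carved linearly out of a single buffer and are therefore sorted by tc_ptr:

/* demo types only; the real function works on TranslationBlock */
typedef struct DemoTB { unsigned long tc_ptr; } DemoTB;

static DemoTB *find_tb_by_host_pc(DemoTB *tbs, int nb_tbs, unsigned long pc)
{
    int lo = 0, hi = nb_tbs - 1;

    if (nb_tbs == 0 || pc < tbs[0].tc_ptr)
        return 0;
    while (lo <= hi) {
        int m = (lo + hi) >> 1;
        if (tbs[m].tc_ptr <= pc)
            lo = m + 1;
        else
            hi = m - 1;
    }
    /* tbs[hi] has the greatest tc_ptr <= pc: the TB containing pc */
    return &tbs[hi];
}

cpu_restore_state() then replays that TB's recorded PC mapping to recover a consistent guest eip/cs_base/flags, which is exactly what feeds tb_gen_code() below.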
@@ -581,13 +658,25 @@ static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
     /* if no code remaining, no need to continue to use slow writes */
     if (!p->first_tb) {
         invalidate_page_bitmap(p);
-        tlb_unprotect_code_phys(cpu_single_env, start, vaddr);
+        if (is_cpu_write_access) {
+            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
+        }
+    }
+#endif
+#ifdef TARGET_HAS_PRECISE_SMC
+    if (current_tb_modified) {
+        /* we generate a block containing just the instruction
+           modifying the memory. It will ensure that it cannot modify
+           itself */
+        tb_gen_code(env, current_pc, current_cs_base, current_flags,
+                    CF_SINGLE_INSN);
+        cpu_resume_from_signal(env, NULL);
     }
 #endif
 }
 
 /* len must be <= 8 and start must be a multiple of len */
-static inline void tb_invalidate_phys_page_fast(target_ulong start, int len, target_ulong vaddr)
+static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
 {
     PageDesc *p;
     int offset, b;
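cpu_resume_from_signal() never returns: the helper cannot simply fall back into host code that was just invalidated, so control longjmps to the main execution loop, which then picks up the freshly generated CF_SINGLE_INSN block. The control-flow trick in a self-contained form (illustration only, not QEMU code):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf cpu_loop_env;        /* stands in for the CPU loop's jmp_buf */

static void mem_write_helper(void)
{
    printf("store hit the executing TB: abandon it\n");
    longjmp(cpu_loop_env, 1);       /* analogue of cpu_resume_from_signal() */
}

int main(void)
{
    if (setjmp(cpu_loop_env) == 0) {
        mem_write_helper();         /* store helper deep inside translated code */
        printf("never reached\n");
    }
    printf("back in the loop: find or generate the next TB\n");
    return 0;
}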
@@ -608,77 +697,75 @@ static inline void tb_invalidate_phys_page_fast(target_ulong start, int len, tar
             goto do_invalidate;
     } else {
     do_invalidate:
-        tb_invalidate_phys_page_range(start, start + len, vaddr);
+        tb_invalidate_phys_page_range(start, start + len, 1);
     }
 }
 
-/* invalidate all TBs which intersect with the target virtual page
-   starting in range [start;end[. This function is usually used when
-   the target processor flushes its I-cache. NOTE: start and end must
-   refer to the same physical page */
-void tb_invalidate_page_range(target_ulong start, target_ulong end)
-{
-    int n;
-    PageDesc *p;
-    TranslationBlock *tb, *tb_next;
-    target_ulong pc;
-    target_ulong phys_start;
-
-#if !defined(CONFIG_USER_ONLY)
-    {
-        VirtPageDesc *vp;
-        vp = virt_page_find(start >> TARGET_PAGE_BITS);
-        if (!vp)
-            return;
-        if (vp->valid_tag != virt_valid_tag)
-            return;
-        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
-    }
-#else
-    phys_start = start;
-#endif
-    p = page_find(phys_start >> TARGET_PAGE_BITS);
-    if (!p)
-        return;
-    /* we remove all the TBs in the range [start, end[ */
-    /* XXX: see if in some cases it could be faster to invalidate all the code */
-    tb = p->first_tb;
-    while (tb != NULL) {
-        n = (long)tb & 3;
-        tb = (TranslationBlock *)((long)tb & ~3);
-        tb_next = tb->page_next[n];
-        pc = tb->pc;
-        if (!((pc + tb->size) <= start || pc >= end)) {
-            tb_phys_invalidate(tb, -1);
-        }
-        tb = tb_next;
-    }
-#if !defined(CONFIG_USER_ONLY)
-    /* if no code remaining, no need to continue to use slow writes */
-    if (!p->first_tb)
-        tlb_unprotect_code(cpu_single_env, start);
-#endif
-}
-
 #if !defined(CONFIG_SOFTMMU)
-static void tb_invalidate_phys_page(target_ulong addr)
+static void tb_invalidate_phys_page(target_ulong addr,
+                                    unsigned long pc, void *puc)
 {
-    int n;
+    int n, current_flags, current_tb_modified;
+    target_ulong current_pc, current_cs_base;
     PageDesc *p;
-    TranslationBlock *tb;
+    TranslationBlock *tb, *current_tb;
+#ifdef TARGET_HAS_PRECISE_SMC
+    CPUState *env = cpu_single_env;
+#endif
 
     addr &= TARGET_PAGE_MASK;
     p = page_find(addr >> TARGET_PAGE_BITS);
     if (!p)
         return;
     tb = p->first_tb;
+    current_tb_modified = 0;
+    current_tb = NULL;
+    current_pc = 0; /* avoid warning */
+    current_cs_base = 0; /* avoid warning */
+    current_flags = 0; /* avoid warning */
+#ifdef TARGET_HAS_PRECISE_SMC
+    if (tb && pc != 0) {
+        current_tb = tb_find_pc(pc);
+    }
+#endif
     while (tb != NULL) {
         n = (long)tb & 3;
         tb = (TranslationBlock *)((long)tb & ~3);
+#ifdef TARGET_HAS_PRECISE_SMC
+        if (current_tb == tb &&
+            !(current_tb->cflags & CF_SINGLE_INSN)) {
+            /* If we are modifying the current TB, we must stop
+               its execution. We could be more precise by checking
+               that the modification is after the current PC, but it
+               would require a specialized function to partially
+               restore the CPU state */
+
+            current_tb_modified = 1;
+            cpu_restore_state(current_tb, env, pc, puc);
+#if defined(TARGET_I386)
+            current_flags = env->hflags;
+            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
+            current_cs_base = (target_ulong)env->segs[R_CS].base;
+            current_pc = current_cs_base + env->eip;
+#else
+#error unsupported CPU
+#endif
+        }
+#endif /* TARGET_HAS_PRECISE_SMC */
         tb_phys_invalidate(tb, addr);
         tb = tb->page_next[n];
     }
     p->first_tb = NULL;
+#ifdef TARGET_HAS_PRECISE_SMC
+    if (current_tb_modified) {
+        /* we generate a block containing just the instruction
+           modifying the memory. It will ensure that it cannot modify
+           itself */
+        tb_gen_code(env, current_pc, current_cs_base, current_flags,
+                    CF_SINGLE_INSN);
+        cpu_resume_from_signal(env, puc);
+    }
+#endif
 }
 #endif
 
@@ -696,6 +783,8 @@ static inline void tb_alloc_page(TranslationBlock *tb,
     p->first_tb = (TranslationBlock *)((long)tb | n);
     invalidate_page_bitmap(p);
 
+#ifdef TARGET_HAS_SMC
+
 #if defined(CONFIG_USER_ONLY)
     if (p->flags & PAGE_WRITE) {
         unsigned long host_start, host_end, addr;
@@ -727,6 +816,8 @@ static inline void tb_alloc_page(TranslationBlock *tb,
         tlb_protect_code(cpu_single_env, virt_addr);
     }
 #endif
+
+#endif /* TARGET_HAS_SMC */
 }
 
 /* Allocate a new translation block. Flush the translation buffer if
@@ -910,13 +1001,21 @@ static void tb_reset_jump_recursive(TranslationBlock *tb)
     tb_reset_jump_recursive2(tb, 1);
 }
 
+static void breakpoint_invalidate(CPUState *env, target_ulong pc)
+{
+    target_ulong phys_addr;
+
+    phys_addr = cpu_get_phys_page_debug(env, pc);
+    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
+}
+
 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
    breakpoint is reached */
 int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
 {
 #if defined(TARGET_I386) || defined(TARGET_PPC)
     int i;
-
+
     for(i = 0; i < env->nb_breakpoints; i++) {
         if (env->breakpoints[i] == pc)
             return 0;
@@ -925,7 +1024,8 @@ int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
     if (env->nb_breakpoints >= MAX_BREAKPOINTS)
         return -1;
     env->breakpoints[env->nb_breakpoints++] = pc;
-    tb_invalidate_page_range(pc, pc + 1);
+
+    breakpoint_invalidate(env, pc);
     return 0;
 #else
     return -1;
@@ -946,7 +1046,8 @@ int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
     memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
             (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
     env->nb_breakpoints--;
-    tb_invalidate_page_range(pc, pc + 1);
+
+    breakpoint_invalidate(env, pc);
     return 0;
 #else
     return -1;
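cpu_breakpoint_remove keeps the breakpoint array dense with the standard memmove deletion idiom, shown standalone:

#include <assert.h>
#include <string.h>

int main(void)
{
    unsigned int bp[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
    int nb = 4, i = 1;                           /* remove bp[1] */

    memmove(&bp[i], &bp[i + 1], (nb - (i + 1)) * sizeof(bp[0]));
    nb--;
    assert(nb == 3 && bp[1] == 0x3000 && bp[2] == 0x4000);
    return 0;
}

Note that breakpoint_invalidate passes 0 for is_cpu_write_access: no guest store is in flight, so there is no current TB to interrupt.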
@@ -1197,27 +1298,6 @@ static void tlb_protect_code(CPUState *env, uint32_t addr)
 #endif
 }
 
-static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
-{
-    if (addr == (tlb_entry->address &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
-        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
-        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
-    }
-}
-
-/* update the TLB so that writes in virtual page 'addr' are no longer
-   tested self modifying code */
-static void tlb_unprotect_code(CPUState *env, uint32_t addr)
-{
-    int i;
-
-    addr &= TARGET_PAGE_MASK;
-    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_unprotect_code1(&env->tlb_write[0][i], addr);
-    tlb_unprotect_code1(&env->tlb_write[1][i], addr);
-}
-
 static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                        uint32_t phys_addr)
 {
@@ -1387,12 +1467,18 @@ int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
             /* ROM: access is ignored (same as unassigned) */
             env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
             env->tlb_write[is_user][index].addend = addend;
-        } else if (first_tb) {
+        } else
+        /* XXX: the PowerPC code seems not ready to handle
+           self modifying code with DCBI */
+#if defined(TARGET_HAS_SMC) || 1
+        if (first_tb) {
             /* if code is present, we use a specific memory
                handler. It works only for physical memory access */
             env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
             env->tlb_write[is_user][index].addend = addend;
-        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
+        } else
+#endif
+        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                    !cpu_physical_memory_is_dirty(pd)) {
             env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
             env->tlb_write[is_user][index].addend = addend;
@@ -1420,7 +1506,9 @@ int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
         } else {
             if (prot & PROT_WRITE) {
                 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
+#if defined(TARGET_HAS_SMC) || 1
                     first_tb ||
+#endif
                     ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                      !cpu_physical_memory_is_dirty(pd))) {
                     /* ROM: we do as if code was inside */
@@ -1450,7 +1538,7 @@ int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
 
 /* called from signal handler: invalidate the code and unprotect the
    page. Return TRUE if the fault was successfully handled. */
-int page_unprotect(unsigned long addr)
+int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
 {
 #if !defined(CONFIG_SOFTMMU)
     VirtPageDesc *vp;
@@ -1476,13 +1564,13 @@ int page_unprotect(unsigned long addr)
     printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
            addr, vp->phys_addr, vp->prot);
 #endif
-    /* set the dirty bit */
-    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
-    /* flush the code inside */
-    tb_invalidate_phys_page(vp->phys_addr);
     if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
         cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                   (unsigned long)addr, vp->prot);
+    /* set the dirty bit */
+    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
+    /* flush the code inside */
+    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
     return 1;
 #else
     return 0;
@@ -1582,7 +1670,7 @@ void page_set_flags(unsigned long start, unsigned long end, int flags)
         if (!(p->flags & PAGE_WRITE) &&
             (flags & PAGE_WRITE) &&
             p->first_tb) {
-            tb_invalidate_phys_page(addr);
+            tb_invalidate_phys_page(addr, 0, NULL);
         }
         p->flags = flags;
     }
@@ -1591,7 +1679,7 @@ void page_set_flags(unsigned long start, unsigned long end, int flags)
 
 /* called from signal handler: invalidate the code and unprotect the
    page. Return TRUE if the fault was successfully handled. */
-int page_unprotect(unsigned long address)
+int page_unprotect(unsigned long address, unsigned long pc, void *puc)
 {
     unsigned int page_index, prot, pindex;
     PageDesc *p, *p1;
@@ -1619,7 +1707,7 @@ int page_unprotect(unsigned long address)
         p1[pindex].flags |= PAGE_WRITE;
         /* and since the content will be modified, we must invalidate
            the corresponding translated code. */
-        tb_invalidate_phys_page(address);
+        tb_invalidate_phys_page(address, pc, puc);
 #ifdef DEBUG_TB_CHECK
         tb_invalidate_check(address);
 #endif
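In user mode, page_unprotect() is reached from the host SIGSEGV handler when a store hits a write-protected code page; the new pc and puc arguments forward the host fault PC and the signal's ucontext so tb_invalidate_phys_page() can run the precise-SMC path. A hedged sketch of the caller's side (not in this commit; the register name assumes a 32-bit x86 Linux host):

#define _GNU_SOURCE
#include <signal.h>
#include <ucontext.h>

int page_unprotect(unsigned long address, unsigned long pc, void *puc);

static void host_segv_handler(int sig, siginfo_t *info, void *puc)
{
    ucontext_t *uc = puc;
    unsigned long addr = (unsigned long)info->si_addr;
    /* assumption: x86 host; other hosts read a different register */
    unsigned long pc = (unsigned long)uc->uc_mcontext.gregs[REG_EIP];

    (void)sig;
    if (page_unprotect(addr, pc, puc))
        return;     /* protection fault on a code page: handled, retry store */
    /* otherwise it is a genuine guest fault to report */
}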
@@ -1639,14 +1727,13 @@ void page_unprotect_range(uint8_t *data, unsigned long data_size)
     start &= TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);
     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
-        page_unprotect(addr);
+        page_unprotect(addr, 0, NULL);
     }
 }
 
 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
 {
 }
-
 #endif /* defined(CONFIG_USER_ONLY) */
 
 /* register physical memory. 'size' must be a multiple of the target
@@ -1672,7 +1759,7 @@ static uint32_t unassigned_mem_readb(uint32_t addr)
     return 0;
 }
 
-static void unassigned_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
 {
 }
 
@@ -1691,37 +1778,37 @@ static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
 /* self modifying code support in soft mmu mode : writing to a page
    containing code comes to these functions */
 
-static void code_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void code_mem_writeb(uint32_t addr, uint32_t val)
 {
     unsigned long phys_addr;
 
     phys_addr = addr - (long)phys_ram_base;
 #if !defined(CONFIG_USER_ONLY)
-    tb_invalidate_phys_page_fast(phys_addr, 1, vaddr);
+    tb_invalidate_phys_page_fast(phys_addr, 1);
 #endif
     stb_raw((uint8_t *)addr, val);
     phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
 }
 
-static void code_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void code_mem_writew(uint32_t addr, uint32_t val)
 {
     unsigned long phys_addr;
 
     phys_addr = addr - (long)phys_ram_base;
 #if !defined(CONFIG_USER_ONLY)
-    tb_invalidate_phys_page_fast(phys_addr, 2, vaddr);
+    tb_invalidate_phys_page_fast(phys_addr, 2);
 #endif
     stw_raw((uint8_t *)addr, val);
     phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
 }
 
-static void code_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void code_mem_writel(uint32_t addr, uint32_t val)
 {
     unsigned long phys_addr;
 
     phys_addr = addr - (long)phys_ram_base;
 #if !defined(CONFIG_USER_ONLY)
-    tb_invalidate_phys_page_fast(phys_addr, 4, vaddr);
+    tb_invalidate_phys_page_fast(phys_addr, 4);
 #endif
     stl_raw((uint8_t *)addr, val);
     phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
@@ -1739,22 +1826,22 @@ static CPUWriteMemoryFunc *code_mem_write[3] = {
     code_mem_writel,
 };
 
-static void notdirty_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void notdirty_mem_writeb(uint32_t addr, uint32_t val)
 {
     stb_raw((uint8_t *)addr, val);
-    tlb_set_dirty(addr, vaddr);
+    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
 }
 
-static void notdirty_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void notdirty_mem_writew(uint32_t addr, uint32_t val)
 {
     stw_raw((uint8_t *)addr, val);
-    tlb_set_dirty(addr, vaddr);
+    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
 }
 
-static void notdirty_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
+static void notdirty_mem_writel(uint32_t addr, uint32_t val)
 {
     stl_raw((uint8_t *)addr, val);
-    tlb_set_dirty(addr, vaddr);
+    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
 }
 
 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
@@ -1861,17 +1948,17 @@ void cpu_physical_memory_rw(target_ulong addr, uint8_t *buf,
                 if (l >= 4 && ((addr & 3) == 0)) {
                     /* 32 bit read access */
                     val = ldl_raw(buf);
-                    io_mem_write[io_index][2](addr, val, 0);
+                    io_mem_write[io_index][2](addr, val);
                     l = 4;
                 } else if (l >= 2 && ((addr & 1) == 0)) {
                     /* 16 bit read access */
                     val = lduw_raw(buf);
-                    io_mem_write[io_index][1](addr, val, 0);
+                    io_mem_write[io_index][1](addr, val);
                     l = 2;
                 } else {
                     /* 8 bit access */
                     val = ldub_raw(buf);
-                    io_mem_write[io_index][0](addr, val, 0);
+                    io_mem_write[io_index][0](addr, val);
                     l = 1;
                 }
             } else {
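All of these handlers are reached through io_mem_write, a table indexed by I/O slot and by log2 of the access size. The dispatch pattern in isolation:

#include <stdio.h>
#include <stdint.h>

typedef void WriteFunc(uint32_t addr, uint32_t val);

static void w8(uint32_t a, uint32_t v)  { printf("byte store %u at %u\n", v, a); }
static void w16(uint32_t a, uint32_t v) { printf("word store %u at %u\n", v, a); }
static void w32(uint32_t a, uint32_t v) { printf("long store %u at %u\n", v, a); }

/* one slot, three widths: io_mem_write[index][SHIFT] works the same way */
static WriteFunc *io_write_demo[1][3] = { { w8, w16, w32 } };

int main(void)
{
    int shift = 2;                        /* 0 = 8-bit, 1 = 16-bit, 2 = 32-bit */
    io_write_demo[0][shift](0x1000, 42);
    return 0;
}

Dropping the vaddr argument from this signature is the point of the whole refactoring: the rarely needed context now travels through env->mem_write_vaddr and env->mem_write_pc instead.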
softmmu_template.h
@@ -70,20 +70,23 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
 
 static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
                                           DATA_TYPE val,
-                                          unsigned long tlb_addr)
+                                          unsigned long tlb_addr,
+                                          void *retaddr)
 {
     int index;
 
     index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+    env->mem_write_vaddr = tlb_addr;
+    env->mem_write_pc = (unsigned long)retaddr;
 #if SHIFT <= 2
-    io_mem_write[index][SHIFT](physaddr, val, tlb_addr);
+    io_mem_write[index][SHIFT](physaddr, val);
 #else
 #ifdef TARGET_WORDS_BIGENDIAN
-    io_mem_write[index][2](physaddr, val >> 32, tlb_addr);
-    io_mem_write[index][2](physaddr + 4, val, tlb_addr);
+    io_mem_write[index][2](physaddr, val >> 32);
+    io_mem_write[index][2](physaddr + 4, val);
 #else
-    io_mem_write[index][2](physaddr, val, tlb_addr);
-    io_mem_write[index][2](physaddr + 4, val >> 32, tlb_addr);
+    io_mem_write[index][2](physaddr, val);
+    io_mem_write[index][2](physaddr + 4, val >> 32);
 #endif
 #endif /* SHIFT > 2 */
 }
@@ -193,7 +196,8 @@ void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr,
         /* IO access */
         if ((addr & (DATA_SIZE - 1)) != 0)
             goto do_unaligned_access;
-        glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
+        retaddr = GETPC();
+        glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
     } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
     do_unaligned_access:
         retaddr = GETPC();
@@ -229,7 +233,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
         /* IO access */
         if ((addr & (DATA_SIZE - 1)) != 0)
             goto do_unaligned_access;
-        glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
+        glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
     } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
     do_unaligned_access:
         /* XXX: not efficient, but simple */
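GETPC(), now also evaluated before the io_write fast path, is the host return address of the store helper, i.e. an address inside the caller's translated code; that is what makes tb_find_pc() possible. A plausible definition plus a minimal demo (assumption: QEMU defines GETPC() per host; with GCC it is essentially the return-address builtin):

#include <stdio.h>

#define GETPC() ((unsigned long)__builtin_return_address(0))

static void helper(void)
{
    /* the value points into the caller's code, which for a softmmu
       helper means into the translated-code buffer */
    printf("called from host pc %#lx\n", GETPC());
}

int main(void)
{
    helper();
    return 0;
}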
target-arm/cpu.h
@@ -43,6 +43,13 @@ typedef struct CPUARMState {
     struct TranslationBlock *current_tb;
     int user_mode_only;
 
+    /* in order to avoid passing too many arguments to the memory
+       write helpers, we store some rarely used information in the CPU
+       context */
+    unsigned long mem_write_pc; /* host pc at which the memory was
+                                   written */
+    unsigned long mem_write_vaddr; /* target virtual addr at which the
+                                      memory was written */
     /* user data */
     void *opaque;
 } CPUARMState;
target-i386/cpu.h
@@ -22,6 +22,12 @@
 
 #define TARGET_LONG_BITS 32
 
+/* target supports implicit self modifying code */
+#define TARGET_HAS_SMC
+/* support for self modifying code even if the modified instruction is
+   close to the modifying instruction */
+#define TARGET_HAS_PRECISE_SMC
+
 #include "cpu-defs.h"
 
 #if defined(__i386__) && !defined(CONFIG_SOFTMMU)
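The two macros are intentionally separate. A hypothetical target whose guests only patch code after an explicit I-cache flush could opt into the cheap invalidation alone (illustrative only; no such target is touched by this commit):

/* hypothetical-target/cpu.h */
#define TARGET_HAS_SMC
/* TARGET_HAS_PRECISE_SMC deliberately left undefined: a store never
   needs to interrupt the translation block that issued it */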
@@ -331,8 +337,16 @@ typedef struct CPUX86State {
     int interrupt_request;
     int user_mode_only; /* user mode only simulation */
 
-    /* soft mmu support */
     uint32_t a20_mask;
+
+    /* soft mmu support */
+    /* in order to avoid passing too many arguments to the memory
+       write helpers, we store some rarely used information in the CPU
+       context */
+    unsigned long mem_write_pc; /* host pc at which the memory was
+                                   written */
+    unsigned long mem_write_vaddr; /* target virtual addr at which the
+                                      memory was written */
     /* 0 = kernel, 1 = user */
     CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
     CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];
@@ -358,7 +372,7 @@ int cpu_x86_inl(CPUX86State *env, int addr);
 CPUX86State *cpu_x86_init(void);
 int cpu_x86_exec(CPUX86State *s);
 void cpu_x86_close(CPUX86State *s);
-int cpu_x86_get_pic_interrupt(CPUX86State *s);
+int cpu_get_pic_interrupt(CPUX86State *s);
 
 /* this function must always be used to load data in the segment
    cache: it synchronizes the hflags with the segment cache values */
target-i386/translate-copy.c
@@ -1189,6 +1189,8 @@ static inline int gen_intermediate_code_internal(CPUState *env,
         return -1;
     if (!(flags & HF_SS32_MASK))
         return -1;
+    if (tb->cflags & CF_SINGLE_INSN)
+        return -1;
     gen_code_end = gen_code_ptr +
         GEN_CODE_MAX_SIZE - GEN_CODE_MAX_INSN_SIZE;
     dc->gen_code_ptr = gen_code_ptr;
target-i386/translate.c
@@ -4491,7 +4491,7 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     DisasContext dc1, *dc = &dc1;
     uint8_t *pc_ptr;
     uint16_t *gen_opc_end;
-    int flags, j, lj;
+    int flags, j, lj, cflags;
     uint8_t *pc_start;
     uint8_t *cs_base;
 
@@ -4499,6 +4499,7 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     pc_start = (uint8_t *)tb->pc;
     cs_base = (uint8_t *)tb->cs_base;
     flags = tb->flags;
+    cflags = tb->cflags;
 
     dc->pe = (flags >> HF_PE_SHIFT) & 1;
     dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
@@ -4573,7 +4574,8 @@ static inline int gen_intermediate_code_internal(CPUState *env,
            the flag and abort the translation to give the irqs a
            chance to happen */
         if (dc->tf || dc->singlestep_enabled ||
-            (flags & HF_INHIBIT_IRQ_MASK)) {
+            (flags & HF_INHIBIT_IRQ_MASK) ||
+            (cflags & CF_SINGLE_INSN)) {
             gen_op_jmp_im(pc_ptr - dc->cs_base);
             gen_eob(dc);
             break;
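Both translators now cut a block short when CF_SINGLE_INSN is set, which is what makes the regenerated one-instruction TB safe: it retires the modifying store and immediately returns to the execution loop. A toy model of that loop shape (the flag value here is hypothetical):

#include <stdio.h>

#define CF_SINGLE_INSN 0x0008     /* hypothetical value for the demo */

/* toy translator: normally consumes the whole block, but stops after
   exactly one instruction in single-insn mode */
static int translate_block(const char *insn[], int n, int cflags)
{
    int i = 0;

    while (i < n) {
        printf("emit %s\n", insn[i]);
        i++;
        if (cflags & CF_SINGLE_INSN)
            break;
    }
    return i;                     /* guest instructions consumed */
}

int main(void)
{
    const char *code[] = { "mov", "add", "jmp" };

    printf("%d insns consumed\n", translate_block(code, 3, 0));
    printf("%d insn consumed\n", translate_block(code, 3, CF_SINGLE_INSN));
    return 0;
}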
target-ppc/cpu.h
@@ -164,6 +164,13 @@ typedef struct CPUPPCState {
     int user_mode_only; /* user mode only simulation */
     struct TranslationBlock *current_tb; /* currently executing TB */
     /* soft mmu support */
+    /* in order to avoid passing too many arguments to the memory
+       write helpers, we store some rarely used information in the CPU
+       context */
+    unsigned long mem_write_pc; /* host pc at which the memory was
+                                   written */
+    unsigned long mem_write_vaddr; /* target virtual addr at which the
+                                      memory was written */
     /* 0 = kernel, 1 = user (may have 2 = kernel code, 3 = user code ?) */
     CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
     CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];
target-sparc/cpu.h
@@ -43,6 +43,14 @@ typedef struct CPUSPARCState {
     void *opaque;
     /* NOTE: we allow 8 more registers to handle wrapping */
     uint32_t regbase[NWINDOWS * 16 + 8];
+
+    /* in order to avoid passing too many arguments to the memory
+       write helpers, we store some rarely used information in the CPU
+       context */
+    unsigned long mem_write_pc; /* host pc at which the memory was
+                                   written */
+    unsigned long mem_write_vaddr; /* target virtual addr at which the
+                                      memory was written */
 } CPUSPARCState;
 
 CPUSPARCState *cpu_sparc_init(void);