Commit 2e70f6efa8b960d3b5401373ad6fa98747bb9578
1 parent f6e5889e
Add instruction counter.
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4799 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 29 changed files with 830 additions and 117 deletions
cpu-all.h
... | ... | @@ -782,6 +782,8 @@ void cpu_abort(CPUState *env, const char *fmt, ...) |
782 | 782 | __attribute__ ((__noreturn__)); |
783 | 783 | extern CPUState *first_cpu; |
784 | 784 | extern CPUState *cpu_single_env; |
785 | +extern int64_t qemu_icount; | |
786 | +extern int use_icount; | |
785 | 787 | |
786 | 788 | #define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */ |
787 | 789 | #define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */ | ... | ... |
cpu-defs.h
... | ... | @@ -130,17 +130,29 @@ typedef struct CPUTLBEntry { |
130 | 130 | sizeof(target_phys_addr_t))]; |
131 | 131 | } CPUTLBEntry; |
132 | 132 | |
133 | +#ifdef WORDS_BIGENDIAN | |
134 | +typedef struct icount_decr_u16 { | |
135 | + uint16_t high; | |
136 | + uint16_t low; | |
137 | +} icount_decr_u16; | |
138 | +#else | |
139 | +typedef struct icount_decr_u16 { | |
140 | + uint16_t low; | |
141 | + uint16_t high; | |
142 | +} icount_decr_u16; | |
143 | +#endif | |
144 | + | |
133 | 145 | #define CPU_TEMP_BUF_NLONGS 128 |
134 | 146 | #define CPU_COMMON \ |
135 | 147 | struct TranslationBlock *current_tb; /* currently executing TB */ \ |
136 | 148 | /* soft mmu support */ \ |
137 | - /* in order to avoid passing too many arguments to the memory \ | |
138 | - write helpers, we store some rarely used information in the CPU \ | |
149 | + /* in order to avoid passing too many arguments to the MMIO \ | |
150 | + helpers, we store some rarely used information in the CPU \ | |
139 | 151 | context) */ \ |
140 | - unsigned long mem_write_pc; /* host pc at which the memory was \ | |
141 | - written */ \ | |
142 | - target_ulong mem_write_vaddr; /* target virtual addr at which the \ | |
143 | - memory was written */ \ | |
152 | + unsigned long mem_io_pc; /* host pc at which the memory was \ | |
153 | + accessed */ \ | |
154 | + target_ulong mem_io_vaddr; /* target virtual addr at which the \ | |
155 | + memory was accessed */ \ | |
144 | 156 | int halted; /* TRUE if the CPU is in suspend state */ \ |
145 | 157 | /* The meaning of the MMU modes is defined in the target code. */ \ |
146 | 158 | CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ |
... | ... | @@ -149,6 +161,16 @@ typedef struct CPUTLBEntry { |
149 | 161 | /* buffer for temporaries in the code generator */ \ |
150 | 162 | long temp_buf[CPU_TEMP_BUF_NLONGS]; \ |
151 | 163 | \ |
164 | + int64_t icount_extra; /* Instructions until next timer event. */ \ | |
165 | + /* Number of cycles left, with interrupt flag in high bit. \ | |
166 | + This allows a single read-compare-cbranch-write sequence to test \ | |
167 | + for both decrementer underflow and exceptions. */ \ | |
168 | + union { \ | |
169 | + uint32_t u32; \ | |
170 | + icount_decr_u16 u16; \ | |
171 | + } icount_decr; \ | |
172 | + uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */ \ | |
173 | + \ | |
152 | 174 | /* from this point: preserved by CPU reset */ \ |
153 | 175 | /* ice debug support */ \ |
154 | 176 | target_ulong breakpoints[MAX_BREAKPOINTS]; \ | ... | ... |
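The icount_decr union added above is the heart of the counter: the low half holds a 16-bit instruction budget and the high half is set to 0x8000 when an interrupt is raised, so a single signed 32-bit read goes negative on either decrementer underflow or a pending event. A minimal standalone sketch of the idea (hypothetical test harness, little-endian layout assumed, not part of the commit):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint16_t low; uint16_t high; } icount_decr_u16;

    union icount_decr {
        uint32_t u32;
        icount_decr_u16 u16;
    };

    int main(void)
    {
        union icount_decr d;

        d.u16.low = 100;     /* 100 instructions left in this slice */
        d.u16.high = 0;
        printf("keep running: %d\n", (int32_t)d.u32 >= 0);  /* prints 1 */

        d.u16.high = 0x8000; /* interrupt raised: force the sign bit */
        printf("must exit: %d\n", (int32_t)d.u32 < 0);      /* prints 1 */
        return 0;
    }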
cpu-exec.c
... | ... | @@ -82,15 +82,40 @@ void cpu_resume_from_signal(CPUState *env1, void *puc) |
82 | 82 | longjmp(env->jmp_env, 1); |
83 | 83 | } |
84 | 84 | |
85 | +/* Execute the code without caching the generated code. An interpreter | |
86 | + could be used if available. */ | |
87 | +static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb) | |
88 | +{ | |
89 | + unsigned long next_tb; | |
90 | + TranslationBlock *tb; | |
91 | + | |
92 | + /* Should never happen. | |
93 | + We only end up here when an existing TB is too long. */ | |
94 | + if (max_cycles > CF_COUNT_MASK) | |
95 | + max_cycles = CF_COUNT_MASK; | |
96 | + | |
97 | + tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags, | |
98 | + max_cycles); | |
99 | + env->current_tb = tb; | |
100 | + /* execute the generated code */ | |
101 | + next_tb = tcg_qemu_tb_exec(tb->tc_ptr); | |
102 | + | |
103 | + if ((next_tb & 3) == 2) { | |
104 | + /* Restore PC. This may happen if an async event occurs before | |
105 | + the TB starts executing. */ | |
106 | + CPU_PC_FROM_TB(env, tb); | |
107 | + } | |
108 | + tb_phys_invalidate(tb, -1); | |
109 | + tb_free(tb); | |
110 | +} | |
111 | + | |
85 | 112 | static TranslationBlock *tb_find_slow(target_ulong pc, |
86 | 113 | target_ulong cs_base, |
87 | 114 | uint64_t flags) |
88 | 115 | { |
89 | 116 | TranslationBlock *tb, **ptb1; |
90 | - int code_gen_size; | |
91 | 117 | unsigned int h; |
92 | 118 | target_ulong phys_pc, phys_page1, phys_page2, virt_page2; |
93 | - uint8_t *tc_ptr; | |
94 | 119 | |
95 | 120 | tb_invalidated_flag = 0; |
96 | 121 | |
... | ... | @@ -124,30 +149,8 @@ static TranslationBlock *tb_find_slow(target_ulong pc, |
124 | 149 | ptb1 = &tb->phys_hash_next; |
125 | 150 | } |
126 | 151 | not_found: |
127 | - /* if no translated code available, then translate it now */ | |
128 | - tb = tb_alloc(pc); | |
129 | - if (!tb) { | |
130 | - /* flush must be done */ | |
131 | - tb_flush(env); | |
132 | - /* cannot fail at this point */ | |
133 | - tb = tb_alloc(pc); | |
134 | - /* don't forget to invalidate previous TB info */ | |
135 | - tb_invalidated_flag = 1; | |
136 | - } | |
137 | - tc_ptr = code_gen_ptr; | |
138 | - tb->tc_ptr = tc_ptr; | |
139 | - tb->cs_base = cs_base; | |
140 | - tb->flags = flags; | |
141 | - cpu_gen_code(env, tb, &code_gen_size); | |
142 | - code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); | |
143 | - | |
144 | - /* check next page if needed */ | |
145 | - virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; | |
146 | - phys_page2 = -1; | |
147 | - if ((pc & TARGET_PAGE_MASK) != virt_page2) { | |
148 | - phys_page2 = get_phys_addr_code(env, virt_page2); | |
149 | - } | |
150 | - tb_link_phys(tb, phys_pc, phys_page2); | |
152 | + /* if no translated code available, then translate it now */ | |
153 | + tb = tb_gen_code(env, pc, cs_base, flags, 0); | |
151 | 154 | |
152 | 155 | found: |
153 | 156 | /* we add the TB in the virtual pc hash table */ |
... | ... | @@ -583,6 +586,7 @@ int cpu_exec(CPUState *env1) |
583 | 586 | of memory exceptions while generating the code, we |
584 | 587 | must recompute the hash index here */ |
585 | 588 | next_tb = 0; |
589 | + tb_invalidated_flag = 0; | |
586 | 590 | } |
587 | 591 | #ifdef DEBUG_EXEC |
588 | 592 | if ((loglevel & CPU_LOG_EXEC)) { |
... | ... | @@ -604,16 +608,45 @@ int cpu_exec(CPUState *env1) |
604 | 608 | } |
605 | 609 | } |
606 | 610 | spin_unlock(&tb_lock); |
607 | - tc_ptr = tb->tc_ptr; | |
608 | 611 | env->current_tb = tb; |
612 | + while (env->current_tb) { | |
613 | + tc_ptr = tb->tc_ptr; | |
609 | 614 | /* execute the generated code */ |
610 | 615 | #if defined(__sparc__) && !defined(HOST_SOLARIS) |
611 | 616 | #undef env |
612 | - env = cpu_single_env; | |
617 | + env = cpu_single_env; | |
613 | 618 | #define env cpu_single_env |
614 | 619 | #endif |
615 | - next_tb = tcg_qemu_tb_exec(tc_ptr); | |
616 | - env->current_tb = NULL; | |
620 | + next_tb = tcg_qemu_tb_exec(tc_ptr); | |
621 | + env->current_tb = NULL; | |
622 | + if ((next_tb & 3) == 2) { | |
623 | + /* Instruction counter expired. */ | |
624 | + int insns_left; | |
625 | + tb = (TranslationBlock *)(long)(next_tb & ~3); | |
626 | + /* Restore PC. */ | |
627 | + CPU_PC_FROM_TB(env, tb); | |
628 | + insns_left = env->icount_decr.u32; | |
629 | + if (env->icount_extra && insns_left >= 0) { | |
630 | + /* Refill decrementer and continue execution. */ | |
631 | + env->icount_extra += insns_left; | |
632 | + if (env->icount_extra > 0xffff) { | |
633 | + insns_left = 0xffff; | |
634 | + } else { | |
635 | + insns_left = env->icount_extra; | |
636 | + } | |
637 | + env->icount_extra -= insns_left; | |
638 | + env->icount_decr.u16.low = insns_left; | |
639 | + } else { | |
640 | + if (insns_left > 0) { | |
641 | + /* Execute remaining instructions. */ | |
642 | + cpu_exec_nocache(insns_left, tb); | |
643 | + } | |
644 | + env->exception_index = EXCP_INTERRUPT; | |
645 | + next_tb = 0; | |
646 | + cpu_loop_exit(); | |
647 | + } | |
648 | + } | |
649 | + } | |
617 | 650 | /* reset soft MMU for next block (it can currently |
618 | 651 | only be set by a memory fault) */ |
619 | 652 | #if defined(USE_KQEMU) | ... | ... |
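For reference, the generated code returns with the value 2 in the low bits of next_tb when the instruction counter expired, and the remaining bits point at the interrupted TB (hence the `next_tb & ~3` mask). The refill path above caps the 16-bit decrementer and banks the rest in icount_extra; a hedged sketch of that arithmetic with illustrative numbers:

    #include <stdint.h>

    /* Sketch of the refill in cpu_exec(), assuming 70000 instructions
       remain until the next timer event and the decrementer just hit 0. */
    static void refill_example(void)
    {
        int64_t icount_extra = 70000;
        int insns_left = 0;

        icount_extra += insns_left;   /* fold back any unused budget */
        insns_left = icount_extra > 0xffff ? 0xffff : (int)icount_extra;
        icount_extra -= insns_left;   /* extra = 4465, u16.low gets 65535 */
    }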
exec-all.h
... | ... | @@ -27,7 +27,7 @@ |
27 | 27 | #define DISAS_UPDATE 2 /* cpu state was modified dynamically */ |
28 | 28 | #define DISAS_TB_JUMP 3 /* only pc was modified statically */ |
29 | 29 | |
30 | -struct TranslationBlock; | |
30 | +typedef struct TranslationBlock TranslationBlock; | |
31 | 31 | |
32 | 32 | /* XXX: make safe guess about sizes */ |
33 | 33 | #define MAX_OP_PER_INSTR 64 |
... | ... | @@ -48,6 +48,7 @@ extern target_ulong gen_opc_pc[OPC_BUF_SIZE]; |
48 | 48 | extern target_ulong gen_opc_npc[OPC_BUF_SIZE]; |
49 | 49 | extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE]; |
50 | 50 | extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE]; |
51 | +extern uint16_t gen_opc_icount[OPC_BUF_SIZE]; | |
51 | 52 | extern target_ulong gen_opc_jump_pc[2]; |
52 | 53 | extern uint32_t gen_opc_hflags[OPC_BUF_SIZE]; |
53 | 54 | |
... | ... | @@ -75,6 +76,10 @@ int cpu_restore_state_copy(struct TranslationBlock *tb, |
75 | 76 | CPUState *env, unsigned long searched_pc, |
76 | 77 | void *puc); |
77 | 78 | void cpu_resume_from_signal(CPUState *env1, void *puc); |
79 | +void cpu_io_recompile(CPUState *env, void *retaddr); | |
80 | +TranslationBlock *tb_gen_code(CPUState *env, | |
81 | + target_ulong pc, target_ulong cs_base, int flags, | |
82 | + int cflags); | |
78 | 83 | void cpu_exec_init(CPUState *env); |
79 | 84 | int page_unprotect(target_ulong address, unsigned long pc, void *puc); |
80 | 85 | void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end, |
... | ... | @@ -117,16 +122,15 @@ static inline int tlb_set_page(CPUState *env1, target_ulong vaddr, |
117 | 122 | #define USE_DIRECT_JUMP |
118 | 123 | #endif |
119 | 124 | |
120 | -typedef struct TranslationBlock { | |
125 | +struct TranslationBlock { | |
121 | 126 | target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */ |
122 | 127 | target_ulong cs_base; /* CS base for this block */ |
123 | 128 | uint64_t flags; /* flags defining in which context the code was generated */ |
124 | 129 | uint16_t size; /* size of target code for this block (1 <= |
125 | 130 | size <= TARGET_PAGE_SIZE) */ |
126 | 131 | uint16_t cflags; /* compile flags */ |
127 | -#define CF_TB_FP_USED 0x0002 /* fp ops are used in the TB */ | |
128 | -#define CF_FP_USED 0x0004 /* fp ops are used in the TB or in a chained TB */ | |
129 | -#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */ | |
132 | +#define CF_COUNT_MASK 0x7fff | |
133 | +#define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */ | |
130 | 134 | |
131 | 135 | uint8_t *tc_ptr; /* pointer to the translated code */ |
132 | 136 | /* next matching tb for physical address. */ |
... | ... | @@ -150,7 +154,8 @@ typedef struct TranslationBlock { |
150 | 154 | jmp_first */ |
151 | 155 | struct TranslationBlock *jmp_next[2]; |
152 | 156 | struct TranslationBlock *jmp_first; |
153 | -} TranslationBlock; | |
157 | + uint32_t icount; | |
158 | +}; | |
154 | 159 | |
155 | 160 | static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc) |
156 | 161 | { |
... | ... | @@ -173,9 +178,11 @@ static inline unsigned int tb_phys_hash_func(unsigned long pc) |
173 | 178 | } |
174 | 179 | |
175 | 180 | TranslationBlock *tb_alloc(target_ulong pc); |
181 | +void tb_free(TranslationBlock *tb); | |
176 | 182 | void tb_flush(CPUState *env); |
177 | 183 | void tb_link_phys(TranslationBlock *tb, |
178 | 184 | target_ulong phys_pc, target_ulong phys_page2); |
185 | +void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr); | |
179 | 186 | |
180 | 187 | extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; |
181 | 188 | extern uint8_t *code_gen_ptr; |
... | ... | @@ -364,6 +371,20 @@ static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr) |
364 | 371 | } |
365 | 372 | return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base; |
366 | 373 | } |
374 | + | |
375 | +/* Deterministic execution requires that IO only be performed on the last | |
376 | + instruction of a TB so that interrupts take effect immediately. */ | |
377 | +static inline int can_do_io(CPUState *env) | |
378 | +{ | |
379 | + if (!use_icount) | |
380 | + return 1; | |
381 | + | |
382 | + /* If not executing code then assume we are ok. */ | |
383 | + if (!env->current_tb) | |
384 | + return 1; | |
385 | + | |
386 | + return env->can_do_io != 0; | |
387 | +} | |
367 | 388 | #endif |
368 | 389 | |
369 | 390 | #ifdef USE_KQEMU | ... | ... |
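The reworked cflags field above packs an instruction count into the low 15 bits (CF_COUNT_MASK) and keeps the top bit for CF_LAST_IO; a count of 0 means the translator fills the TB normally. A small sketch of building the field, in the way cpu_io_recompile() does (make_cflags is a hypothetical helper, not in the tree):

    #include <stdint.h>

    #define CF_COUNT_MASK 0x7fff
    #define CF_LAST_IO    0x8000

    /* Compile exactly n instructions; the last one may access I/O. */
    static uint32_t make_cflags(uint32_t n)
    {
        return (n & CF_COUNT_MASK) | CF_LAST_IO;
    }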
exec.c
... | ... | @@ -107,6 +107,13 @@ CPUState *first_cpu; |
107 | 107 | /* current CPU in the current thread. It is only valid inside |
108 | 108 | cpu_exec() */ |
109 | 109 | CPUState *cpu_single_env; |
110 | +/* 0 = Do not count executed instructions. | |
111 | + 1 = Precise instruction counting. | |
112 | + 2 = Adaptive rate instruction counting. */ | |
113 | +int use_icount = 0; | |
114 | +/* Current instruction counter. While executing translated code this may | |
115 | + include some instructions that have not yet been executed. */ | |
116 | +int64_t qemu_icount; | |
110 | 117 | |
111 | 118 | typedef struct PageDesc { |
112 | 119 | /* list of TBs intersecting this ram page */ |
... | ... | @@ -633,7 +640,7 @@ static inline void tb_reset_jump(TranslationBlock *tb, int n) |
633 | 640 | tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n])); |
634 | 641 | } |
635 | 642 | |
636 | -static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr) | |
643 | +void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr) | |
637 | 644 | { |
638 | 645 | CPUState *env; |
639 | 646 | PageDesc *p; |
... | ... | @@ -746,11 +753,9 @@ static void build_page_bitmap(PageDesc *p) |
746 | 753 | } |
747 | 754 | } |
748 | 755 | |
749 | -#ifdef TARGET_HAS_PRECISE_SMC | |
750 | - | |
751 | -static void tb_gen_code(CPUState *env, | |
752 | - target_ulong pc, target_ulong cs_base, int flags, | |
753 | - int cflags) | |
756 | +TranslationBlock *tb_gen_code(CPUState *env, | |
757 | + target_ulong pc, target_ulong cs_base, | |
758 | + int flags, int cflags) | |
754 | 759 | { |
755 | 760 | TranslationBlock *tb; |
756 | 761 | uint8_t *tc_ptr; |
... | ... | @@ -764,6 +769,8 @@ static void tb_gen_code(CPUState *env, |
764 | 769 | tb_flush(env); |
765 | 770 | /* cannot fail at this point */ |
766 | 771 | tb = tb_alloc(pc); |
772 | + /* Don't forget to invalidate previous TB info. */ | |
773 | + tb_invalidated_flag = 1; | |
767 | 774 | } |
768 | 775 | tc_ptr = code_gen_ptr; |
769 | 776 | tb->tc_ptr = tc_ptr; |
... | ... | @@ -780,8 +787,8 @@ static void tb_gen_code(CPUState *env, |
780 | 787 | phys_page2 = get_phys_addr_code(env, virt_page2); |
781 | 788 | } |
782 | 789 | tb_link_phys(tb, phys_pc, phys_page2); |
790 | + return tb; | |
783 | 791 | } |
784 | -#endif | |
785 | 792 | |
786 | 793 | /* invalidate all TBs which intersect with the target physical page |
787 | 794 | starting in range [start;end[. NOTE: start and end must refer to |
... | ... | @@ -836,13 +843,13 @@ void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t |
836 | 843 | if (current_tb_not_found) { |
837 | 844 | current_tb_not_found = 0; |
838 | 845 | current_tb = NULL; |
839 | - if (env->mem_write_pc) { | |
846 | + if (env->mem_io_pc) { | |
840 | 847 | /* now we have a real cpu fault */ |
841 | - current_tb = tb_find_pc(env->mem_write_pc); | |
848 | + current_tb = tb_find_pc(env->mem_io_pc); | |
842 | 849 | } |
843 | 850 | } |
844 | 851 | if (current_tb == tb && |
845 | - !(current_tb->cflags & CF_SINGLE_INSN)) { | |
852 | + (current_tb->cflags & CF_COUNT_MASK) != 1) { | |
846 | 853 | /* If we are modifying the current TB, we must stop |
847 | 854 | its execution. We could be more precise by checking |
848 | 855 | that the modification is after the current PC, but it |
... | ... | @@ -851,7 +858,7 @@ void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t |
851 | 858 | |
852 | 859 | current_tb_modified = 1; |
853 | 860 | cpu_restore_state(current_tb, env, |
854 | - env->mem_write_pc, NULL); | |
861 | + env->mem_io_pc, NULL); | |
855 | 862 | #if defined(TARGET_I386) |
856 | 863 | current_flags = env->hflags; |
857 | 864 | current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); |
... | ... | @@ -883,7 +890,7 @@ void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t |
883 | 890 | if (!p->first_tb) { |
884 | 891 | invalidate_page_bitmap(p); |
885 | 892 | if (is_cpu_write_access) { |
886 | - tlb_unprotect_code_phys(env, start, env->mem_write_vaddr); | |
893 | + tlb_unprotect_code_phys(env, start, env->mem_io_vaddr); | |
887 | 894 | } |
888 | 895 | } |
889 | 896 | #endif |
... | ... | @@ -893,8 +900,7 @@ void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t |
893 | 900 | modifying the memory. It will ensure that it cannot modify |
894 | 901 | itself */ |
895 | 902 | env->current_tb = NULL; |
896 | - tb_gen_code(env, current_pc, current_cs_base, current_flags, | |
897 | - CF_SINGLE_INSN); | |
903 | + tb_gen_code(env, current_pc, current_cs_base, current_flags, 1); | |
898 | 904 | cpu_resume_from_signal(env, NULL); |
899 | 905 | } |
900 | 906 | #endif |
... | ... | @@ -909,7 +915,7 @@ static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int le |
909 | 915 | if (1) { |
910 | 916 | if (loglevel) { |
911 | 917 | fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", |
912 | - cpu_single_env->mem_write_vaddr, len, | |
918 | + cpu_single_env->mem_io_vaddr, len, | |
913 | 919 | cpu_single_env->eip, |
914 | 920 | cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base); |
915 | 921 | } |
... | ... | @@ -961,7 +967,7 @@ static void tb_invalidate_phys_page(target_phys_addr_t addr, |
961 | 967 | tb = (TranslationBlock *)((long)tb & ~3); |
962 | 968 | #ifdef TARGET_HAS_PRECISE_SMC |
963 | 969 | if (current_tb == tb && |
964 | - !(current_tb->cflags & CF_SINGLE_INSN)) { | |
970 | + (current_tb->cflags & CF_COUNT_MASK) != 1) { | |
965 | 971 | /* If we are modifying the current TB, we must stop |
966 | 972 | its execution. We could be more precise by checking |
967 | 973 | that the modification is after the current PC, but it |
... | ... | @@ -990,8 +996,7 @@ static void tb_invalidate_phys_page(target_phys_addr_t addr, |
990 | 996 | modifying the memory. It will ensure that it cannot modify |
991 | 997 | itself */ |
992 | 998 | env->current_tb = NULL; |
993 | - tb_gen_code(env, current_pc, current_cs_base, current_flags, | |
994 | - CF_SINGLE_INSN); | |
999 | + tb_gen_code(env, current_pc, current_cs_base, current_flags, 1); | |
995 | 1000 | cpu_resume_from_signal(env, puc); |
996 | 1001 | } |
997 | 1002 | #endif |
... | ... | @@ -1068,6 +1073,17 @@ TranslationBlock *tb_alloc(target_ulong pc) |
1068 | 1073 | return tb; |
1069 | 1074 | } |
1070 | 1075 | |
1076 | +void tb_free(TranslationBlock *tb) | |
1077 | +{ | |
1078 | + /* In practice this is mostly used for single-use temporary TBs. | |
1079 | + Ignore the hard cases and just back up if this TB happens to | |
1080 | + be the last one generated. */ | |
1081 | + if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) { | |
1082 | + code_gen_ptr = tb->tc_ptr; | |
1083 | + nb_tbs--; | |
1084 | + } | |
1085 | +} | |
1086 | + | |
1071 | 1087 | /* add a new TB and link it to the physical page tables. phys_page2 is |
1072 | 1088 | (-1) to indicate that only one page contains the TB. */ |
1073 | 1089 | void tb_link_phys(TranslationBlock *tb, |
... | ... | @@ -1369,7 +1385,9 @@ void cpu_interrupt(CPUState *env, int mask) |
1369 | 1385 | TranslationBlock *tb; |
1370 | 1386 | static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED; |
1371 | 1387 | #endif |
1388 | + int old_mask; | |
1372 | 1389 | |
1390 | + old_mask = env->interrupt_request; | |
1373 | 1391 | /* FIXME: This is probably not threadsafe. A different thread could |
1374 | 1392 | be in the middle of a read-modify-write operation. */ |
1375 | 1393 | env->interrupt_request |= mask; |
... | ... | @@ -1379,13 +1397,25 @@ void cpu_interrupt(CPUState *env, int mask) |
1379 | 1397 | emulation this often isn't actually as bad as it sounds. Often |
1380 | 1398 | signals are used primarily to interrupt blocking syscalls. */ |
1381 | 1399 | #else |
1382 | - /* if the cpu is currently executing code, we must unlink it and | |
1383 | - all the potentially executing TB */ | |
1384 | - tb = env->current_tb; | |
1385 | - if (tb && !testandset(&interrupt_lock)) { | |
1386 | - env->current_tb = NULL; | |
1387 | - tb_reset_jump_recursive(tb); | |
1388 | - resetlock(&interrupt_lock); | |
1400 | + if (use_icount) { | |
1401 | + env->icount_decr.u16.high = 0x8000; | |
1402 | +#ifndef CONFIG_USER_ONLY | |
1403 | + /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means | |
1404 | + an async event happened and we need to process it. */ | |
1405 | + if (!can_do_io(env) | |
1406 | + && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) { | |
1407 | + cpu_abort(env, "Raised interrupt while not in I/O function"); | |
1408 | + } | |
1409 | +#endif | |
1410 | + } else { | |
1411 | + tb = env->current_tb; | |
1412 | + /* if the cpu is currently executing code, we must unlink it and | |
1413 | + all the potentially executing TB */ | |
1414 | + if (tb && !testandset(&interrupt_lock)) { | |
1415 | + env->current_tb = NULL; | |
1416 | + tb_reset_jump_recursive(tb); | |
1417 | + resetlock(&interrupt_lock); | |
1418 | + } | |
1389 | 1419 | } |
1390 | 1420 | #endif |
1391 | 1421 | } |
... | ... | @@ -2227,7 +2257,7 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr, |
2227 | 2257 | /* we remove the notdirty callback only if the code has been |
2228 | 2258 | flushed */ |
2229 | 2259 | if (dirty_flags == 0xff) |
2230 | - tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr); | |
2260 | + tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); | |
2231 | 2261 | } |
2232 | 2262 | |
2233 | 2263 | static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr, |
... | ... | @@ -2252,7 +2282,7 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr, |
2252 | 2282 | /* we remove the notdirty callback only if the code has been |
2253 | 2283 | flushed */ |
2254 | 2284 | if (dirty_flags == 0xff) |
2255 | - tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr); | |
2285 | + tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); | |
2256 | 2286 | } |
2257 | 2287 | |
2258 | 2288 | static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr, |
... | ... | @@ -2277,7 +2307,7 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr, |
2277 | 2307 | /* we remove the notdirty callback only if the code has been |
2278 | 2308 | flushed */ |
2279 | 2309 | if (dirty_flags == 0xff) |
2280 | - tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr); | |
2310 | + tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); | |
2281 | 2311 | } |
2282 | 2312 | |
2283 | 2313 | static CPUReadMemoryFunc *error_mem_read[3] = { |
... | ... | @@ -2299,7 +2329,7 @@ static void check_watchpoint(int offset, int flags) |
2299 | 2329 | target_ulong vaddr; |
2300 | 2330 | int i; |
2301 | 2331 | |
2302 | - vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset; | |
2332 | + vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset; | |
2303 | 2333 | for (i = 0; i < env->nb_watchpoints; i++) { |
2304 | 2334 | if (vaddr == env->watchpoint[i].vaddr |
2305 | 2335 | && (env->watchpoint[i].type & flags)) { |
... | ... | @@ -2967,6 +2997,65 @@ int cpu_memory_rw_debug(CPUState *env, target_ulong addr, |
2967 | 2997 | return 0; |
2968 | 2998 | } |
2969 | 2999 | |
3000 | +/* in deterministic execution mode, instructions doing device I/Os | |
3001 | + must be at the end of the TB */ | |
3002 | +void cpu_io_recompile(CPUState *env, void *retaddr) | |
3003 | +{ | |
3004 | + TranslationBlock *tb; | |
3005 | + uint32_t n, cflags; | |
3006 | + target_ulong pc, cs_base; | |
3007 | + uint64_t flags; | |
3008 | + | |
3009 | + tb = tb_find_pc((unsigned long)retaddr); | |
3010 | + if (!tb) { | |
3011 | + cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", | |
3012 | + retaddr); | |
3013 | + } | |
3014 | + n = env->icount_decr.u16.low + tb->icount; | |
3015 | + cpu_restore_state(tb, env, (unsigned long)retaddr, NULL); | |
3016 | + /* Calculate how many instructions had been executed before the fault | |
3017 | + occurred. */ | |
3018 | + n = n - env->icount_decr.u16.low; | |
3019 | + /* Generate a new TB ending on the I/O insn. */ | |
3020 | + n++; | |
3021 | + /* On MIPS and SH, delay slot instructions can only be restarted if | |
3022 | + they were already the first instruction in the TB. If this is not | |
3023 | + the first instruction in a TB then re-execute the preceding | |
3024 | + branch. */ | |
3025 | +#if defined(TARGET_MIPS) | |
3026 | + if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) { | |
3027 | + env->active_tc.PC -= 4; | |
3028 | + env->icount_decr.u16.low++; | |
3029 | + env->hflags &= ~MIPS_HFLAG_BMASK; | |
3030 | + } | |
3031 | +#elif defined(TARGET_SH4) | |
3032 | + if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 | |
3033 | + && n > 1) { | |
3034 | + env->pc -= 2; | |
3035 | + env->icount_decr.u16.low++; | |
3036 | + env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); | |
3037 | + } | |
3038 | +#endif | |
3039 | + /* This should never happen. */ | |
3040 | + if (n > CF_COUNT_MASK) | |
3041 | + cpu_abort(env, "TB too big during recompile"); | |
3042 | + | |
3043 | + cflags = n | CF_LAST_IO; | |
3044 | + pc = tb->pc; | |
3045 | + cs_base = tb->cs_base; | |
3046 | + flags = tb->flags; | |
3047 | + tb_phys_invalidate(tb, -1); | |
3048 | + /* FIXME: In theory this could raise an exception. In practice | |
3049 | + we have already translated the block once so it's probably ok. */ | |
3050 | + tb_gen_code(env, pc, cs_base, flags, cflags); | |
3051 | + /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not | |
3052 | + the first in the TB) then we end up generating a whole new TB and | |
3053 | + repeating the fault, which is horribly inefficient. | |
3054 | + Better would be to execute just this insn uncached, or generate a | |
3055 | + second new TB. */ | |
3056 | + cpu_resume_from_signal(env, NULL); | |
3057 | +} | |
3058 | + | |
2970 | 3059 | void dump_exec_info(FILE *f, |
2971 | 3060 | int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) |
2972 | 3061 | { | ... | ... |
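The instruction arithmetic in cpu_io_recompile() is easy to misread, so a worked example helps (numbers are illustrative only, not from the commit):

    /* Suppose tb->icount = 10 and the decrementer's low half was 50 at
       TB entry, so the generated prologue left it at 50 - 10 = 40.
       n = 40 + 10 = 50 recovers the entry value.
       cpu_restore_state() rewinds the low half to 50 - 3 = 47 when 3
       instructions completed before the I/O access, so n becomes
       50 - 47 = 3, and n++ requests a 4-insn TB ending on the I/O
       instruction. */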
hw/mips_timer.c
... | ... | @@ -91,7 +91,12 @@ static void mips_timer_cb (void *opaque) |
91 | 91 | if (env->CP0_Cause & (1 << CP0Ca_DC)) |
92 | 92 | return; |
93 | 93 | |
94 | + /* ??? This callback should occur when the counter is exactly equal to | |
95 | + the comparator value. Offset the count by one to avoid immediately | |
96 | + retriggering the callback before any virtual time has passed. */ | |
97 | + env->CP0_Count++; | |
94 | 98 | cpu_mips_timer_update(env); |
99 | + env->CP0_Count--; | |
95 | 100 | if (env->insn_flags & ISA_MIPS32R2) |
96 | 101 | env->CP0_Cause |= 1 << CP0Ca_TI; |
97 | 102 | qemu_irq_raise(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]); | ... | ... |
qemu-doc.texi
... | ... | @@ -965,6 +965,17 @@ On M68K this implements the "ColdFire GDB" interface used by libgloss. |
965 | 965 | |
966 | 966 | Note that this allows guest direct access to the host filesystem, |
967 | 967 | so should only be used with trusted guest OS. |
968 | + | |
969 | +@item -icount [N|auto] | |
969 | +Enable the virtual instruction counter. The virtual cpu will execute one | |
971 | +instruction every 2^N ns of virtual time. If @code{auto} is specified | |
972 | +then the virtual cpu speed will be automatically adjusted to keep virtual | |
973 | +time within a few seconds of real time. | |
974 | + | |
975 | +Note that while this option can give deterministic behavior, it does not | |
976 | +provide cycle-accurate emulation. Modern CPUs contain superscalar | |
977 | +out-of-order cores with complex cache hierarchies. The number of instructions | |
978 | +executed often has little or no correlation with actual performance. | |
968 | 979 | @end table |
969 | 980 | |
970 | 981 | @c man end | ... | ... |
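A hedged usage sketch for the new option (the binary name and disk arguments below are placeholders for illustration):

    qemu-system-mips -icount 2 -hda disk.img     # one insn per 2^2 = 4 ns of virtual time
    qemu-system-mips -icount auto -hda disk.img  # adapt the rate to track real time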
softmmu_template.h
... | ... | @@ -51,12 +51,18 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr, |
51 | 51 | int mmu_idx, |
52 | 52 | void *retaddr); |
53 | 53 | static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr, |
54 | - target_ulong addr) | |
54 | + target_ulong addr, | |
55 | + void *retaddr) | |
55 | 56 | { |
56 | 57 | DATA_TYPE res; |
57 | 58 | int index; |
58 | 59 | index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
59 | 60 | physaddr = (physaddr & TARGET_PAGE_MASK) + addr; |
61 | + env->mem_io_pc = (unsigned long)retaddr; | |
62 | + if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT) | |
63 | + && !can_do_io(env)) { | |
64 | + cpu_io_recompile(env, retaddr); | |
65 | + } | |
60 | 66 | |
61 | 67 | #if SHIFT <= 2 |
62 | 68 | res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr); |
... | ... | @@ -95,8 +101,9 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, |
95 | 101 | /* IO access */ |
96 | 102 | if ((addr & (DATA_SIZE - 1)) != 0) |
97 | 103 | goto do_unaligned_access; |
104 | + retaddr = GETPC(); | |
98 | 105 | addend = env->iotlb[mmu_idx][index]; |
99 | - res = glue(io_read, SUFFIX)(addend, addr); | |
106 | + res = glue(io_read, SUFFIX)(addend, addr, retaddr); | |
100 | 107 | } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { |
101 | 108 | /* slow unaligned access (it spans two pages or IO) */ |
102 | 109 | do_unaligned_access: |
... | ... | @@ -148,8 +155,9 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr, |
148 | 155 | /* IO access */ |
149 | 156 | if ((addr & (DATA_SIZE - 1)) != 0) |
150 | 157 | goto do_unaligned_access; |
158 | + retaddr = GETPC(); | |
151 | 159 | addend = env->iotlb[mmu_idx][index]; |
152 | - res = glue(io_read, SUFFIX)(addend, addr); | |
160 | + res = glue(io_read, SUFFIX)(addend, addr, retaddr); | |
153 | 161 | } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { |
154 | 162 | do_unaligned_access: |
155 | 163 | /* slow unaligned access (it spans two pages) */ |
... | ... | @@ -194,9 +202,13 @@ static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr, |
194 | 202 | int index; |
195 | 203 | index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
196 | 204 | physaddr = (physaddr & TARGET_PAGE_MASK) + addr; |
205 | + if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT) | |
206 | + && !can_do_io(env)) { | |
207 | + cpu_io_recompile(env, retaddr); | |
208 | + } | |
197 | 209 | |
198 | - env->mem_write_vaddr = addr; | |
199 | - env->mem_write_pc = (unsigned long)retaddr; | |
210 | + env->mem_io_vaddr = addr; | |
211 | + env->mem_io_pc = (unsigned long)retaddr; | |
200 | 212 | #if SHIFT <= 2 |
201 | 213 | io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val); |
202 | 214 | #else | ... | ... |
target-alpha/cpu.h
target-alpha/translate.c
... | ... | @@ -43,6 +43,19 @@ struct DisasContext { |
43 | 43 | uint32_t amask; |
44 | 44 | }; |
45 | 45 | |
46 | +TCGv cpu_env; | |
47 | + | |
48 | +#include "gen-icount.h" | |
49 | + | |
50 | +void alpha_translate_init() | |
51 | +{ | |
52 | + static int done_init = 0; | |
53 | + if (done_init) | |
54 | + return; | |
55 | + cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env"); | |
56 | + done_init = 1; | |
57 | +} | |
58 | + | |
46 | 59 | static always_inline void gen_op_nop (void) |
47 | 60 | { |
48 | 61 | #if defined(GENERATE_NOP) |
... | ... | @@ -1970,6 +1983,8 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
1970 | 1983 | uint16_t *gen_opc_end; |
1971 | 1984 | int j, lj = -1; |
1972 | 1985 | int ret; |
1986 | + int num_insns; | |
1987 | + int max_insns; | |
1973 | 1988 | |
1974 | 1989 | pc_start = tb->pc; |
1975 | 1990 | gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; |
... | ... | @@ -1981,6 +1996,12 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
1981 | 1996 | ctx.mem_idx = ((env->ps >> 3) & 3); |
1982 | 1997 | ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1; |
1983 | 1998 | #endif |
1999 | + num_insns = 0; | |
2000 | + max_insns = tb->cflags & CF_COUNT_MASK; | |
2001 | + if (max_insns == 0) | |
2002 | + max_insns = CF_COUNT_MASK; | |
2003 | + | |
2004 | + gen_icount_start(); | |
1984 | 2005 | for (ret = 0; ret == 0;) { |
1985 | 2006 | if (env->nb_breakpoints > 0) { |
1986 | 2007 | for(j = 0; j < env->nb_breakpoints; j++) { |
... | ... | @@ -1998,8 +2019,11 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
1998 | 2019 | gen_opc_instr_start[lj++] = 0; |
1999 | 2020 | gen_opc_pc[lj] = ctx.pc; |
2000 | 2021 | gen_opc_instr_start[lj] = 1; |
2022 | + gen_opc_icount[lj] = num_insns; | |
2001 | 2023 | } |
2002 | 2024 | } |
2025 | + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) | |
2026 | + gen_io_start(); | |
2003 | 2027 | #if defined ALPHA_DEBUG_DISAS |
2004 | 2028 | insn_count++; |
2005 | 2029 | if (logfile != NULL) { |
... | ... | @@ -2014,6 +2038,7 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
2014 | 2038 | fprintf(logfile, "opcode %08x %d\n", insn, insn_count); |
2015 | 2039 | } |
2016 | 2040 | #endif |
2041 | + num_insns++; | |
2017 | 2042 | ctx.pc += 4; |
2018 | 2043 | ret = translate_one(ctxp, insn); |
2019 | 2044 | if (ret != 0) |
... | ... | @@ -2022,7 +2047,8 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
2022 | 2047 | * generation |
2023 | 2048 | */ |
2024 | 2049 | if (((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) || |
2025 | - (env->singlestep_enabled)) { | |
2050 | + (env->singlestep_enabled) || | |
2051 | + num_insns >= max_insns) { | |
2026 | 2052 | break; |
2027 | 2053 | } |
2028 | 2054 | #if defined (DO_SINGLE_STEP) |
... | ... | @@ -2035,8 +2061,11 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
2035 | 2061 | #if defined (DO_TB_FLUSH) |
2036 | 2062 | gen_op_tb_flush(); |
2037 | 2063 | #endif |
2064 | + if (tb->cflags & CF_LAST_IO) | |
2065 | + gen_io_end(); | |
2038 | 2066 | /* Generate the return instruction */ |
2039 | 2067 | tcg_gen_exit_tb(0); |
2068 | + gen_icount_end(tb, num_insns); | |
2040 | 2069 | *gen_opc_ptr = INDEX_op_end; |
2041 | 2070 | if (search_pc) { |
2042 | 2071 | j = gen_opc_ptr - gen_opc_buf; |
... | ... | @@ -2045,6 +2074,7 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
2045 | 2074 | gen_opc_instr_start[lj++] = 0; |
2046 | 2075 | } else { |
2047 | 2076 | tb->size = ctx.pc - pc_start; |
2077 | + tb->icount = num_insns; | |
2048 | 2078 | } |
2049 | 2079 | #if defined ALPHA_DEBUG_DISAS |
2050 | 2080 | if (loglevel & CPU_LOG_TB_CPU) { |
... | ... | @@ -2079,6 +2109,7 @@ CPUAlphaState * cpu_alpha_init (const char *cpu_model) |
2079 | 2109 | if (!env) |
2080 | 2110 | return NULL; |
2081 | 2111 | cpu_exec_init(env); |
2112 | + alpha_translate_init(); | |
2082 | 2113 | tlb_flush(env, 1); |
2083 | 2114 | /* XXX: should not be hardcoded */ |
2084 | 2115 | env->implver = IMPLVER_2106x; | ... | ... |
target-arm/cpu.h
target-arm/translate.c
... | ... | @@ -84,6 +84,9 @@ static TCGv cpu_V0, cpu_V1, cpu_M0; |
84 | 84 | static TCGv cpu_T[2]; |
85 | 85 | static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d; |
86 | 86 | |
87 | +#define ICOUNT_TEMP cpu_T[0] | |
88 | +#include "gen-icount.h" | |
89 | + | |
87 | 90 | /* initialize TCG globals. */ |
88 | 91 | void arm_translate_init(void) |
89 | 92 | { |
... | ... | @@ -8539,6 +8542,8 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
8539 | 8542 | int j, lj; |
8540 | 8543 | target_ulong pc_start; |
8541 | 8544 | uint32_t next_page_start; |
8545 | + int num_insns; | |
8546 | + int max_insns; | |
8542 | 8547 | |
8543 | 8548 | /* generate intermediate code */ |
8544 | 8549 | num_temps = 0; |
... | ... | @@ -8575,6 +8580,12 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
8575 | 8580 | cpu_M0 = tcg_temp_new(TCG_TYPE_I64); |
8576 | 8581 | next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; |
8577 | 8582 | lj = -1; |
8583 | + num_insns = 0; | |
8584 | + max_insns = tb->cflags & CF_COUNT_MASK; | |
8585 | + if (max_insns == 0) | |
8586 | + max_insns = CF_COUNT_MASK; | |
8587 | + | |
8588 | + gen_icount_start(); | |
8578 | 8589 | /* Reset the conditional execution bits immediately. This avoids |
8579 | 8590 | complications trying to do it at the end of the block. */ |
8580 | 8591 | if (env->condexec_bits) |
... | ... | @@ -8625,8 +8636,12 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
8625 | 8636 | } |
8626 | 8637 | gen_opc_pc[lj] = dc->pc; |
8627 | 8638 | gen_opc_instr_start[lj] = 1; |
8639 | + gen_opc_icount[lj] = num_insns; | |
8628 | 8640 | } |
8629 | 8641 | |
8642 | + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) | |
8643 | + gen_io_start(); | |
8644 | + | |
8630 | 8645 | if (env->thumb) { |
8631 | 8646 | disas_thumb_insn(env, dc); |
8632 | 8647 | if (dc->condexec_mask) { |
... | ... | @@ -8659,9 +8674,20 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
8659 | 8674 | * Otherwise the subsequent code could get translated several times. |
8660 | 8675 | * Also stop translation when a page boundary is reached. This |
8661 | 8676 | * ensures prefetch aborts occur at the right place. */ |
8677 | + num_insns ++; | |
8662 | 8678 | } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && |
8663 | 8679 | !env->singlestep_enabled && |
8664 | - dc->pc < next_page_start); | |
8680 | + dc->pc < next_page_start && | |
8681 | + num_insns < max_insns); | |
8682 | + | |
8683 | + if (tb->cflags & CF_LAST_IO) { | |
8684 | + if (dc->condjmp) { | |
8685 | + /* FIXME: This can theoretically happen with self-modifying | |
8686 | + code. */ | |
8687 | + cpu_abort(env, "IO on conditional branch instruction"); | |
8688 | + } | |
8689 | + gen_io_end(); | |
8690 | + } | |
8665 | 8691 | |
8666 | 8692 | /* At this stage dc->condjmp will only be set when the skipped |
8667 | 8693 | instruction was a conditional branch or trap, and the PC has |
... | ... | @@ -8726,7 +8752,9 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
8726 | 8752 | dc->condjmp = 0; |
8727 | 8753 | } |
8728 | 8754 | } |
8755 | + | |
8729 | 8756 | done_generating: |
8757 | + gen_icount_end(tb, num_insns); | |
8730 | 8758 | *gen_opc_ptr = INDEX_op_end; |
8731 | 8759 | |
8732 | 8760 | #ifdef DEBUG_DISAS |
... | ... | @@ -8744,6 +8772,7 @@ done_generating: |
8744 | 8772 | gen_opc_instr_start[lj++] = 0; |
8745 | 8773 | } else { |
8746 | 8774 | tb->size = dc->pc - pc_start; |
8775 | + tb->icount = num_insns; | |
8747 | 8776 | } |
8748 | 8777 | return 0; |
8749 | 8778 | } | ... | ... |
target-cris/cpu.h
... | ... | @@ -238,5 +238,7 @@ static inline void cpu_clone_regs(CPUState *env, target_ulong newsp) |
238 | 238 | #define SFR_RW_MM_TLB_LO env->pregs[PR_SRS]][5 |
239 | 239 | #define SFR_RW_MM_TLB_HI env->pregs[PR_SRS]][6 |
240 | 240 | |
241 | +#define CPU_PC_FROM_TB(env, tb) env->pc = tb->pc | |
242 | + | |
241 | 243 | #include "cpu-all.h" |
242 | 244 | #endif | ... | ... |
target-cris/translate.c
... | ... | @@ -77,6 +77,8 @@ TCGv env_btaken; |
77 | 77 | TCGv env_btarget; |
78 | 78 | TCGv env_pc; |
79 | 79 | |
80 | +#include "gen-icount.h" | |
81 | + | |
80 | 82 | /* This is the state at translation time. */ |
81 | 83 | typedef struct DisasContext { |
82 | 84 | CPUState *env; |
... | ... | @@ -3032,6 +3034,8 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3032 | 3034 | struct DisasContext *dc = &ctx; |
3033 | 3035 | uint32_t next_page_start; |
3034 | 3036 | target_ulong npc; |
3037 | + int num_insns; | |
3038 | + int max_insns; | |
3035 | 3039 | |
3036 | 3040 | if (!logfile) |
3037 | 3041 | logfile = stderr; |
... | ... | @@ -3092,6 +3096,12 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3092 | 3096 | |
3093 | 3097 | next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; |
3094 | 3098 | lj = -1; |
3099 | + num_insns = 0; | |
3100 | + max_insns = tb->cflags & CF_COUNT_MASK; | |
3101 | + if (max_insns == 0) | |
3102 | + max_insns = CF_COUNT_MASK; | |
3103 | + | |
3104 | + gen_icount_start(); | |
3095 | 3105 | do |
3096 | 3106 | { |
3097 | 3107 | check_breakpoint(env, dc); |
... | ... | @@ -3108,6 +3118,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3108 | 3118 | else |
3109 | 3119 | gen_opc_pc[lj] = dc->pc; |
3110 | 3120 | gen_opc_instr_start[lj] = 1; |
3121 | + gen_opc_icount[lj] = num_insns; | |
3111 | 3122 | } |
3112 | 3123 | |
3113 | 3124 | /* Pretty disas. */ |
... | ... | @@ -3116,6 +3127,8 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3116 | 3127 | DIS(fprintf(logfile, "%x ", dc->pc)); |
3117 | 3128 | } |
3118 | 3129 | |
3130 | + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) | |
3131 | + gen_io_start(); | |
3119 | 3132 | dc->clear_x = 1; |
3120 | 3133 | if (unlikely(loglevel & CPU_LOG_TB_OP)) |
3121 | 3134 | tcg_gen_debug_insn_start(dc->pc); |
... | ... | @@ -3125,6 +3138,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3125 | 3138 | if (dc->clear_x) |
3126 | 3139 | cris_clear_x_flag(dc); |
3127 | 3140 | |
3141 | + num_insns++; | |
3128 | 3142 | /* Check for delayed branches here. If we do it before |
3129 | 3143 | actually genereating any host code, the simulator will just |
3130 | 3144 | loop doing nothing for on this program location. */ |
... | ... | @@ -3151,12 +3165,15 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3151 | 3165 | if (!(tb->pc & 1) && env->singlestep_enabled) |
3152 | 3166 | break; |
3153 | 3167 | } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end |
3154 | - && (dc->pc < next_page_start)); | |
3168 | + && (dc->pc < next_page_start) | |
3169 | + && num_insns < max_insns); | |
3155 | 3170 | |
3156 | 3171 | npc = dc->pc; |
3157 | 3172 | if (dc->jmp == JMP_DIRECT && !dc->delayed_branch) |
3158 | 3173 | npc = dc->jmp_pc; |
3159 | 3174 | |
3175 | + if (tb->cflags & CF_LAST_IO) | |
3176 | + gen_io_end(); | |
3160 | 3177 | /* Force an update if the per-tb cpu state has changed. */ |
3161 | 3178 | if (dc->is_jmp == DISAS_NEXT |
3162 | 3179 | && (dc->cpustate_changed || !dc->flagx_known |
... | ... | @@ -3194,6 +3211,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3194 | 3211 | break; |
3195 | 3212 | } |
3196 | 3213 | } |
3214 | + gen_icount_end(tb, num_insns); | |
3197 | 3215 | *gen_opc_ptr = INDEX_op_end; |
3198 | 3216 | if (search_pc) { |
3199 | 3217 | j = gen_opc_ptr - gen_opc_buf; |
... | ... | @@ -3202,6 +3220,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3202 | 3220 | gen_opc_instr_start[lj++] = 0; |
3203 | 3221 | } else { |
3204 | 3222 | tb->size = dc->pc - pc_start; |
3223 | + tb->icount = num_insns; | |
3205 | 3224 | } |
3206 | 3225 | |
3207 | 3226 | #ifdef DEBUG_DISAS | ... | ... |
target-i386/cpu.h
target-i386/translate.c
... | ... | @@ -65,6 +65,8 @@ static TCGv cpu_T[2], cpu_T3; |
65 | 65 | static TCGv cpu_tmp0, cpu_tmp1_i64, cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp4, cpu_ptr0, cpu_ptr1; |
66 | 66 | static TCGv cpu_tmp5, cpu_tmp6; |
67 | 67 | |
68 | +#include "gen-icount.h" | |
69 | + | |
68 | 70 | #ifdef TARGET_X86_64 |
69 | 71 | static int x86_64_hregs; |
70 | 72 | #endif |
... | ... | @@ -1203,6 +1205,8 @@ static inline void gen_cmps(DisasContext *s, int ot) |
1203 | 1205 | |
1204 | 1206 | static inline void gen_ins(DisasContext *s, int ot) |
1205 | 1207 | { |
1208 | + if (use_icount) | |
1209 | + gen_io_start(); | |
1206 | 1210 | gen_string_movl_A0_EDI(s); |
1207 | 1211 | /* Note: we must do this dummy write first to be restartable in |
1208 | 1212 | case of page fault. */ |
... | ... | @@ -1215,10 +1219,14 @@ static inline void gen_ins(DisasContext *s, int ot) |
1215 | 1219 | gen_op_st_T0_A0(ot + s->mem_index); |
1216 | 1220 | gen_op_movl_T0_Dshift(ot); |
1217 | 1221 | gen_op_add_reg_T0(s->aflag, R_EDI); |
1222 | + if (use_icount) | |
1223 | + gen_io_end(); | |
1218 | 1224 | } |
1219 | 1225 | |
1220 | 1226 | static inline void gen_outs(DisasContext *s, int ot) |
1221 | 1227 | { |
1228 | + if (use_icount) | |
1229 | + gen_io_start(); | |
1222 | 1230 | gen_string_movl_A0_ESI(s); |
1223 | 1231 | gen_op_ld_T0_A0(ot + s->mem_index); |
1224 | 1232 | |
... | ... | @@ -1230,6 +1238,8 @@ static inline void gen_outs(DisasContext *s, int ot) |
1230 | 1238 | |
1231 | 1239 | gen_op_movl_T0_Dshift(ot); |
1232 | 1240 | gen_op_add_reg_T0(s->aflag, R_ESI); |
1241 | + if (use_icount) | |
1242 | + gen_io_end(); | |
1233 | 1243 | } |
1234 | 1244 | |
1235 | 1245 | /* same method as Valgrind : we generate jumps to current or next |
... | ... | @@ -5570,6 +5580,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5570 | 5580 | gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); |
5571 | 5581 | } else { |
5572 | 5582 | gen_ins(s, ot); |
5583 | + if (use_icount) { | |
5584 | + gen_jmp(s, s->pc - s->cs_base); | |
5585 | + } | |
5573 | 5586 | } |
5574 | 5587 | break; |
5575 | 5588 | case 0x6e: /* outsS */ |
... | ... | @@ -5586,6 +5599,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5586 | 5599 | gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); |
5587 | 5600 | } else { |
5588 | 5601 | gen_outs(s, ot); |
5602 | + if (use_icount) { | |
5603 | + gen_jmp(s, s->pc - s->cs_base); | |
5604 | + } | |
5589 | 5605 | } |
5590 | 5606 | break; |
5591 | 5607 | |
... | ... | @@ -5602,9 +5618,15 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5602 | 5618 | gen_op_movl_T0_im(val); |
5603 | 5619 | gen_check_io(s, ot, pc_start - s->cs_base, |
5604 | 5620 | SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); |
5621 | + if (use_icount) | |
5622 | + gen_io_start(); | |
5605 | 5623 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); |
5606 | 5624 | tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1], cpu_tmp2_i32); |
5607 | 5625 | gen_op_mov_reg_T1(ot, R_EAX); |
5626 | + if (use_icount) { | |
5627 | + gen_io_end(); | |
5628 | + gen_jmp(s, s->pc - s->cs_base); | |
5629 | + } | |
5608 | 5630 | break; |
5609 | 5631 | case 0xe6: |
5610 | 5632 | case 0xe7: |
... | ... | @@ -5618,10 +5640,16 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5618 | 5640 | svm_is_rep(prefixes)); |
5619 | 5641 | gen_op_mov_TN_reg(ot, 1, R_EAX); |
5620 | 5642 | |
5643 | + if (use_icount) | |
5644 | + gen_io_start(); | |
5621 | 5645 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); |
5622 | 5646 | tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); |
5623 | 5647 | tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]); |
5624 | 5648 | tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32); |
5649 | + if (use_icount) { | |
5650 | + gen_io_end(); | |
5651 | + gen_jmp(s, s->pc - s->cs_base); | |
5652 | + } | |
5625 | 5653 | break; |
5626 | 5654 | case 0xec: |
5627 | 5655 | case 0xed: |
... | ... | @@ -5633,9 +5661,15 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5633 | 5661 | gen_op_andl_T0_ffff(); |
5634 | 5662 | gen_check_io(s, ot, pc_start - s->cs_base, |
5635 | 5663 | SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); |
5664 | + if (use_icount) | |
5665 | + gen_io_start(); | |
5636 | 5666 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); |
5637 | 5667 | tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1], cpu_tmp2_i32); |
5638 | 5668 | gen_op_mov_reg_T1(ot, R_EAX); |
5669 | + if (use_icount) { | |
5670 | + gen_io_end(); | |
5671 | + gen_jmp(s, s->pc - s->cs_base); | |
5672 | + } | |
5639 | 5673 | break; |
5640 | 5674 | case 0xee: |
5641 | 5675 | case 0xef: |
... | ... | @@ -5649,10 +5683,16 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
5649 | 5683 | svm_is_rep(prefixes)); |
5650 | 5684 | gen_op_mov_TN_reg(ot, 1, R_EAX); |
5651 | 5685 | |
5686 | + if (use_icount) | |
5687 | + gen_io_start(); | |
5652 | 5688 | tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); |
5653 | 5689 | tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); |
5654 | 5690 | tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]); |
5655 | 5691 | tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32); |
5692 | + if (use_icount) { | |
5693 | + gen_io_end(); | |
5694 | + gen_jmp(s, s->pc - s->cs_base); | |
5695 | + } | |
5656 | 5696 | break; |
5657 | 5697 | |
5658 | 5698 | /************************/ |
... | ... | @@ -7109,6 +7149,8 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
7109 | 7149 | uint64_t flags; |
7110 | 7150 | target_ulong pc_start; |
7111 | 7151 | target_ulong cs_base; |
7152 | + int num_insns; | |
7153 | + int max_insns; | |
7112 | 7154 | |
7113 | 7155 | /* generate intermediate code */ |
7114 | 7156 | pc_start = tb->pc; |
... | ... | @@ -7179,7 +7221,12 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
7179 | 7221 | dc->is_jmp = DISAS_NEXT; |
7180 | 7222 | pc_ptr = pc_start; |
7181 | 7223 | lj = -1; |
7224 | + num_insns = 0; | |
7225 | + max_insns = tb->cflags & CF_COUNT_MASK; | |
7226 | + if (max_insns == 0) | |
7227 | + max_insns = CF_COUNT_MASK; | |
7182 | 7228 | |
7229 | + gen_icount_start(); | |
7183 | 7230 | for(;;) { |
7184 | 7231 | if (env->nb_breakpoints > 0) { |
7185 | 7232 | for(j = 0; j < env->nb_breakpoints; j++) { |
... | ... | @@ -7199,8 +7246,13 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
7199 | 7246 | gen_opc_pc[lj] = pc_ptr; |
7200 | 7247 | gen_opc_cc_op[lj] = dc->cc_op; |
7201 | 7248 | gen_opc_instr_start[lj] = 1; |
7249 | + gen_opc_icount[lj] = num_insns; | |
7202 | 7250 | } |
7251 | + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) | |
7252 | + gen_io_start(); | |
7253 | + | |
7203 | 7254 | pc_ptr = disas_insn(dc, pc_ptr); |
7255 | + num_insns++; | |
7204 | 7256 | /* stop translation if indicated */ |
7205 | 7257 | if (dc->is_jmp) |
7206 | 7258 | break; |
... | ... | @@ -7210,20 +7262,23 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
7210 | 7262 | the flag and abort the translation to give the irqs a |
7211 | 7263 | change to be happen */ |
7212 | 7264 | if (dc->tf || dc->singlestep_enabled || |
7213 | - (flags & HF_INHIBIT_IRQ_MASK) || | |
7214 | - (cflags & CF_SINGLE_INSN)) { | |
7265 | + (flags & HF_INHIBIT_IRQ_MASK)) { | |
7215 | 7266 | gen_jmp_im(pc_ptr - dc->cs_base); |
7216 | 7267 | gen_eob(dc); |
7217 | 7268 | break; |
7218 | 7269 | } |
7219 | 7270 | /* if too long translation, stop generation too */ |
7220 | 7271 | if (gen_opc_ptr >= gen_opc_end || |
7221 | - (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32)) { | |
7272 | + (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) || | |
7273 | + num_insns >= max_insns) { | |
7222 | 7274 | gen_jmp_im(pc_ptr - dc->cs_base); |
7223 | 7275 | gen_eob(dc); |
7224 | 7276 | break; |
7225 | 7277 | } |
7226 | 7278 | } |
7279 | + if (tb->cflags & CF_LAST_IO) | |
7280 | + gen_io_end(); | |
7281 | + gen_icount_end(tb, num_insns); | |
7227 | 7282 | *gen_opc_ptr = INDEX_op_end; |
7228 | 7283 | /* we don't forget to fill the last values */ |
7229 | 7284 | if (search_pc) { |
... | ... | @@ -7252,8 +7307,10 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
7252 | 7307 | } |
7253 | 7308 | #endif |
7254 | 7309 | |
7255 | - if (!search_pc) | |
7310 | + if (!search_pc) { | |
7256 | 7311 | tb->size = pc_ptr - pc_start; |
7312 | + tb->icount = num_insns; | |
7313 | + } | |
7257 | 7314 | return 0; |
7258 | 7315 | } |
7259 | 7316 | ... | ... |
target-m68k/cpu.h
target-m68k/translate.c
... | ... | @@ -63,6 +63,8 @@ static TCGv NULL_QREG; |
63 | 63 | /* Used to distinguish stores from bad addressing modes. */ |
64 | 64 | static TCGv store_dummy; |
65 | 65 | |
66 | +#include "gen-icount.h" | |
67 | + | |
66 | 68 | void m68k_tcg_init(void) |
67 | 69 | { |
68 | 70 | char *p; |
... | ... | @@ -2919,6 +2921,8 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
2919 | 2921 | target_ulong pc_start; |
2920 | 2922 | int pc_offset; |
2921 | 2923 | int last_cc_op; |
2924 | + int num_insns; | |
2925 | + int max_insns; | |
2922 | 2926 | |
2923 | 2927 | /* generate intermediate code */ |
2924 | 2928 | pc_start = tb->pc; |
... | ... | @@ -2937,6 +2941,12 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
2937 | 2941 | dc->is_mem = 0; |
2938 | 2942 | dc->mactmp = NULL_QREG; |
2939 | 2943 | lj = -1; |
2944 | + num_insns = 0; | |
2945 | + max_insns = tb->cflags & CF_COUNT_MASK; | |
2946 | + if (max_insns == 0) | |
2947 | + max_insns = CF_COUNT_MASK; | |
2948 | + | |
2949 | + gen_icount_start(); | |
2940 | 2950 | do { |
2941 | 2951 | pc_offset = dc->pc - pc_start; |
2942 | 2952 | gen_throws_exception = NULL; |
... | ... | @@ -2960,10 +2970,14 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
2960 | 2970 | } |
2961 | 2971 | gen_opc_pc[lj] = dc->pc; |
2962 | 2972 | gen_opc_instr_start[lj] = 1; |
2973 | + gen_opc_icount[lj] = num_insns; | |
2963 | 2974 | } |
2975 | + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) | |
2976 | + gen_io_start(); | |
2964 | 2977 | last_cc_op = dc->cc_op; |
2965 | 2978 | dc->insn_pc = dc->pc; |
2966 | 2979 | disas_m68k_insn(env, dc); |
2980 | + num_insns++; | |
2967 | 2981 | |
2968 | 2982 | /* Terminate the TB on memory ops if watchpoints are present. */ |
2969 | 2983 | /* FIXME: This should be replaced by the deterministic execution |
... | ... | @@ -2972,8 +2986,11 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
2972 | 2986 | break; |
2973 | 2987 | } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && |
2974 | 2988 | !env->singlestep_enabled && |
2975 | - (pc_offset) < (TARGET_PAGE_SIZE - 32)); | |
2989 | + (pc_offset) < (TARGET_PAGE_SIZE - 32) && | |
2990 | + num_insns < max_insns); | |
2976 | 2991 | |
2992 | + if (tb->cflags & CF_LAST_IO) | |
2993 | + gen_io_end(); | |
2977 | 2994 | if (__builtin_expect(env->singlestep_enabled, 0)) { |
2978 | 2995 | /* Make sure the pc is updated, and raise a debug exception. */ |
2979 | 2996 | if (!dc->is_jmp) { |
... | ... | @@ -2999,6 +3016,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
2999 | 3016 | break; |
3000 | 3017 | } |
3001 | 3018 | } |
3019 | + gen_icount_end(tb, num_insns); | |
3002 | 3020 | *gen_opc_ptr = INDEX_op_end; |
3003 | 3021 | |
3004 | 3022 | #ifdef DEBUG_DISAS |
... | ... | @@ -3016,6 +3034,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, |
3016 | 3034 | gen_opc_instr_start[lj++] = 0; |
3017 | 3035 | } else { |
3018 | 3036 | tb->size = dc->pc - pc_start; |
3037 | + tb->icount = num_insns; | |
3019 | 3038 | } |
3020 | 3039 | |
3021 | 3040 | //optimize_flags(); | ... | ... |
target-mips/cpu.h
... | ... | @@ -572,4 +572,10 @@ CPUMIPSState *cpu_mips_init(const char *cpu_model); |
572 | 572 | uint32_t cpu_mips_get_clock (void); |
573 | 573 | int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc); |
574 | 574 | |
575 | +#define CPU_PC_FROM_TB(env, tb) do { \ | |
576 | + env->active_tc.PC = tb->pc; \ | |
577 | + env->hflags &= ~MIPS_HFLAG_BMASK; \ | |
578 | + env->hflags |= tb->flags & MIPS_HFLAG_BMASK; \ | |
579 | + } while (0) | |
580 | + | |
575 | 581 | #endif /* !defined (__MIPS_CPU_H__) */ | ... | ... |
target-mips/translate.c
... | ... | @@ -428,6 +428,8 @@ static TCGv cpu_env, current_fpu; |
428 | 428 | /* FPU TNs, global for now. */ |
429 | 429 | static TCGv fpu32_T[3], fpu64_T[3], fpu32h_T[3]; |
430 | 430 | |
431 | +#include "gen-icount.h" | |
432 | + | |
431 | 433 | static inline void tcg_gen_helper_0_i(void *func, TCGv arg) |
432 | 434 | { |
433 | 435 | TCGv tmp = tcg_const_i32(arg); |
... | ... | @@ -3061,7 +3063,14 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int se |
3061 | 3063 | case 9: |
3062 | 3064 | switch (sel) { |
3063 | 3065 | case 0: |
3066 | + /* Mark as an IO operation because we read the time. */ | |
3067 | + if (use_icount) | |
3068 | + gen_io_start(); | |
3064 | 3069 | tcg_gen_helper_1_0(do_mfc0_count, t0); |
3070 | + if (use_icount) { | |
3071 | + gen_io_end(); | |
3072 | + ctx->bstate = BS_STOP; | |
3073 | + } | |
3065 | 3074 | rn = "Count"; |
3066 | 3075 | break; |
3067 | 3076 | /* 6,7 are implementation dependent */ |
... | ... | @@ -3422,6 +3431,9 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int se |
3422 | 3431 | if (sel != 0) |
3423 | 3432 | check_insn(env, ctx, ISA_MIPS32); |
3424 | 3433 | |
3434 | + if (use_icount) | |
3435 | + gen_io_start(); | |
3436 | + | |
3425 | 3437 | switch (reg) { |
3426 | 3438 | case 0: |
3427 | 3439 | switch (sel) { |
... | ... | @@ -4004,6 +4016,11 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int se |
4004 | 4016 | rn, reg, sel); |
4005 | 4017 | } |
4006 | 4018 | #endif |
4019 | + /* For simplicity, assume that all writes can cause interrupts. */ | 
4020 | + if (use_icount) { | |
4021 | + gen_io_end(); | |
4022 | + ctx->bstate = BS_STOP; | |
4023 | + } | |
4007 | 4024 | return; |
4008 | 4025 | |
4009 | 4026 | die: |
... | ... | @@ -4238,7 +4255,14 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int s |
4238 | 4255 | case 9: |
4239 | 4256 | switch (sel) { |
4240 | 4257 | case 0: |
4258 | + /* Mark as an IO operation because we read the time. */ | |
4259 | + if (use_icount) | |
4260 | + gen_io_start(); | |
4241 | 4261 | tcg_gen_helper_1_0(do_mfc0_count, t0); |
4262 | + if (use_icount) { | |
4263 | + gen_io_end(); | |
4264 | + ctx->bstate = BS_STOP; | |
4265 | + } | |
4242 | 4266 | rn = "Count"; |
4243 | 4267 | break; |
4244 | 4268 | /* 6,7 are implementation dependent */ |
... | ... | @@ -4591,6 +4615,9 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int s |
4591 | 4615 | if (sel != 0) |
4592 | 4616 | check_insn(env, ctx, ISA_MIPS64); |
4593 | 4617 | |
4618 | + if (use_icount) | |
4619 | + gen_io_start(); | |
4620 | + | |
4594 | 4621 | switch (reg) { |
4595 | 4622 | case 0: |
4596 | 4623 | switch (sel) { |
... | ... | @@ -5161,6 +5188,11 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int s |
5161 | 5188 | } |
5162 | 5189 | #endif |
5163 | 5190 | tcg_temp_free(t0); |
5191 | + /* For simplicity, assume that all writes can cause interrupts. */ | 
5192 | + if (use_icount) { | |
5193 | + gen_io_end(); | |
5194 | + ctx->bstate = BS_STOP; | |
5195 | + } | |
5164 | 5196 | return; |
5165 | 5197 | |
5166 | 5198 | die: |
... | ... | @@ -7760,6 +7792,7 @@ static void decode_opc (CPUState *env, DisasContext *ctx) |
7760 | 7792 | ctx->hflags &= ~MIPS_HFLAG_BMASK; |
7761 | 7793 | ctx->bstate = BS_BRANCH; |
7762 | 7794 | save_cpu_state(ctx, 0); |
7795 | + /* FIXME: Need to clear can_do_io. */ | |
7763 | 7796 | switch (hflags) { |
7764 | 7797 | case MIPS_HFLAG_B: |
7765 | 7798 | /* unconditional branch */ |
... | ... | @@ -7807,6 +7840,8 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb, |
7807 | 7840 | target_ulong pc_start; |
7808 | 7841 | uint16_t *gen_opc_end; |
7809 | 7842 | int j, lj = -1; |
7843 | + int num_insns; | |
7844 | + int max_insns; | |
7810 | 7845 | |
7811 | 7846 | if (search_pc && loglevel) |
7812 | 7847 | fprintf (logfile, "search pc %d\n", search_pc); |
... | ... | @@ -7826,6 +7861,11 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb, |
7826 | 7861 | #else |
7827 | 7862 | ctx.mem_idx = ctx.hflags & MIPS_HFLAG_KSU; |
7828 | 7863 | #endif |
7864 | + num_insns = 0; | 
7866 | + max_insns = tb->cflags & CF_COUNT_MASK; | |
7867 | + if (max_insns == 0) | |
7868 | + max_insns = CF_COUNT_MASK; | |
7829 | 7869 | #ifdef DEBUG_DISAS |
7830 | 7870 | if (loglevel & CPU_LOG_TB_CPU) { |
7831 | 7871 | fprintf(logfile, "------------------------------------------------\n"); |
... | ... | @@ -7838,6 +7878,7 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb, |
7838 | 7878 | fprintf(logfile, "\ntb %p idx %d hflags %04x\n", |
7839 | 7879 | tb, ctx.mem_idx, ctx.hflags); |
7840 | 7880 | #endif |
7881 | + gen_icount_start(); | |
7841 | 7882 | while (ctx.bstate == BS_NONE) { |
7842 | 7883 | if (env->nb_breakpoints > 0) { |
7843 | 7884 | for(j = 0; j < env->nb_breakpoints; j++) { |
... | ... | @@ -7863,10 +7904,14 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb, |
7863 | 7904 | gen_opc_pc[lj] = ctx.pc; |
7864 | 7905 | gen_opc_hflags[lj] = ctx.hflags & MIPS_HFLAG_BMASK; |
7865 | 7906 | gen_opc_instr_start[lj] = 1; |
7907 | + gen_opc_icount[lj] = num_insns; | |
7866 | 7908 | } |
7909 | + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) | |
7910 | + gen_io_start(); | |
7867 | 7911 | ctx.opcode = ldl_code(ctx.pc); |
7868 | 7912 | decode_opc(env, &ctx); |
7869 | 7913 | ctx.pc += 4; |
7914 | + num_insns++; | |
7870 | 7915 | |
7871 | 7916 | if (env->singlestep_enabled) |
7872 | 7917 | break; |
... | ... | @@ -7880,10 +7925,14 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb, |
7880 | 7925 | if (gen_opc_ptr >= gen_opc_end) |
7881 | 7926 | break; |
7882 | 7927 | |
7928 | + if (num_insns >= max_insns) | |
7929 | + break; | |
7883 | 7930 | #if defined (MIPS_SINGLE_STEP) |
7884 | 7931 | break; |
7885 | 7932 | #endif |
7886 | 7933 | } |
7934 | + if (tb->cflags & CF_LAST_IO) | |
7935 | + gen_io_end(); | |
7887 | 7936 | if (env->singlestep_enabled) { |
7888 | 7937 | save_cpu_state(&ctx, ctx.bstate == BS_NONE); |
7889 | 7938 | tcg_gen_helper_0_i(do_raise_exception, EXCP_DEBUG); |
... | ... | @@ -7907,6 +7956,7 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb, |
7907 | 7956 | } |
7908 | 7957 | } |
7909 | 7958 | done_generating: |
7959 | + gen_icount_end(tb, num_insns); | |
7910 | 7960 | *gen_opc_ptr = INDEX_op_end; |
7911 | 7961 | if (search_pc) { |
7912 | 7962 | j = gen_opc_ptr - gen_opc_buf; |
... | ... | @@ -7915,6 +7965,7 @@ done_generating: |
7915 | 7965 | gen_opc_instr_start[lj++] = 0; |
7916 | 7966 | } else { |
7917 | 7967 | tb->size = ctx.pc - pc_start; |
7968 | + tb->icount = num_insns; | |
7918 | 7969 | } |
7919 | 7970 | #ifdef DEBUG_DISAS |
7920 | 7971 | #if defined MIPS_DEBUG_DISAS | ... | ... |
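
The CP0 Count accesses above illustrate the rule this commit applies to anything touching the virtual clock: treat it as IO. gen_io_start() opens the window in which a clock read is legal (cpu_get_icount() prints "Bad clock read" otherwise), gen_io_end() closes it, and BS_STOP ends the block so the next timer deadline is recomputed. A restatement of the pattern with editorial comments; the generator calls are the ones used in the diff:

    if (use_icount)
        gen_io_start();                      /* open IO window: can_do_io = 1 */
    tcg_gen_helper_1_0(do_mfc0_count, t0);   /* helper consults the vm_clock  */
    if (use_icount) {
        gen_io_end();                        /* close the window              */
        ctx->bstate = BS_STOP;               /* end TB: deadline may change   */
    }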
target-ppc/cpu.h
... | ... | @@ -697,6 +697,7 @@ struct mmu_ctx_t { |
697 | 697 | |
698 | 698 | /*****************************************************************************/ |
699 | 699 | CPUPPCState *cpu_ppc_init (const char *cpu_model); |
700 | +void ppc_translate_init(void); | |
700 | 701 | int cpu_ppc_exec (CPUPPCState *s); |
701 | 702 | void cpu_ppc_close (CPUPPCState *s); |
702 | 703 | /* you can call this signal handler from your SIGBUS and SIGSEGV |
... | ... | @@ -833,6 +834,8 @@ static inline void cpu_clone_regs(CPUState *env, target_ulong newsp) |
833 | 834 | } |
834 | 835 | #endif |
835 | 836 | |
837 | +#define CPU_PC_FROM_TB(env, tb) env->nip = tb->pc | |
838 | + | |
836 | 839 | #include "cpu-all.h" |
837 | 840 | |
838 | 841 | /*****************************************************************************/ | ... | ... |
target-ppc/helper.c
... | ... | @@ -2977,6 +2977,7 @@ CPUPPCState *cpu_ppc_init (const char *cpu_model) |
2977 | 2977 | if (!env) |
2978 | 2978 | return NULL; |
2979 | 2979 | cpu_exec_init(env); |
2980 | + ppc_translate_init(); | |
2980 | 2981 | env->cpu_model_str = cpu_model; |
2981 | 2982 | cpu_ppc_register_internal(env, def); |
2982 | 2983 | cpu_ppc_reset(env); | ... | ... |
target-ppc/translate.c
... | ... | @@ -43,6 +43,19 @@ |
43 | 43 | /*****************************************************************************/ |
44 | 44 | /* Code translation helpers */ |
45 | 45 | |
46 | +static TCGv cpu_env; | |
47 | + | |
48 | +#include "gen-icount.h" | |
49 | + | |
50 | +void ppc_translate_init(void) | |
51 | +{ | |
52 | + static int done_init = 0; | 
53 | + if (done_init) | |
54 | + return; | |
55 | + cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env"); | |
56 | + done_init = 1; | |
57 | +} | |
58 | + | |
46 | 59 | #if defined(OPTIMIZE_FPRF_UPDATE) |
47 | 60 | static uint16_t *gen_fprf_buf[OPC_BUF_SIZE]; |
48 | 61 | static uint16_t **gen_fprf_ptr; |
... | ... | @@ -6168,6 +6181,8 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
6168 | 6181 | uint16_t *gen_opc_end; |
6169 | 6182 | int supervisor, little_endian; |
6170 | 6183 | int j, lj = -1; |
6184 | + int num_insns; | |
6185 | + int max_insns; | |
6171 | 6186 | |
6172 | 6187 | pc_start = tb->pc; |
6173 | 6188 | gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; |
... | ... | @@ -6211,6 +6226,12 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
6211 | 6226 | /* Single step trace mode */ |
6212 | 6227 | msr_se = 1; |
6213 | 6228 | #endif |
6229 | + num_insns = 0; | |
6230 | + max_insns = tb->cflags & CF_COUNT_MASK; | |
6231 | + if (max_insns == 0) | |
6232 | + max_insns = CF_COUNT_MASK; | |
6233 | + | |
6234 | + gen_icount_start(); | |
6214 | 6235 | /* Set env in case of segfault during code fetch */ |
6215 | 6236 | while (ctx.exception == POWERPC_EXCP_NONE && gen_opc_ptr < gen_opc_end) { |
6216 | 6237 | if (unlikely(env->nb_breakpoints > 0)) { |
... | ... | @@ -6230,6 +6251,7 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
6230 | 6251 | gen_opc_instr_start[lj++] = 0; |
6231 | 6252 | gen_opc_pc[lj] = ctx.nip; |
6232 | 6253 | gen_opc_instr_start[lj] = 1; |
6254 | + gen_opc_icount[lj] = num_insns; | |
6233 | 6255 | } |
6234 | 6256 | } |
6235 | 6257 | #if defined PPC_DEBUG_DISAS |
... | ... | @@ -6239,6 +6261,8 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
6239 | 6261 | ctx.nip, supervisor, (int)msr_ir); |
6240 | 6262 | } |
6241 | 6263 | #endif |
6264 | + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) | |
6265 | + gen_io_start(); | |
6242 | 6266 | if (unlikely(little_endian)) { |
6243 | 6267 | ctx.opcode = bswap32(ldl_code(ctx.nip)); |
6244 | 6268 | } else { |
... | ... | @@ -6253,6 +6277,7 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
6253 | 6277 | #endif |
6254 | 6278 | ctx.nip += 4; |
6255 | 6279 | table = env->opcodes; |
6280 | + num_insns++; | |
6256 | 6281 | handler = table[opc1(ctx.opcode)]; |
6257 | 6282 | if (is_indirect_opcode(handler)) { |
6258 | 6283 | table = ind_table(handler); |
... | ... | @@ -6306,7 +6331,8 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
6306 | 6331 | ctx.exception != POWERPC_EXCP_BRANCH)) { |
6307 | 6332 | GEN_EXCP(ctxp, POWERPC_EXCP_TRACE, 0); |
6308 | 6333 | } else if (unlikely(((ctx.nip & (TARGET_PAGE_SIZE - 1)) == 0) || |
6309 | - (env->singlestep_enabled))) { | |
6334 | + (env->singlestep_enabled) || | |
6335 | + num_insns >= max_insns)) { | |
6310 | 6336 | /* if we reach a page boundary or are single stepping, stop |
6311 | 6337 | * generation |
6312 | 6338 | */ |
... | ... | @@ -6316,6 +6342,8 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
6316 | 6342 | break; |
6317 | 6343 | #endif |
6318 | 6344 | } |
6345 | + if (tb->cflags & CF_LAST_IO) | |
6346 | + gen_io_end(); | |
6319 | 6347 | if (ctx.exception == POWERPC_EXCP_NONE) { |
6320 | 6348 | gen_goto_tb(&ctx, 0, ctx.nip); |
6321 | 6349 | } else if (ctx.exception != POWERPC_EXCP_BRANCH) { |
... | ... | @@ -6326,6 +6354,7 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
6326 | 6354 | /* Generate the return instruction */ |
6327 | 6355 | tcg_gen_exit_tb(0); |
6328 | 6356 | } |
6357 | + gen_icount_end(tb, num_insns); | |
6329 | 6358 | *gen_opc_ptr = INDEX_op_end; |
6330 | 6359 | if (unlikely(search_pc)) { |
6331 | 6360 | j = gen_opc_ptr - gen_opc_buf; |
... | ... | @@ -6334,6 +6363,7 @@ static always_inline int gen_intermediate_code_internal (CPUState *env, |
6334 | 6363 | gen_opc_instr_start[lj++] = 0; |
6335 | 6364 | } else { |
6336 | 6365 | tb->size = ctx.nip - pc_start; |
6366 | + tb->icount = num_insns; | |
6337 | 6367 | } |
6338 | 6368 | #if defined(DEBUG_DISAS) |
6339 | 6369 | if (loglevel & CPU_LOG_TB_CPU) { | ... | ... |
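
ppc_translate_init() and sh4_translate_init() both guard tcg_global_reg_new() with a run-once flag; the guard only works when the flag is static, since an automatic variable restarts at zero on every call. A standalone sketch of the idiom, where register_globals() is a placeholder for the TCG registration:

    /* Run-once initialisation, as used by the *_translate_init()
       functions above.  register_globals() stands in for
       tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env"). */
    static void register_globals(void) { /* placeholder */ }

    void example_translate_init(void)
    {
        static int done_init;   /* static: survives across calls */
        if (done_init)
            return;
        register_globals();
        done_init = 1;
    }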
target-sh4/cpu.h
... | ... | @@ -152,6 +152,11 @@ static inline void cpu_clone_regs(CPUState *env, target_ulong newsp) |
152 | 152 | } |
153 | 153 | #endif |
154 | 154 | |
155 | +#define CPU_PC_FROM_TB(env, tb) do { \ | |
156 | + env->pc = tb->pc; \ | |
157 | + env->flags = tb->flags; \ | |
158 | + } while (0) | |
159 | + | |
155 | 160 | #include "cpu-all.h" |
156 | 161 | |
157 | 162 | /* Memory access type */ | ... | ... |
target-sh4/translate.c
... | ... | @@ -56,6 +56,19 @@ enum { |
56 | 56 | BS_EXCP = 3, /* We reached an exception condition */ |
57 | 57 | }; |
58 | 58 | |
59 | +static TCGv cpu_env; | |
60 | + | |
61 | +#include "gen-icount.h" | |
62 | + | |
63 | +void sh4_translate_init(void) | 
64 | +{ | |
65 | + static int done_init = 0; | |
66 | + if (done_init) | |
67 | + return; | |
68 | + cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env"); | |
69 | + done_init = 1; | |
70 | +} | |
71 | + | |
59 | 72 | #ifdef CONFIG_USER_ONLY |
60 | 73 | |
61 | 74 | #define GEN_OP_LD(width, reg) \ |
... | ... | @@ -143,6 +156,7 @@ CPUSH4State *cpu_sh4_init(const char *cpu_model) |
143 | 156 | if (!env) |
144 | 157 | return NULL; |
145 | 158 | cpu_exec_init(env); |
159 | + sh4_translate_init(); | |
146 | 160 | cpu_sh4_reset(env); |
147 | 161 | tlb_flush(env, 1); |
148 | 162 | return env; |
... | ... | @@ -1189,6 +1203,8 @@ gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb, |
1189 | 1203 | target_ulong pc_start; |
1190 | 1204 | static uint16_t *gen_opc_end; |
1191 | 1205 | int i, ii; |
1206 | + int num_insns; | |
1207 | + int max_insns; | |
1192 | 1208 | |
1193 | 1209 | pc_start = tb->pc; |
1194 | 1210 | gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; |
... | ... | @@ -1213,6 +1229,11 @@ gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb, |
1213 | 1229 | #endif |
1214 | 1230 | |
1215 | 1231 | ii = -1; |
1232 | + num_insns = 0; | |
1233 | + max_insns = tb->cflags & CF_COUNT_MASK; | |
1234 | + if (max_insns == 0) | |
1235 | + max_insns = CF_COUNT_MASK; | |
1236 | + gen_icount_start(); | |
1216 | 1237 | while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) { |
1217 | 1238 | if (env->nb_breakpoints > 0) { |
1218 | 1239 | for (i = 0; i < env->nb_breakpoints; i++) { |
... | ... | @@ -1235,22 +1256,30 @@ gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb, |
1235 | 1256 | gen_opc_pc[ii] = ctx.pc; |
1236 | 1257 | gen_opc_hflags[ii] = ctx.flags; |
1237 | 1258 | gen_opc_instr_start[ii] = 1; |
1259 | + gen_opc_icount[ii] = num_insns; | |
1238 | 1260 | } |
1261 | + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) | |
1262 | + gen_io_start(); | |
1239 | 1263 | #if 0 |
1240 | 1264 | fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc); |
1241 | 1265 | fflush(stderr); |
1242 | 1266 | #endif |
1243 | 1267 | ctx.opcode = lduw_code(ctx.pc); |
1244 | 1268 | decode_opc(&ctx); |
1269 | + num_insns++; | |
1245 | 1270 | ctx.pc += 2; |
1246 | 1271 | if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) |
1247 | 1272 | break; |
1248 | 1273 | if (env->singlestep_enabled) |
1249 | 1274 | break; |
1275 | + if (num_insns >= max_insns) | |
1276 | + break; | |
1250 | 1277 | #ifdef SH4_SINGLE_STEP |
1251 | 1278 | break; |
1252 | 1279 | #endif |
1253 | 1280 | } |
1281 | + if (tb->cflags & CF_LAST_IO) | |
1282 | + gen_io_end(); | |
1254 | 1283 | if (env->singlestep_enabled) { |
1255 | 1284 | gen_op_debug(); |
1256 | 1285 | } else { |
... | ... | @@ -1274,6 +1303,7 @@ gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb, |
1274 | 1303 | } |
1275 | 1304 | } |
1276 | 1305 | |
1306 | + gen_icount_end(tb, num_insns); | |
1277 | 1307 | *gen_opc_ptr = INDEX_op_end; |
1278 | 1308 | if (search_pc) { |
1279 | 1309 | i = gen_opc_ptr - gen_opc_buf; |
... | ... | @@ -1282,6 +1312,7 @@ gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb, |
1282 | 1312 | gen_opc_instr_start[ii++] = 0; |
1283 | 1313 | } else { |
1284 | 1314 | tb->size = ctx.pc - pc_start; |
1315 | + tb->icount = num_insns; | |
1285 | 1316 | } |
1286 | 1317 | |
1287 | 1318 | #ifdef DEBUG_DISAS | ... | ... |
target-sparc/cpu.h
... | ... | @@ -437,6 +437,11 @@ static inline void cpu_clone_regs(CPUState *env, target_ulong newsp) |
437 | 437 | } |
438 | 438 | #endif |
439 | 439 | |
440 | +#define CPU_PC_FROM_TB(env, tb) do { \ | |
441 | + env->pc = tb->pc; \ | |
442 | + env->npc = tb->cs_base; \ | |
443 | + } while(0) | |
444 | + | |
440 | 445 | #include "cpu-all.h" |
441 | 446 | |
442 | 447 | #endif | ... | ... |
target-sparc/translate.c
... | ... | @@ -48,6 +48,8 @@ static TCGv cpu_xcc; |
48 | 48 | /* local register indexes (only used inside old micro ops) */ |
49 | 49 | static TCGv cpu_tmp0, cpu_tmp32, cpu_tmp64; |
50 | 50 | |
51 | +#include "gen-icount.h" | |
52 | + | |
51 | 53 | typedef struct DisasContext { |
52 | 54 | target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */ |
53 | 55 | target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */ |
... | ... | @@ -4719,6 +4721,8 @@ static inline int gen_intermediate_code_internal(TranslationBlock * tb, |
4719 | 4721 | uint16_t *gen_opc_end; |
4720 | 4722 | DisasContext dc1, *dc = &dc1; |
4721 | 4723 | int j, lj = -1; |
4724 | + int num_insns; | |
4725 | + int max_insns; | |
4722 | 4726 | |
4723 | 4727 | memset(dc, 0, sizeof(DisasContext)); |
4724 | 4728 | dc->tb = tb; |
... | ... | @@ -4747,6 +4751,11 @@ static inline int gen_intermediate_code_internal(TranslationBlock * tb, |
4747 | 4751 | cpu_val = tcg_temp_local_new(TCG_TYPE_TL); |
4748 | 4752 | cpu_addr = tcg_temp_local_new(TCG_TYPE_TL); |
4749 | 4753 | |
4754 | + num_insns = 0; | |
4755 | + max_insns = tb->cflags & CF_COUNT_MASK; | |
4756 | + if (max_insns == 0) | |
4757 | + max_insns = CF_COUNT_MASK; | |
4758 | + gen_icount_start(); | |
4750 | 4759 | do { |
4751 | 4760 | if (env->nb_breakpoints > 0) { |
4752 | 4761 | for(j = 0; j < env->nb_breakpoints; j++) { |
... | ... | @@ -4771,10 +4780,14 @@ static inline int gen_intermediate_code_internal(TranslationBlock * tb, |
4771 | 4780 | gen_opc_pc[lj] = dc->pc; |
4772 | 4781 | gen_opc_npc[lj] = dc->npc; |
4773 | 4782 | gen_opc_instr_start[lj] = 1; |
4783 | + gen_opc_icount[lj] = num_insns; | |
4774 | 4784 | } |
4775 | 4785 | } |
4786 | + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) | |
4787 | + gen_io_start(); | |
4776 | 4788 | last_pc = dc->pc; |
4777 | 4789 | disas_sparc_insn(dc); |
4790 | + num_insns++; | |
4778 | 4791 | |
4779 | 4792 | if (dc->is_br) |
4780 | 4793 | break; |
... | ... | @@ -4793,7 +4806,8 @@ static inline int gen_intermediate_code_internal(TranslationBlock * tb, |
4793 | 4806 | break; |
4794 | 4807 | } |
4795 | 4808 | } while ((gen_opc_ptr < gen_opc_end) && |
4796 | - (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32)); | |
4809 | + (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) && | |
4810 | + num_insns < max_insns); | |
4797 | 4811 | |
4798 | 4812 | exit_gen_loop: |
4799 | 4813 | tcg_temp_free(cpu_addr); |
... | ... | @@ -4802,6 +4816,8 @@ static inline int gen_intermediate_code_internal(TranslationBlock * tb, |
4802 | 4816 | tcg_temp_free(cpu_tmp64); |
4803 | 4817 | tcg_temp_free(cpu_tmp32); |
4804 | 4818 | tcg_temp_free(cpu_tmp0); |
4819 | + if (tb->cflags & CF_LAST_IO) | |
4820 | + gen_io_end(); | |
4805 | 4821 | if (!dc->is_br) { |
4806 | 4822 | if (dc->pc != DYNAMIC_PC && |
4807 | 4823 | (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) { |
... | ... | @@ -4814,6 +4830,7 @@ static inline int gen_intermediate_code_internal(TranslationBlock * tb, |
4814 | 4830 | tcg_gen_exit_tb(0); |
4815 | 4831 | } |
4816 | 4832 | } |
4833 | + gen_icount_end(tb, num_insns); | |
4817 | 4834 | *gen_opc_ptr = INDEX_op_end; |
4818 | 4835 | if (spc) { |
4819 | 4836 | j = gen_opc_ptr - gen_opc_buf; |
... | ... | @@ -4829,6 +4846,7 @@ static inline int gen_intermediate_code_internal(TranslationBlock * tb, |
4829 | 4846 | gen_opc_jump_pc[1] = dc->jump_pc[1]; |
4830 | 4847 | } else { |
4831 | 4848 | tb->size = last_pc + 4 - pc_start; |
4849 | + tb->icount = num_insns; | |
4832 | 4850 | } |
4833 | 4851 | #ifdef DEBUG_DISAS |
4834 | 4852 | if (loglevel & CPU_LOG_TB_IN_ASM) { | ... | ... |
translate-all.c
... | ... | @@ -38,6 +38,7 @@ uint16_t gen_opc_buf[OPC_BUF_SIZE]; |
38 | 38 | TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE]; |
39 | 39 | |
40 | 40 | target_ulong gen_opc_pc[OPC_BUF_SIZE]; |
41 | +uint16_t gen_opc_icount[OPC_BUF_SIZE]; | |
41 | 42 | uint8_t gen_opc_instr_start[OPC_BUF_SIZE]; |
42 | 43 | #if defined(TARGET_I386) |
43 | 44 | uint8_t gen_opc_cc_op[OPC_BUF_SIZE]; |
... | ... | @@ -158,6 +159,13 @@ int cpu_restore_state(TranslationBlock *tb, |
158 | 159 | if (gen_intermediate_code_pc(env, tb) < 0) |
159 | 160 | return -1; |
160 | 161 | |
162 | + if (use_icount) { | |
163 | + /* Reset the cycle counter to the start of the block. */ | |
164 | + env->icount_decr.u16.low += tb->icount; | |
165 | + /* Clear the IO flag. */ | |
166 | + env->can_do_io = 0; | |
167 | + } | |
168 | + | |
161 | 169 | /* find opc index corresponding to search_pc */ |
162 | 170 | tc_ptr = (unsigned long)tb->tc_ptr; |
163 | 171 | if (searched_pc < tc_ptr) |
... | ... | @@ -177,6 +185,7 @@ int cpu_restore_state(TranslationBlock *tb, |
177 | 185 | /* now find start of instruction before */ |
178 | 186 | while (gen_opc_instr_start[j] == 0) |
179 | 187 | j--; |
188 | + env->icount_decr.u16.low -= gen_opc_icount[j]; | |
180 | 189 | |
181 | 190 | gen_pc_load(env, tb, searched_pc, j, puc); |
182 | 191 | ... | ... |
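
The icount lines added to cpu_restore_state() implement a two-step rewind: first the whole block's count is credited back to the decrementer (u16.low += tb->icount), then, once the faulting opcode index j is found, the instructions that did complete are charged again (-= gen_opc_icount[j]). A standalone arithmetic check with assumed numbers:

    /* Standalone demonstration (assumed values) of the rewind in
       cpu_restore_state(): refund the whole TB, then re-charge the
       instructions that executed before the fault. */
    #include <stdio.h>

    int main(void)
    {
        int icount_low = 2;           /* budget left when the fault hit  */
        int tb_icount = 10;           /* instructions in the whole TB    */
        int insns_before_fault = 7;   /* gen_opc_icount[j] at the fault  */

        icount_low += tb_icount;            /* back to start of TB: 12   */
        icount_low -= insns_before_fault;   /* re-charge 7 insns: 5      */

        /* 5 = 2 + (10 - 7): exactly the unexecuted insns are refunded. */
        printf("budget after restore: %d\n", icount_low);
        return 0;
    }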
vl.c
... | ... | @@ -239,6 +239,14 @@ struct drive_opt { |
239 | 239 | static CPUState *cur_cpu; |
240 | 240 | static CPUState *next_cpu; |
241 | 241 | static int event_pending = 1; |
242 | +/* Conversion factor from emulated instructions to virtual clock ticks. */ | 
243 | +static int icount_time_shift; | |
244 | +/* Arbitrarily pick 1 MIPS as the minimum allowable speed. */ | 
245 | +#define MAX_ICOUNT_SHIFT 10 | |
246 | +/* Compensate for varying guest execution speed. */ | |
247 | +static int64_t qemu_icount_bias; | |
248 | +QEMUTimer *icount_rt_timer; | |
249 | +QEMUTimer *icount_vm_timer; | |
242 | 250 | |
243 | 251 | #define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR) |
244 | 252 | |
... | ... | @@ -733,9 +741,22 @@ static int64_t get_clock(void) |
733 | 741 | return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000); |
734 | 742 | } |
735 | 743 | } |
736 | - | |
737 | 744 | #endif |
738 | 745 | |
746 | +/* Return the virtual CPU time, based on the instruction counter. */ | |
747 | +static int64_t cpu_get_icount(void) | |
748 | +{ | |
749 | + int64_t icount; | |
750 | + CPUState *env = cpu_single_env; | 
751 | + icount = qemu_icount; | |
752 | + if (env) { | |
753 | + if (!can_do_io(env)) | |
754 | + fprintf(stderr, "Bad clock read\n"); | |
755 | + icount -= (env->icount_decr.u16.low + env->icount_extra); | |
756 | + } | |
757 | + return qemu_icount_bias + (icount << icount_time_shift); | |
758 | +} | |
759 | + | |
739 | 760 | /***********************************************************/ |
740 | 761 | /* guest cycle counter */ |
741 | 762 | |
... | ... | @@ -747,6 +768,9 @@ static int cpu_ticks_enabled; |
747 | 768 | /* return the host CPU cycle counter and handle stop/restart */ |
748 | 769 | int64_t cpu_get_ticks(void) |
749 | 770 | { |
771 | + if (use_icount) { | |
772 | + return cpu_get_icount(); | |
773 | + } | |
750 | 774 | if (!cpu_ticks_enabled) { |
751 | 775 | return cpu_ticks_offset; |
752 | 776 | } else { |
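
cpu_get_icount() defines virtual time as qemu_icount_bias plus the executed instruction count shifted left by icount_time_shift, where the executed count is the global budget minus what the running CPU has not yet consumed (icount_decr.u16.low plus icount_extra); under -icount, cpu_get_ticks() simply delegates to it. A standalone numeric model with assumed values:

    /* Standalone model (assumed numbers) of cpu_get_icount():
       virtual ns = bias + (instructions actually executed << shift). */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t qemu_icount = 1000;          /* instructions budgeted so far */
        int64_t decr_low = 40, extra = 60;   /* budgeted but not executed    */
        int64_t bias = 0;
        int icount_time_shift = 3;           /* 2^3 = 8 ns per instruction   */

        int64_t executed = qemu_icount - (decr_low + extra);   /* 900     */
        int64_t ns = bias + (executed << icount_time_shift);   /* 7200 ns */
        printf("virtual time: %lld ns\n", (long long)ns);
        return 0;
    }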
... | ... | @@ -878,6 +902,71 @@ static void rtc_stop_timer(struct qemu_alarm_timer *t); |
878 | 902 | |
879 | 903 | #endif /* _WIN32 */ |
880 | 904 | |
905 | +/* Correlation between real and virtual time is always going to be | |
906 | + farly approximate, so ignore small variation. | |
907 | + When the guest is idle real and virtual time will be aligned in | |
908 | + the IO wait loop. */ | |
909 | +#define ICOUNT_WOBBLE (QEMU_TIMER_BASE / 10) | |
910 | + | |
911 | +static void icount_adjust(void) | |
912 | +{ | |
913 | + int64_t cur_time; | |
914 | + int64_t cur_icount; | |
915 | + int64_t delta; | |
916 | + static int64_t last_delta; | |
917 | + /* If the VM is not running, then do nothing. */ | |
918 | + if (!vm_running) | |
919 | + return; | |
920 | + | |
921 | + cur_time = cpu_get_clock(); | |
922 | + cur_icount = qemu_get_clock(vm_clock); | |
923 | + delta = cur_icount - cur_time; | |
924 | + /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */ | |
925 | + if (delta > 0 | |
926 | + && last_delta + ICOUNT_WOBBLE < delta * 2 | |
927 | + && icount_time_shift > 0) { | |
928 | + /* The guest is getting too far ahead. Slow time down. */ | |
929 | + icount_time_shift--; | |
930 | + } | |
931 | + if (delta < 0 | |
932 | + && last_delta - ICOUNT_WOBBLE > delta * 2 | |
933 | + && icount_time_shift < MAX_ICOUNT_SHIFT) { | |
934 | + /* The guest is getting too far behind. Speed time up. */ | |
935 | + icount_time_shift++; | |
936 | + } | |
937 | + last_delta = delta; | |
938 | + qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift); | |
939 | +} | |
940 | + | |
941 | +static void icount_adjust_rt(void * opaque) | |
942 | +{ | |
943 | + qemu_mod_timer(icount_rt_timer, | |
944 | + qemu_get_clock(rt_clock) + 1000); | |
945 | + icount_adjust(); | |
946 | +} | |
947 | + | |
948 | +static void icount_adjust_vm(void * opaque) | |
949 | +{ | |
950 | + qemu_mod_timer(icount_vm_timer, | |
951 | + qemu_get_clock(vm_clock) + QEMU_TIMER_BASE / 10); | |
952 | + icount_adjust(); | |
953 | +} | |
954 | + | |
955 | +static void init_icount_adjust(void) | |
956 | +{ | |
957 | + /* Have both realtime and virtual time triggers for speed adjustment. | |
958 | + The realtime trigger catches emulated time passing too slowly, | |
959 | + the virtual time trigger catches emulated time passing too fast. | |
960 | + Realtime triggers occur even when idle, so use them less frequently | |
961 | + than VM triggers. */ | |
962 | + icount_rt_timer = qemu_new_timer(rt_clock, icount_adjust_rt, NULL); | |
963 | + qemu_mod_timer(icount_rt_timer, | |
964 | + qemu_get_clock(rt_clock) + 1000); | |
965 | + icount_vm_timer = qemu_new_timer(vm_clock, icount_adjust_vm, NULL); | |
966 | + qemu_mod_timer(icount_vm_timer, | |
967 | + qemu_get_clock(vm_clock) + QEMU_TIMER_BASE / 10); | |
968 | +} | |
969 | + | |
881 | 970 | static struct qemu_alarm_timer alarm_timers[] = { |
882 | 971 | #ifndef _WIN32 |
883 | 972 | #ifdef __linux__ |
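
icount_adjust() above is a coarse feedback governor: when virtual time runs ahead of real time by more than the wobble allowance it lowers icount_time_shift (fewer nanoseconds per instruction, so the virtual clock slows), when it lags it raises the shift up to MAX_ICOUNT_SHIFT, and it then recomputes qemu_icount_bias so the virtual clock is continuous across the rate change. The standalone model below shows, with assumed numbers, why the bias recomputation prevents a jump:

    /* Why resetting the bias keeps vm_clock continuous when the
       shift changes (assumed values throughout). */
    #include <stdint.h>
    #include <stdio.h>

    static int64_t vtime(int64_t bias, int64_t icount, int shift)
    {
        return bias + (icount << shift);
    }

    int main(void)
    {
        int64_t icount = 1000, bias = 0;
        int shift = 3;
        int64_t before = vtime(bias, icount, shift);   /* 8000 ns */

        shift++;                              /* guest lagging: speed time up */
        bias = before - (icount << shift);    /* -8000: cancels the jump      */
        printf("after: %lld ns (unchanged)\n",
               (long long)vtime(bias, icount, shift));
        return 0;
    }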
... | ... | @@ -914,6 +1003,7 @@ static void configure_alarms(char const *opt) |
914 | 1003 | int count = (sizeof(alarm_timers) / sizeof(*alarm_timers)) - 1; |
915 | 1004 | char *arg; |
916 | 1005 | char *name; |
1006 | + struct qemu_alarm_timer tmp; | |
917 | 1007 | |
918 | 1008 | if (!strcmp(opt, "?")) { |
919 | 1009 | show_available_alarms(); |
... | ... | @@ -925,8 +1015,6 @@ static void configure_alarms(char const *opt) |
925 | 1015 | /* Reorder the array */ |
926 | 1016 | name = strtok(arg, ","); |
927 | 1017 | while (name) { |
928 | - struct qemu_alarm_timer tmp; | |
929 | - | |
930 | 1018 | for (i = 0; i < count && alarm_timers[i].name; i++) { |
931 | 1019 | if (!strcmp(alarm_timers[i].name, name)) |
932 | 1020 | break; |
... | ... | @@ -954,7 +1042,7 @@ next: |
954 | 1042 | free(arg); |
955 | 1043 | |
956 | 1044 | if (cur) { |
957 | - /* Disable remaining timers */ | |
1045 | + /* Disable remaining timers */ | |
958 | 1046 | for (i = cur; i < count; i++) |
959 | 1047 | alarm_timers[i].name = NULL; |
960 | 1048 | } else { |
... | ... | @@ -1039,9 +1127,15 @@ void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time) |
1039 | 1127 | *pt = ts; |
1040 | 1128 | |
1041 | 1129 | /* Rearm if necessary */ |
1042 | - if ((alarm_timer->flags & ALARM_FLAG_EXPIRED) == 0 && | |
1043 | - pt == &active_timers[ts->clock->type]) | |
1044 | - qemu_rearm_alarm_timer(alarm_timer); | |
1130 | + if (pt == &active_timers[ts->clock->type]) { | |
1131 | + if ((alarm_timer->flags & ALARM_FLAG_EXPIRED) == 0) { | |
1132 | + qemu_rearm_alarm_timer(alarm_timer); | |
1133 | + } | |
1134 | + /* Interrupt execution to force deadline recalculation. */ | |
1135 | + if (use_icount && cpu_single_env) { | |
1136 | + cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT); | |
1137 | + } | |
1138 | + } | |
1045 | 1139 | } |
1046 | 1140 | |
1047 | 1141 | int qemu_timer_pending(QEMUTimer *ts) |
... | ... | @@ -1085,7 +1179,11 @@ int64_t qemu_get_clock(QEMUClock *clock) |
1085 | 1179 | return get_clock() / 1000000; |
1086 | 1180 | default: |
1087 | 1181 | case QEMU_TIMER_VIRTUAL: |
1088 | - return cpu_get_clock(); | |
1182 | + if (use_icount) { | |
1183 | + return cpu_get_icount(); | |
1184 | + } else { | |
1185 | + return cpu_get_clock(); | |
1186 | + } | |
1089 | 1187 | } |
1090 | 1188 | } |
1091 | 1189 | |
... | ... | @@ -1184,8 +1282,9 @@ static void host_alarm_handler(int host_signum) |
1184 | 1282 | } |
1185 | 1283 | #endif |
1186 | 1284 | if (alarm_has_dynticks(alarm_timer) || |
1187 | - qemu_timer_expired(active_timers[QEMU_TIMER_VIRTUAL], | |
1188 | - qemu_get_clock(vm_clock)) || | |
1285 | + (!use_icount && | |
1286 | + qemu_timer_expired(active_timers[QEMU_TIMER_VIRTUAL], | |
1287 | + qemu_get_clock(vm_clock))) || | |
1189 | 1288 | qemu_timer_expired(active_timers[QEMU_TIMER_REALTIME], |
1190 | 1289 | qemu_get_clock(rt_clock))) { |
1191 | 1290 | #ifdef _WIN32 |
... | ... | @@ -1209,28 +1308,45 @@ static void host_alarm_handler(int host_signum) |
1209 | 1308 | } |
1210 | 1309 | } |
1211 | 1310 | |
1212 | -static uint64_t qemu_next_deadline(void) | |
1311 | +static int64_t qemu_next_deadline(void) | |
1213 | 1312 | { |
1214 | - int64_t nearest_delta_us = INT64_MAX; | |
1215 | - int64_t vmdelta_us; | |
1216 | - | |
1217 | - if (active_timers[QEMU_TIMER_REALTIME]) | |
1218 | - nearest_delta_us = (active_timers[QEMU_TIMER_REALTIME]->expire_time - | |
1219 | - qemu_get_clock(rt_clock))*1000; | |
1313 | + int64_t delta; | |
1220 | 1314 | |
1221 | 1315 | if (active_timers[QEMU_TIMER_VIRTUAL]) { |
1222 | - /* round up */ | |
1223 | - vmdelta_us = (active_timers[QEMU_TIMER_VIRTUAL]->expire_time - | |
1224 | - qemu_get_clock(vm_clock)+999)/1000; | |
1225 | - if (vmdelta_us < nearest_delta_us) | |
1226 | - nearest_delta_us = vmdelta_us; | |
1316 | + delta = active_timers[QEMU_TIMER_VIRTUAL]->expire_time - | |
1317 | + qemu_get_clock(vm_clock); | |
1318 | + } else { | |
1319 | + /* To avoid overflow problems, limit this to INT32_MAX. */ | 
1320 | + delta = INT32_MAX; | |
1227 | 1321 | } |
1228 | 1322 | |
1229 | - /* Avoid arming the timer to negative, zero, or too low values */ | |
1230 | - if (nearest_delta_us <= MIN_TIMER_REARM_US) | |
1231 | - nearest_delta_us = MIN_TIMER_REARM_US; | |
1323 | + if (delta < 0) | |
1324 | + delta = 0; | |
1232 | 1325 | |
1233 | - return nearest_delta_us; | |
1326 | + return delta; | |
1327 | +} | |
1328 | + | |
1329 | +static uint64_t qemu_next_deadline_dyntick(void) | |
1330 | +{ | |
1331 | + int64_t delta; | |
1332 | + int64_t rtdelta; | |
1333 | + | |
1334 | + if (use_icount) | |
1335 | + delta = INT32_MAX; | |
1336 | + else | |
1337 | + delta = (qemu_next_deadline() + 999) / 1000; | |
1338 | + | |
1339 | + if (active_timers[QEMU_TIMER_REALTIME]) { | |
1340 | + rtdelta = (active_timers[QEMU_TIMER_REALTIME]->expire_time - | |
1341 | + qemu_get_clock(rt_clock))*1000; | |
1342 | + if (rtdelta < delta) | |
1343 | + delta = rtdelta; | |
1344 | + } | |
1345 | + | |
1346 | + if (delta < MIN_TIMER_REARM_US) | |
1347 | + delta = MIN_TIMER_REARM_US; | |
1348 | + | |
1349 | + return delta; | |
1234 | 1350 | } |
1235 | 1351 | |
1236 | 1352 | #ifndef _WIN32 |
... | ... | @@ -1386,7 +1502,7 @@ static void dynticks_rearm_timer(struct qemu_alarm_timer *t) |
1386 | 1502 | !active_timers[QEMU_TIMER_VIRTUAL]) |
1387 | 1503 | return; |
1388 | 1504 | |
1389 | - nearest_delta_us = qemu_next_deadline(); | |
1505 | + nearest_delta_us = qemu_next_deadline_dyntick(); | |
1390 | 1506 | |
1391 | 1507 | /* check whether a timer is already running */ |
1392 | 1508 | if (timer_gettime(host_timer, &timeout)) { |
... | ... | @@ -1513,7 +1629,7 @@ static void win32_rearm_timer(struct qemu_alarm_timer *t) |
1513 | 1629 | !active_timers[QEMU_TIMER_VIRTUAL]) |
1514 | 1630 | return; |
1515 | 1631 | |
1516 | - nearest_delta_us = qemu_next_deadline(); | |
1632 | + nearest_delta_us = qemu_next_deadline_dyntick(); | |
1517 | 1633 | nearest_delta_us /= 1000; |
1518 | 1634 | |
1519 | 1635 | timeKillEvent(data->timerId); |
... | ... | @@ -7068,10 +7184,33 @@ static int main_loop(void) |
7068 | 7184 | #ifdef CONFIG_PROFILER |
7069 | 7185 | ti = profile_getclock(); |
7070 | 7186 | #endif |
7187 | + if (use_icount) { | |
7188 | + int64_t count; | |
7189 | + int decr; | |
7190 | + qemu_icount -= (env->icount_decr.u16.low + env->icount_extra); | |
7191 | + env->icount_decr.u16.low = 0; | |
7192 | + env->icount_extra = 0; | |
7193 | + count = qemu_next_deadline(); | |
7194 | + count = (count + (1 << icount_time_shift) - 1) | |
7195 | + >> icount_time_shift; | |
7196 | + qemu_icount += count; | |
7197 | + decr = (count > 0xffff) ? 0xffff : count; | |
7198 | + count -= decr; | |
7199 | + env->icount_decr.u16.low = decr; | |
7200 | + env->icount_extra = count; | |
7201 | + } | |
7071 | 7202 | ret = cpu_exec(env); |
7072 | 7203 | #ifdef CONFIG_PROFILER |
7073 | 7204 | qemu_time += profile_getclock() - ti; |
7074 | 7205 | #endif |
7206 | + if (use_icount) { | |
7207 | + /* Fold pending instructions back into the | |
7208 | + instruction counter, and clear the interrupt flag. */ | |
7209 | + qemu_icount -= (env->icount_decr.u16.low | |
7210 | + + env->icount_extra); | |
7211 | + env->icount_decr.u32 = 0; | |
7212 | + env->icount_extra = 0; | |
7213 | + } | |
7075 | 7214 | next_cpu = env->next_cpu ?: first_cpu; |
7076 | 7215 | if (event_pending && likely(ret != EXCP_DEBUG)) { |
7077 | 7216 | ret = EXCP_INTERRUPT; |
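
Before each cpu_exec() call, the loop above converts the next timer deadline from nanoseconds to an instruction budget with a ceiling divide by 2^icount_time_shift, then splits the budget into the fast 16-bit decrementer (at most 0xffff) and the icount_extra spill; on return, whatever went unexecuted is folded back out of qemu_icount. A standalone model of the split with an assumed deadline:

    /* Standalone model (assumed deadline) of the pre-exec budget split. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t deadline_ns = 1000001;   /* next timer event */
        int icount_time_shift = 3;

        int64_t count = (deadline_ns + (1 << icount_time_shift) - 1)
                        >> icount_time_shift;          /* ceil: 125001      */
        int decr = (count > 0xffff) ? 0xffff : count;  /* fast path: 65535  */
        count -= decr;                                 /* extra: 59466      */

        printf("u16.low = %d, icount_extra = %lld\n", decr, (long long)count);
        return 0;
    }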
... | ... | @@ -7115,10 +7254,46 @@ static int main_loop(void) |
7115 | 7254 | } |
7116 | 7255 | /* If all cpus are halted then wait until the next IRQ */ |
7117 | 7256 | /* XXX: use timeout computed from timers */ |
7118 | - if (ret == EXCP_HALTED) | |
7119 | - timeout = 10; | |
7120 | - else | |
7257 | + if (ret == EXCP_HALTED) { | |
7258 | + if (use_icount) { | |
7259 | + int64_t add; | |
7260 | + int64_t delta; | |
7261 | + /* Advance virtual time to the next event. */ | |
7262 | + if (use_icount == 1) { | |
7263 | + /* When not using an adaptive execution frequency | |
7264 | + we tend to get badly out of sync with real time, | |
7265 | + so just delay for a reasonable amount of time. */ | 
7266 | + delta = 0; | |
7267 | + } else { | |
7268 | + delta = cpu_get_icount() - cpu_get_clock(); | |
7269 | + } | |
7270 | + if (delta > 0) { | |
7271 | + /* If virtual time is ahead of real time then just | |
7272 | + wait for IO. */ | |
7273 | + timeout = (delta / 1000000) + 1; | |
7274 | + } else { | |
7275 | + /* Wait for either IO to occur or the next | |
7276 | + timer event. */ | |
7277 | + add = qemu_next_deadline(); | |
7278 | + /* We advance the timer before checking for IO. | |
7279 | + Limit the amount we advance so that early IO | |
7280 | + activity won't get the guest too far ahead. */ | |
7281 | + if (add > 10000000) | |
7282 | + add = 10000000; | |
7283 | + delta += add; | |
7284 | + add = (add + (1 << icount_time_shift) - 1) | |
7285 | + >> icount_time_shift; | |
7286 | + qemu_icount += add; | |
7287 | + timeout = delta / 1000000; | |
7288 | + if (timeout < 0) | |
7289 | + timeout = 0; | |
7290 | + } | |
7291 | + } else { | |
7292 | + timeout = 10; | |
7293 | + } | |
7294 | + } else { | |
7121 | 7295 | timeout = 0; |
7296 | + } | |
7122 | 7297 | } else { |
7123 | 7298 | timeout = 10; |
7124 | 7299 | } |
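
When every CPU is halted, adaptive mode (use_icount == 2) warps virtual time instead of spinning: if the virtual clock already leads real time the loop just sleeps the difference, otherwise it credits at most 10 ms worth of instructions toward the next deadline so that early IO cannot leave the guest far in the future. A standalone model with assumed numbers:

    /* Standalone model (assumed values) of the idle-time warp. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t delta = -4000000;   /* virtual clock 4 ms behind real time */
        int64_t add = 25000000;     /* next deadline is 25 ms away         */
        int icount_time_shift = 3;

        if (add > 10000000)
            add = 10000000;         /* cap the warp at 10 ms */
        delta += add;
        int64_t insns = (add + (1 << icount_time_shift) - 1)
                        >> icount_time_shift;   /* credit 1250000 insns */
        int timeout_ms = delta / 1000000;       /* then sleep 6 ms      */
        if (timeout_ms < 0)
            timeout_ms = 0;
        printf("warp %lld insns, sleep %d ms\n", (long long)insns, timeout_ms);
        return 0;
    }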
... | ... | @@ -7270,6 +7445,8 @@ static void help(int exitcode) |
7270 | 7445 | "-clock force the use of the given methods for timer alarm.\n" |
7271 | 7446 | " To see what timers are available use -clock ?\n" |
7272 | 7447 | "-startdate select initial date of the clock\n" |
7448 | + "-icount [N|auto]\n" | |
7449 | + " Enable virtual instruction counter with 2^N clock ticks per instructon\n" | |
7273 | 7450 | "\n" |
7274 | 7451 | "During emulation, the following keys are useful:\n" |
7275 | 7452 | "ctrl-alt-f toggle full screen\n" |
... | ... | @@ -7374,6 +7551,7 @@ enum { |
7374 | 7551 | QEMU_OPTION_clock, |
7375 | 7552 | QEMU_OPTION_startdate, |
7376 | 7553 | QEMU_OPTION_tb_size, |
7554 | + QEMU_OPTION_icount, | |
7377 | 7555 | }; |
7378 | 7556 | |
7379 | 7557 | typedef struct QEMUOption { |
... | ... | @@ -7486,6 +7664,7 @@ const QEMUOption qemu_options[] = { |
7486 | 7664 | { "clock", HAS_ARG, QEMU_OPTION_clock }, |
7487 | 7665 | { "startdate", HAS_ARG, QEMU_OPTION_startdate }, |
7488 | 7666 | { "tb-size", HAS_ARG, QEMU_OPTION_tb_size }, |
7667 | + { "icount", HAS_ARG, QEMU_OPTION_icount }, | |
7489 | 7668 | { NULL }, |
7490 | 7669 | }; |
7491 | 7670 | |
... | ... | @@ -8310,6 +8489,14 @@ int main(int argc, char **argv) |
8310 | 8489 | if (tb_size < 0) |
8311 | 8490 | tb_size = 0; |
8312 | 8491 | break; |
8492 | + case QEMU_OPTION_icount: | |
8493 | + use_icount = 1; | |
8494 | + if (strcmp(optarg, "auto") == 0) { | |
8495 | + icount_time_shift = -1; | |
8496 | + } else { | |
8497 | + icount_time_shift = strtol(optarg, NULL, 0); | |
8498 | + } | |
8499 | + break; | |
8313 | 8500 | } |
8314 | 8501 | } |
8315 | 8502 | } |
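
Usage, for reference: -icount N fixes the rate at 2^N nanoseconds of vm_clock per guest instruction, while -icount auto enables the adaptive mode initialised in the next hunk (the shift starts at 3, roughly 125 MIPS, and the adjustment timers tune it from there). Illustrative invocations; every flag other than -icount is a placeholder:

    qemu -icount 2 -hda disk.img      # fixed: 4 ns of virtual time per instruction
    qemu -icount auto -hda disk.img   # adaptive: starts at 2^3 ns and self-tunes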
... | ... | @@ -8395,6 +8582,13 @@ int main(int argc, char **argv) |
8395 | 8582 | init_timers(); |
8396 | 8583 | init_timer_alarm(); |
8397 | 8584 | qemu_aio_init(); |
8585 | + if (use_icount && icount_time_shift < 0) { | |
8586 | + use_icount = 2; | |
8587 | + /* 125 MIPS seems a reasonable initial guess at the guest speed. | 
8588 | + It will be corrected fairly quickly anyway. */ | |
8589 | + icount_time_shift = 3; | |
8590 | + init_icount_adjust(); | |
8591 | + } | |
8398 | 8592 | |
8399 | 8593 | #ifdef _WIN32 |
8400 | 8594 | socket_init(); | ... | ... |