Commit 8a40a180d39ec535b16a9456965a59722950cee2

Authored by bellard
1 parent 313adae9

make the TB cache independent of MMU mappings (faster MMU context switches and needed for SMP support)


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1632 c046a42c-6fe2-441c-8c8c-71466251a162
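The change is easiest to read as a data-structure swap: the globally shared, MMU-dependent virtual hash table (tb_hash, plus the VirtPageDesc bookkeeping in exec.c) is replaced by a small per-CPU, direct-mapped cache (env->tb_jmp_cache) indexed by the guest virtual PC, with the physically indexed tb_phys_hash as the slow path. Below is a minimal standalone sketch of that two-level lookup, not the QEMU code itself; the sizes, type names, and the stubbed slow path are illustrative only.

#include <stdint.h>
#include <string.h>

/* Illustrative size only; QEMU's real TB_JMP_CACHE_BITS lives in its CPU
   headers, which are not part of this diff. */
#define JMP_CACHE_BITS 12
#define JMP_CACHE_SIZE (1u << JMP_CACHE_BITS)

typedef struct TB {
    uint32_t pc, cs_base, flags;   /* virtual-CPU state the block was compiled for */
} TB;

/* Per-CPU, direct-mapped, virtually indexed: cheap to clear on an MMU switch. */
static TB *jmp_cache[JMP_CACHE_SIZE];

static unsigned int jmp_cache_hash(uint32_t pc)
{
    return (pc ^ (pc >> JMP_CACHE_BITS)) & (JMP_CACHE_SIZE - 1);
}

/* Stand-in for the slow path: in QEMU, tb_find_slow() walks tb_phys_hash
   (keyed by the physical PC) and translates a new block on a miss; here it
   just fabricates one so the sketch compiles on its own. */
static TB *find_tb_slow(uint32_t pc, uint32_t cs_base, uint32_t flags)
{
    static TB pool[16];
    static unsigned int next;
    TB *tb = &pool[next++ & 15];
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    return tb;
}

/* Fast path: one load plus a few compares; on a miss, fall back and refill. */
static TB *find_tb_fast(uint32_t pc, uint32_t cs_base, uint32_t flags)
{
    unsigned int h = jmp_cache_hash(pc);
    TB *tb = jmp_cache[h];
    if (!tb || tb->pc != pc || tb->cs_base != cs_base || tb->flags != flags) {
        tb = find_tb_slow(pc, cs_base, flags);
        jmp_cache[h] = tb;             /* cache by virtual PC only */
    }
    return tb;
}

/* On tlb_flush()/tb_flush(), the whole cache is simply zeroed; no per-page
   virtual bookkeeping (the old VirtPageDesc tables) is needed any more. */
static void jmp_cache_flush(void)
{
    memset(jmp_cache, 0, sizeof(jmp_cache));
}

int main(void)
{
    TB *a = find_tb_fast(0x1000, 0, 0);   /* miss: filled via the slow path */
    TB *b = find_tb_fast(0x1000, 0, 0);   /* hit: same block, no slow path */
    jmp_cache_flush();                    /* e.g. after a guest MMU context switch */
    return a == b ? 0 : 1;
}

The point of the per-CPU indexing is that a guest MMU context switch (or, later, another CPU in an SMP guest) only has to clear its own tb_jmp_cache; the translated blocks themselves stay valid because they are kept and invalidated by physical address.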
Showing 3 changed files with 206 additions and 426 deletions
cpu-exec.c
@@ -73,6 +73,137 @@ void cpu_resume_from_signal(CPUState *env1, void *puc)
     longjmp(env->jmp_env, 1);
 }

+
+static TranslationBlock *tb_find_slow(target_ulong pc,
+                                      target_ulong cs_base,
+                                      unsigned int flags)
+{
+    TranslationBlock *tb, **ptb1;
+    int code_gen_size;
+    unsigned int h;
+    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
+    uint8_t *tc_ptr;
+
+    spin_lock(&tb_lock);
+
+    tb_invalidated_flag = 0;
+
+    regs_to_env(); /* XXX: do it just before cpu_gen_code() */
+
+    /* find translated block using physical mappings */
+    phys_pc = get_phys_addr_code(env, pc);
+    phys_page1 = phys_pc & TARGET_PAGE_MASK;
+    phys_page2 = -1;
+    h = tb_phys_hash_func(phys_pc);
+    ptb1 = &tb_phys_hash[h];
+    for(;;) {
+        tb = *ptb1;
+        if (!tb)
+            goto not_found;
+        if (tb->pc == pc &&
+            tb->page_addr[0] == phys_page1 &&
+            tb->cs_base == cs_base &&
+            tb->flags == flags) {
+            /* check next page if needed */
+            if (tb->page_addr[1] != -1) {
+                virt_page2 = (pc & TARGET_PAGE_MASK) +
+                    TARGET_PAGE_SIZE;
+                phys_page2 = get_phys_addr_code(env, virt_page2);
+                if (tb->page_addr[1] == phys_page2)
+                    goto found;
+            } else {
+                goto found;
+            }
+        }
+        ptb1 = &tb->phys_hash_next;
+    }
+ not_found:
+    /* if no translated code available, then translate it now */
+    tb = tb_alloc(pc);
+    if (!tb) {
+        /* flush must be done */
+        tb_flush(env);
+        /* cannot fail at this point */
+        tb = tb_alloc(pc);
+        /* don't forget to invalidate previous TB info */
+        T0 = 0;
+    }
+    tc_ptr = code_gen_ptr;
+    tb->tc_ptr = tc_ptr;
+    tb->cs_base = cs_base;
+    tb->flags = flags;
+    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
+    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
+
+    /* check next page if needed */
+    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
+    phys_page2 = -1;
+    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
+        phys_page2 = get_phys_addr_code(env, virt_page2);
+    }
+    tb_link_phys(tb, phys_pc, phys_page2);
+
+ found:
+    if (tb_invalidated_flag) {
+        /* as some TB could have been invalidated because
+           of memory exceptions while generating the code, we
+           must recompute the hash index here */
+        T0 = 0;
+    }
+    /* we add the TB in the virtual pc hash table */
+    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
+    spin_unlock(&tb_lock);
+    return tb;
+}
+
+static inline TranslationBlock *tb_find_fast(void)
+{
+    TranslationBlock *tb;
+    target_ulong cs_base, pc;
+    unsigned int flags;
+
+    /* we record a subset of the CPU state. It will
+       always be the same before a given translated block
+       is executed. */
+#if defined(TARGET_I386)
+    flags = env->hflags;
+    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
+    cs_base = env->segs[R_CS].base;
+    pc = cs_base + env->eip;
+#elif defined(TARGET_ARM)
+    flags = env->thumb | (env->vfp.vec_len << 1)
+            | (env->vfp.vec_stride << 4);
+    cs_base = 0;
+    pc = env->regs[15];
+#elif defined(TARGET_SPARC)
+#ifdef TARGET_SPARC64
+    flags = (env->pstate << 2) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
+#else
+    flags = env->psrs | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1);
+#endif
+    cs_base = env->npc;
+    pc = env->pc;
+#elif defined(TARGET_PPC)
+    flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
+        (msr_se << MSR_SE) | (msr_le << MSR_LE);
+    cs_base = 0;
+    pc = env->nip;
+#elif defined(TARGET_MIPS)
+    flags = env->hflags & MIPS_HFLAGS_TMASK;
+    cs_base = NULL;
+    pc = env->PC;
+#else
+#error unsupported CPU
+#endif
+    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
+    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
+                         tb->flags != flags, 0)) {
+        tb = tb_find_slow(pc, cs_base, flags);
+    }
+    return tb;
+}
+
+
 /* main execution loop */

 int cpu_exec(CPUState *env1)
@@ -115,12 +246,10 @@ int cpu_exec(CPUState *env1)
 #ifdef __sparc__
     int saved_i7, tmp_T0;
 #endif
-    int code_gen_size, ret, interrupt_request;
+    int ret, interrupt_request;
     void (*gen_func)(void);
-    TranslationBlock *tb, **ptb;
-    target_ulong cs_base, pc;
+    TranslationBlock *tb;
     uint8_t *tc_ptr;
-    unsigned int flags;

     /* first we save global registers */
     saved_env = env;
@@ -290,19 +419,29 @@ int cpu_exec(CPUState *env1)
                     }
 #endif
                     if (msr_ee != 0) {
-                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
+                        if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                             /* Raise it */
                             env->exception_index = EXCP_EXTERNAL;
                             env->error_code = 0;
                             do_interrupt(env);
-                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
-                    } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
-                        /* Raise it */
-                        env->exception_index = EXCP_DECR;
-                        env->error_code = 0;
-                        do_interrupt(env);
+                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+#ifdef __sparc__
+                            tmp_T0 = 0;
+#else
+                            T0 = 0;
+#endif
+                        } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
+                            /* Raise it */
+                            env->exception_index = EXCP_DECR;
+                            env->error_code = 0;
+                            do_interrupt(env);
                             env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
-                    }
+#ifdef __sparc__
+                            tmp_T0 = 0;
+#else
+                            T0 = 0;
+#endif
+                        }
                     }
 #elif defined(TARGET_MIPS)
                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -316,6 +455,11 @@ int cpu_exec(CPUState *env1)
                         env->error_code = 0;
                         do_interrupt(env);
                         env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+#ifdef __sparc__
+                        tmp_T0 = 0;
+#else
+                        T0 = 0;
+#endif
                     }
 #elif defined(TARGET_SPARC)
                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -329,6 +473,11 @@ int cpu_exec(CPUState *env1)
                             env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                             do_interrupt(env->interrupt_index);
                             env->interrupt_index = 0;
+#ifdef __sparc__
+                            tmp_T0 = 0;
+#else
+                            T0 = 0;
+#endif
                         }
                     } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                         //do_interrupt(0, 0, 0, 0, 0);
@@ -399,123 +548,7 @@ int cpu_exec(CPUState *env1)
 #endif
             }
 #endif
-            /* we record a subset of the CPU state. It will
-               always be the same before a given translated block
-               is executed. */
-#if defined(TARGET_I386)
-            flags = env->hflags;
-            flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
-            cs_base = env->segs[R_CS].base;
-            pc = cs_base + env->eip;
-#elif defined(TARGET_ARM)
-            flags = env->thumb | (env->vfp.vec_len << 1)
-                    | (env->vfp.vec_stride << 4);
-            cs_base = 0;
-            pc = env->regs[15];
-#elif defined(TARGET_SPARC)
-#ifdef TARGET_SPARC64
-            flags = (env->pstate << 2) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
-#else
-            flags = env->psrs | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1);
-#endif
-            cs_base = env->npc;
-            pc = env->pc;
-#elif defined(TARGET_PPC)
-            flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
-                (msr_se << MSR_SE) | (msr_le << MSR_LE);
-            cs_base = 0;
-            pc = env->nip;
-#elif defined(TARGET_MIPS)
-            flags = env->hflags & MIPS_HFLAGS_TMASK;
-            cs_base = NULL;
-            pc = env->PC;
-#else
-#error unsupported CPU
-#endif
-            tb = tb_find(&ptb, pc, cs_base,
-                         flags);
-            if (!tb) {
-                TranslationBlock **ptb1;
-                unsigned int h;
-                target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
-
-
-                spin_lock(&tb_lock);
-
-                tb_invalidated_flag = 0;
-
-                regs_to_env(); /* XXX: do it just before cpu_gen_code() */
-
-                /* find translated block using physical mappings */
-                phys_pc = get_phys_addr_code(env, pc);
-                phys_page1 = phys_pc & TARGET_PAGE_MASK;
-                phys_page2 = -1;
-                h = tb_phys_hash_func(phys_pc);
-                ptb1 = &tb_phys_hash[h];
-                for(;;) {
-                    tb = *ptb1;
-                    if (!tb)
-                        goto not_found;
-                    if (tb->pc == pc &&
-                        tb->page_addr[0] == phys_page1 &&
-                        tb->cs_base == cs_base &&
-                        tb->flags == flags) {
-                        /* check next page if needed */
-                        if (tb->page_addr[1] != -1) {
-                            virt_page2 = (pc & TARGET_PAGE_MASK) +
-                                TARGET_PAGE_SIZE;
-                            phys_page2 = get_phys_addr_code(env, virt_page2);
-                            if (tb->page_addr[1] == phys_page2)
-                                goto found;
-                        } else {
-                            goto found;
-                        }
-                    }
-                    ptb1 = &tb->phys_hash_next;
-                }
-            not_found:
-                /* if no translated code available, then translate it now */
-                tb = tb_alloc(pc);
-                if (!tb) {
-                    /* flush must be done */
-                    tb_flush(env);
-                    /* cannot fail at this point */
-                    tb = tb_alloc(pc);
-                    /* don't forget to invalidate previous TB info */
-                    ptb = &tb_hash[tb_hash_func(pc)];
-                    T0 = 0;
-                }
-                tc_ptr = code_gen_ptr;
-                tb->tc_ptr = tc_ptr;
-                tb->cs_base = cs_base;
-                tb->flags = flags;
-                cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
-                code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
-
-                /* check next page if needed */
-                virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
-                phys_page2 = -1;
-                if ((pc & TARGET_PAGE_MASK) != virt_page2) {
-                    phys_page2 = get_phys_addr_code(env, virt_page2);
-                }
-                tb_link_phys(tb, phys_pc, phys_page2);
-
-            found:
-                if (tb_invalidated_flag) {
-                    /* as some TB could have been invalidated because
-                       of memory exceptions while generating the code, we
-                       must recompute the hash index here */
-                    ptb = &tb_hash[tb_hash_func(pc)];
-                    while (*ptb != NULL)
-                        ptb = &(*ptb)->hash_next;
-                    T0 = 0;
-                }
-                /* we add the TB in the virtual pc hash table */
-                *ptb = tb;
-                tb->hash_next = NULL;
-                tb_link(tb);
-                spin_unlock(&tb_lock);
-            }
+            tb = tb_find_fast();
 #ifdef DEBUG_EXEC
             if ((loglevel & CPU_LOG_EXEC)) {
                 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
@@ -526,9 +559,12 @@ int cpu_exec(CPUState *env1)
 #ifdef __sparc__
             T0 = tmp_T0;
 #endif
-            /* see if we can patch the calling TB. */
+            /* see if we can patch the calling TB. When the TB
+               spans two pages, we cannot safely do a direct
+               jump. */
             {
-                if (T0 != 0
+                if (T0 != 0 &&
+                    tb->page_addr[1] == -1
 #if defined(TARGET_I386) && defined(USE_CODE_COPY)
                     && (tb->cflags & CF_CODE_COPY) ==
                     (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
exec-all.h
@@ -105,9 +105,6 @@ int tlb_set_page(CPUState *env, target_ulong vaddr,
 #define CODE_GEN_MAX_SIZE 65536
 #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */

-#define CODE_GEN_HASH_BITS 15
-#define CODE_GEN_HASH_SIZE (1 << CODE_GEN_HASH_BITS)
-
 #define CODE_GEN_PHYS_HASH_BITS 15
 #define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

@@ -167,7 +164,6 @@ typedef struct TranslationBlock {
 #define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */

     uint8_t *tc_ptr;    /* pointer to the translated code */
-    struct TranslationBlock *hash_next; /* next matching tb for virtual address */
     /* next matching tb for physical address. */
     struct TranslationBlock *phys_hash_next;
     /* first and second physical page containing code. The lower bit
@@ -191,9 +187,9 @@ typedef struct TranslationBlock {
     struct TranslationBlock *jmp_first;
 } TranslationBlock;

-static inline unsigned int tb_hash_func(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
 {
-    return pc & (CODE_GEN_HASH_SIZE - 1);
+    return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
 }

 static inline unsigned int tb_phys_hash_func(unsigned long pc)
@@ -203,41 +199,14 @@ static inline unsigned int tb_phys_hash_func(unsigned long pc)

 TranslationBlock *tb_alloc(target_ulong pc);
 void tb_flush(CPUState *env);
-void tb_link(TranslationBlock *tb);
 void tb_link_phys(TranslationBlock *tb,
                   target_ulong phys_pc, target_ulong phys_page2);

-extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
 extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

 extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
 extern uint8_t *code_gen_ptr;

-/* find a translation block in the translation cache. If not found,
-   return NULL and the pointer to the last element of the list in pptb */
-static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
-                                        target_ulong pc,
-                                        target_ulong cs_base,
-                                        unsigned int flags)
-{
-    TranslationBlock **ptb, *tb;
-    unsigned int h;
-
-    h = tb_hash_func(pc);
-    ptb = &tb_hash[h];
-    for(;;) {
-        tb = *ptb;
-        if (!tb)
-            break;
-        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
-            return tb;
-        ptb = &tb->hash_next;
-    }
-    *pptb = ptb;
-    return NULL;
-}
-
-
 #if defined(USE_DIRECT_JUMP)

 #if defined(__powerpc__)
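One detail worth noting in the exec-all.h hunk above: the old virtual hash simply masked the PC (pc & (CODE_GEN_HASH_SIZE - 1)), while the new tb_jmp_cache_hash_func() XOR-folds the upper bits in first, so code at the same offset in different regions no longer piles into one slot of the much smaller direct-mapped cache. A throwaway comparison, using an illustrative 12-bit cache size (the real TB_JMP_CACHE_BITS is defined outside this diff):

#include <stdio.h>
#include <stdint.h>

#define BITS 12
#define SIZE (1u << BITS)

int main(void)
{
    /* Two PCs 1 MiB apart: identical low bits. */
    uint32_t a = 0x00100000, b = 0x00200000;

    /* Old scheme: plain masking -- both land in slot 0. */
    printf("mask only : %u %u\n", a & (SIZE - 1), b & (SIZE - 1));

    /* New scheme: fold the high bits in first -- the slots now differ. */
    printf("xor + mask: %u %u\n",
           (a ^ (a >> BITS)) & (SIZE - 1),
           (b ^ (b >> BITS)) & (SIZE - 1));
    return 0;
}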
exec.c
@@ -61,7 +61,6 @@
 #endif

 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
-TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
 int nb_tbs;
 /* any access to the tbs or the page table must use this lock */
@@ -92,20 +91,6 @@ typedef struct PhysPageDesc {
     uint32_t phys_offset;
 } PhysPageDesc;

-/* Note: the VirtPage handling is absolete and will be suppressed
-   ASAP */
-typedef struct VirtPageDesc {
-    /* physical address of code page. It is valid only if 'valid_tag'
-       matches 'virt_valid_tag' */
-    target_ulong phys_addr;
-    unsigned int valid_tag;
-#if !defined(CONFIG_SOFTMMU)
-    /* original page access rights. It is valid only if 'valid_tag'
-       matches 'virt_valid_tag' */
-    unsigned int prot;
-#endif
-} VirtPageDesc;
-
 #define L2_BITS 10
 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

@@ -123,17 +108,6 @@ unsigned long qemu_host_page_mask;
 static PageDesc *l1_map[L1_SIZE];
 PhysPageDesc **l1_phys_map;

-#if !defined(CONFIG_USER_ONLY)
-#if TARGET_LONG_BITS > 32
-#define VIRT_L_BITS 9
-#define VIRT_L_SIZE (1 << VIRT_L_BITS)
-static void *l1_virt_map[VIRT_L_SIZE];
-#else
-static VirtPageDesc *l1_virt_map[L1_SIZE];
-#endif
-static unsigned int virt_valid_tag;
-#endif
-
 /* io memory support */
 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
@@ -190,9 +164,6 @@ static void page_init(void)
     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
         qemu_host_page_bits++;
     qemu_host_page_mask = ~(qemu_host_page_size - 1);
-#if !defined(CONFIG_USER_ONLY)
-    virt_valid_tag = 1;
-#endif
     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
 }
@@ -266,120 +237,6 @@ static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                              target_ulong vaddr);
 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr);
-
-static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
-{
-#if TARGET_LONG_BITS > 32
-    void **p, **lp;
-
-    p = l1_virt_map;
-    lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-    p = *lp;
-    if (!p) {
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-        *lp = p;
-    }
-    lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-    p = *lp;
-    if (!p) {
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-        *lp = p;
-    }
-    lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-    p = *lp;
-    if (!p) {
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-        *lp = p;
-    }
-    lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-    p = *lp;
-    if (!p) {
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
-        *lp = p;
-    }
-    lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
-    p = *lp;
-    if (!p) {
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
-        *lp = p;
-    }
-    return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
-#else
-    VirtPageDesc *p, **lp;
-
-    lp = &l1_virt_map[index >> L2_BITS];
-    p = *lp;
-    if (!p) {
-        /* allocate if not found */
-        if (!alloc)
-            return NULL;
-        p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
-        *lp = p;
-    }
-    return p + (index & (L2_SIZE - 1));
-#endif
-}
-
-static inline VirtPageDesc *virt_page_find(target_ulong index)
-{
-    return virt_page_find_alloc(index, 0);
-}
-
-#if TARGET_LONG_BITS > 32
-static void virt_page_flush_internal(void **p, int level)
-{
-    int i;
-    if (level == 0) {
-        VirtPageDesc *q = (VirtPageDesc *)p;
-        for(i = 0; i < VIRT_L_SIZE; i++)
-            q[i].valid_tag = 0;
-    } else {
-        level--;
-        for(i = 0; i < VIRT_L_SIZE; i++) {
-            if (p[i])
-                virt_page_flush_internal(p[i], level);
-        }
-    }
-}
-#endif
-
-static void virt_page_flush(void)
-{
-    virt_valid_tag++;
-
-    if (virt_valid_tag == 0) {
-        virt_valid_tag = 1;
-#if TARGET_LONG_BITS > 32
-        virt_page_flush_internal(l1_virt_map, 5);
-#else
-        {
-            int i, j;
-            VirtPageDesc *p;
-            for(i = 0; i < L1_SIZE; i++) {
-                p = l1_virt_map[i];
-                if (p) {
-                    for(j = 0; j < L2_SIZE; j++)
-                        p[j].valid_tag = 0;
-                }
-            }
-        }
-#endif
-    }
-}
-#else
-static void virt_page_flush(void)
-{
-}
 #endif

 void cpu_exec_init(void)
@@ -429,8 +286,7 @@ void tb_flush(CPUState *env)
            nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
 #endif
     nb_tbs = 0;
-    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
-    virt_page_flush();
+    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
     page_flush_tb();
@@ -566,28 +422,36 @@ static inline void tb_reset_jump(TranslationBlock *tb, int n)
     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
 }

-static inline void tb_invalidate(TranslationBlock *tb)
+static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
 {
+    PageDesc *p;
     unsigned int h, n1;
-    TranslationBlock *tb1, *tb2, **ptb;
+    target_ulong phys_pc;
+    TranslationBlock *tb1, *tb2;

+    /* remove the TB from the hash list */
+    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
+    h = tb_phys_hash_func(phys_pc);
+    tb_remove(&tb_phys_hash[h], tb,
+              offsetof(TranslationBlock, phys_hash_next));
+
+    /* remove the TB from the page list */
+    if (tb->page_addr[0] != page_addr) {
+        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
+        tb_page_remove(&p->first_tb, tb);
+        invalidate_page_bitmap(p);
+    }
+    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
+        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
+        tb_page_remove(&p->first_tb, tb);
+        invalidate_page_bitmap(p);
+    }
+
     tb_invalidated_flag = 1;

     /* remove the TB from the hash list */
-    h = tb_hash_func(tb->pc);
-    ptb = &tb_hash[h];
-    for(;;) {
-        tb1 = *ptb;
-        /* NOTE: the TB is not necessarily linked in the hash. It
-           indicates that it is not currently used */
-        if (tb1 == NULL)
-            return;
-        if (tb1 == tb) {
-            *ptb = tb1->hash_next;
-            break;
-        }
-        ptb = &tb1->hash_next;
-    }
+    h = tb_jmp_cache_hash_func(tb->pc);
+    cpu_single_env->tb_jmp_cache[h] = NULL;

     /* suppress this TB from the two jump lists */
     tb_jmp_remove(tb, 0);
@@ -606,33 +470,7 @@ static inline void tb_invalidate(TranslationBlock *tb)
         tb1 = tb2;
     }
     tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
-}
-
-static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
-{
-    PageDesc *p;
-    unsigned int h;
-    target_ulong phys_pc;
-
-    /* remove the TB from the hash list */
-    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
-    h = tb_phys_hash_func(phys_pc);
-    tb_remove(&tb_phys_hash[h], tb,
-              offsetof(TranslationBlock, phys_hash_next));
-
-    /* remove the TB from the page list */
-    if (tb->page_addr[0] != page_addr) {
-        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
-        tb_page_remove(&p->first_tb, tb);
-        invalidate_page_bitmap(p);
-    }
-    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
-        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
-        tb_page_remove(&p->first_tb, tb);
-        invalidate_page_bitmap(p);
-    }

-    tb_invalidate(tb);
     tb_phys_invalidate_count++;
 }

@@ -1025,57 +863,6 @@ void tb_link_phys(TranslationBlock *tb,
         tb_alloc_page(tb, 1, phys_page2);
     else
         tb->page_addr[1] = -1;
-#ifdef DEBUG_TB_CHECK
-    tb_page_check();
-#endif
-}
-
-/* link the tb with the other TBs */
-void tb_link(TranslationBlock *tb)
-{
-#if !defined(CONFIG_USER_ONLY)
-    {
-        VirtPageDesc *vp;
-        target_ulong addr;
-
-        /* save the code memory mappings (needed to invalidate the code) */
-        addr = tb->pc & TARGET_PAGE_MASK;
-        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
-#ifdef DEBUG_TLB_CHECK
-        if (vp->valid_tag == virt_valid_tag &&
-            vp->phys_addr != tb->page_addr[0]) {
-            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
-                   addr, tb->page_addr[0], vp->phys_addr);
-        }
-#endif
-        vp->phys_addr = tb->page_addr[0];
-        if (vp->valid_tag != virt_valid_tag) {
-            vp->valid_tag = virt_valid_tag;
-#if !defined(CONFIG_SOFTMMU)
-            vp->prot = 0;
-#endif
-        }
-
-        if (tb->page_addr[1] != -1) {
-            addr += TARGET_PAGE_SIZE;
-            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
-#ifdef DEBUG_TLB_CHECK
-            if (vp->valid_tag == virt_valid_tag &&
-                vp->phys_addr != tb->page_addr[1]) {
-                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
-                       addr, tb->page_addr[1], vp->phys_addr);
-            }
-#endif
-            vp->phys_addr = tb->page_addr[1];
-            if (vp->valid_tag != virt_valid_tag) {
-                vp->valid_tag = virt_valid_tag;
-#if !defined(CONFIG_SOFTMMU)
-                vp->prot = 0;
-#endif
-            }
-        }
-    }
-#endif

     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
     tb->jmp_next[0] = NULL;
@@ -1091,6 +878,10 @@ void tb_link(TranslationBlock *tb)
         tb_reset_jump(tb, 0);
     if (tb->tb_next_offset[1] != 0xffff)
         tb_reset_jump(tb, 1);
+
+#ifdef DEBUG_TB_CHECK
+    tb_page_check();
+#endif
 }

 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
@@ -1396,8 +1187,7 @@ void tlb_flush(CPUState *env, int flush_global)
         env->tlb_write[1][i].address = -1;
     }

-    virt_page_flush();
-    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
+    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

 #if !defined(CONFIG_SOFTMMU)
     munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
@@ -1419,9 +1209,7 @@ static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)

 void tlb_flush_page(CPUState *env, target_ulong addr)
 {
-    int i, n;
-    VirtPageDesc *vp;
-    PageDesc *p;
+    int i;
     TranslationBlock *tb;

 #if defined(DEBUG_TLB)
@@ -1438,26 +1226,13 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
     tlb_flush_entry(&env->tlb_read[1][i], addr);
     tlb_flush_entry(&env->tlb_write[1][i], addr);

-    /* remove from the virtual pc hash table all the TB at this
-       virtual address */
-
-    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
-    if (vp && vp->valid_tag == virt_valid_tag) {
-        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
-        if (p) {
-            /* we remove all the links to the TBs in this virtual page */
-            tb = p->first_tb;
-            while (tb != NULL) {
-                n = (long)tb & 3;
-                tb = (TranslationBlock *)((long)tb & ~3);
-                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
-                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
-                    tb_invalidate(tb);
-                }
-                tb = tb->page_next[n];
-            }
+    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
+        tb = env->tb_jmp_cache[i];
+        if (tb &&
+            ((tb->pc & TARGET_PAGE_MASK) == addr ||
+             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
+            env->tb_jmp_cache[i] = NULL;
         }
-        vp->valid_tag = 0;
     }

 #if !defined(CONFIG_SOFTMMU)
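Taken together, the exec.c hunks change what a TLB flush costs on the TB side: tlb_flush() now just zeroes the per-CPU jump cache, and tlb_flush_page() only drops cache entries whose block starts or ends on the flushed page, instead of walking VirtPageDesc tables and invalidating translations. A rough sketch of that flush path, with the structures reduced to the bare minimum and the sizes illustrative rather than QEMU's:

#include <string.h>
#include <stdint.h>

#define PAGE_MASK   (~0xfffu)          /* illustrative 4 KiB pages */
#define CACHE_SIZE  (1u << 12)         /* illustrative; not QEMU's constant */

typedef struct TB { uint32_t pc, size; } TB;

typedef struct CPU {
    TB *tb_jmp_cache[CACHE_SIZE];
    /* ... TLB entries, registers ... */
} CPU;

/* Whole-MMU flush (context switch, global flush): one memset, translations untouched. */
static void flush_all(CPU *cpu)
{
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

/* Single-page flush: forget any cached block that starts or ends on that page;
   the block itself stays in the physically indexed hash, keyed by its pages. */
static void flush_page(CPU *cpu, uint32_t addr)
{
    unsigned int i;
    for (i = 0; i < CACHE_SIZE; i++) {
        TB *tb = cpu->tb_jmp_cache[i];
        if (tb && ((tb->pc & PAGE_MASK) == addr ||
                   ((tb->pc + tb->size - 1) & PAGE_MASK) == addr))
            cpu->tb_jmp_cache[i] = NULL;
    }
}

int main(void)
{
    static CPU cpu;
    flush_page(&cpu, 0x00400000 & PAGE_MASK);
    flush_all(&cpu);
    return 0;
}

The added T0 = 0 lines in the cpu-exec.c interrupt paths play a companion role: after an interrupt is raised, the previously executing block must not be direct-chained to whatever block runs next, so the chaining pointer is cleared and the next lookup goes through the cache again.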