Commit 9fa3e8535317f24a921338a32f1eb18cd46fa22d

Authored by bellard
1 parent 4390df51

new generic TLB support - faster self modifying code support - added ROM memory support


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@518 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 849 additions and 277 deletions
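
The change set below replaces per-virtual-page TB tracking with physical-page tracking, and detects self-modifying code through the soft TLB: the low bits of a TLB entry's address field carry an io index, so a store to a page holding translated code fails the fast-path comparison and is routed to a dedicated handler. A minimal, self-contained model of that comparison (every macro, type and value here is an illustrative stand-in, not the real QEMU definition):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_BITS 12
    #define PAGE_MASK (~((uint32_t)(1u << PAGE_BITS) - 1))
    #define IO_MEM_CODE_MODEL (3u << 3)          /* assumed encoding */

    struct tlb_entry_model { uint32_t address; long addend; };

    static int write_is_fast(const struct tlb_entry_model *e, uint32_t vaddr)
    {
        /* fast path only if the page matches AND no io bits are set */
        return (vaddr & PAGE_MASK) == e->address;
    }

    int main(void)
    {
        struct tlb_entry_model ram  = { 0x2000, 0 };
        struct tlb_entry_model code = { 0x1000 | IO_MEM_CODE_MODEL, 0 };
        printf("ram page:  %d\n", write_is_fast(&ram, 0x2004));   /* 1 */
        printf("code page: %d\n", write_is_fast(&code, 0x1004));  /* 0 */
        return 0;
    }

This is also why tlb_set_page() further down installs page-aligned addresses: the low bits must stay free for the tag.
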
... ... @@ -32,6 +32,7 @@
32 32  
33 33 //#define DEBUG_TB_INVALIDATE
34 34 //#define DEBUG_FLUSH
  35 +//#define DEBUG_TLB
35 36  
36 37 /* make various TB consistency checks */
37 38 //#define DEBUG_TB_CHECK
... ... @@ -39,10 +40,14 @@
39 40 /* threshold to flush the translated code buffer */
40 41 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
41 42  
42   -#define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / 64)
  43 +#define SMC_BITMAP_USE_THRESHOLD 10
  44 +
  45 +#define MMAP_AREA_START 0x00000000
  46 +#define MMAP_AREA_END 0xa8000000
43 47  
44 48 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
45 49 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
  50 +TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
46 51 int nb_tbs;
47 52 /* any access to the tbs or the page table must use this lock */
48 53 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
... ... @@ -50,12 +55,36 @@ spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
50 55 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
51 56 uint8_t *code_gen_ptr;
52 57  
53   -/* XXX: pack the flags in the low bits of the pointer ? */
  58 +int phys_ram_size;
  59 +int phys_ram_fd;
  60 +uint8_t *phys_ram_base;
  61 +
54 62 typedef struct PageDesc {
55   - unsigned long flags;
  63 + /* offset in memory of the page + io_index in the low 12 bits */
  64 + unsigned long phys_offset;
  65 + /* list of TBs intersecting this physical page */
56 66 TranslationBlock *first_tb;
  67 + /* in order to optimize self-modifying code handling, we count the
  68 + number of write lookups to a given page before switching to a bitmap */
  69 + unsigned int code_write_count;
  70 + uint8_t *code_bitmap;
  71 +#if defined(CONFIG_USER_ONLY)
  72 + unsigned long flags;
  73 +#endif
57 74 } PageDesc;
58 75  
  76 +typedef struct VirtPageDesc {
  77 + /* physical address of code page. It is valid only if 'valid_tag'
  78 + matches 'virt_valid_tag' */
  79 + target_ulong phys_addr;
  80 + unsigned int valid_tag;
  81 +#if !defined(CONFIG_SOFTMMU)
  82 + /* original page access rights. It is valid only if 'valid_tag'
  83 + matches 'virt_valid_tag' */
  84 + unsigned int prot;
  85 +#endif
  86 +} VirtPageDesc;
  87 +
59 88 #define L2_BITS 10
60 89 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
61 90  
... ... @@ -71,8 +100,12 @@ unsigned long host_page_mask;
71 100  
72 101 static PageDesc *l1_map[L1_SIZE];
73 102  
  103 +#if !defined(CONFIG_USER_ONLY)
  104 +static VirtPageDesc *l1_virt_map[L1_SIZE];
  105 +static unsigned int virt_valid_tag;
  106 +#endif
  107 +
74 108 /* io memory support */
75   -static unsigned long *l1_physmap[L1_SIZE];
76 109 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
77 110 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
78 111 static int io_mem_nb;
... ... @@ -95,49 +128,9 @@ static void page_init(void)
95 128 while ((1 << host_page_bits) < host_page_size)
96 129 host_page_bits++;
97 130 host_page_mask = ~(host_page_size - 1);
98   -}
99   -
100   -/* dump memory mappings */
101   -void page_dump(FILE *f)
102   -{
103   - unsigned long start, end;
104   - int i, j, prot, prot1;
105   - PageDesc *p;
106   -
107   - fprintf(f, "%-8s %-8s %-8s %s\n",
108   - "start", "end", "size", "prot");
109   - start = -1;
110   - end = -1;
111   - prot = 0;
112   - for(i = 0; i <= L1_SIZE; i++) {
113   - if (i < L1_SIZE)
114   - p = l1_map[i];
115   - else
116   - p = NULL;
117   - for(j = 0;j < L2_SIZE; j++) {
118   - if (!p)
119   - prot1 = 0;
120   - else
121   - prot1 = p[j].flags;
122   - if (prot1 != prot) {
123   - end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
124   - if (start != -1) {
125   - fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
126   - start, end, end - start,
127   - prot & PAGE_READ ? 'r' : '-',
128   - prot & PAGE_WRITE ? 'w' : '-',
129   - prot & PAGE_EXEC ? 'x' : '-');
130   - }
131   - if (prot1 != 0)
132   - start = end;
133   - else
134   - start = -1;
135   - prot = prot1;
136   - }
137   - if (!p)
138   - break;
139   - }
140   - }
  131 +#if !defined(CONFIG_USER_ONLY)
  132 + virt_valid_tag = 1;
  133 +#endif
141 134 }
142 135  
143 136 static inline PageDesc *page_find_alloc(unsigned int index)
... ... @@ -165,42 +158,59 @@ static inline PageDesc *page_find(unsigned int index)
165 158 return p + (index & (L2_SIZE - 1));
166 159 }
167 160  
168   -int page_get_flags(unsigned long address)
  161 +#if !defined(CONFIG_USER_ONLY)
  162 +static void tlb_protect_code(CPUState *env, uint32_t addr);
  163 +static void tlb_unprotect_code(CPUState *env, uint32_t addr);
  164 +static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr);
  165 +
  166 +static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
169 167 {
170   - PageDesc *p;
  168 + VirtPageDesc **lp, *p;
171 169  
172   - p = page_find(address >> TARGET_PAGE_BITS);
  170 + lp = &l1_virt_map[index >> L2_BITS];
  171 + p = *lp;
  172 + if (!p) {
  173 + /* allocate if not found */
  174 + p = malloc(sizeof(VirtPageDesc) * L2_SIZE);
  175 + memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
  176 + *lp = p;
  177 + }
  178 + return p + (index & (L2_SIZE - 1));
  179 +}
  180 +
  181 +static inline VirtPageDesc *virt_page_find(unsigned int index)
  182 +{
  183 + VirtPageDesc *p;
  184 +
  185 + p = l1_virt_map[index >> L2_BITS];
173 186 if (!p)
174 187 return 0;
175   - return p->flags;
  188 + return p + (index & (L2_SIZE - 1));
176 189 }
177 190  
178   -/* modify the flags of a page and invalidate the code if
179   - necessary. The flag PAGE_WRITE_ORG is positionned automatically
180   - depending on PAGE_WRITE */
181   -void page_set_flags(unsigned long start, unsigned long end, int flags)
  191 +static void virt_page_flush(void)
182 192 {
183   - PageDesc *p;
184   - unsigned long addr;
185   -
186   - start = start & TARGET_PAGE_MASK;
187   - end = TARGET_PAGE_ALIGN(end);
188   - if (flags & PAGE_WRITE)
189   - flags |= PAGE_WRITE_ORG;
190   - spin_lock(&tb_lock);
191   - for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
192   - p = page_find_alloc(addr >> TARGET_PAGE_BITS);
193   - /* if the write protection is set, then we invalidate the code
194   - inside */
195   - if (!(p->flags & PAGE_WRITE) &&
196   - (flags & PAGE_WRITE) &&
197   - p->first_tb) {
198   - tb_invalidate_page(addr);
  193 + int i, j;
  194 + VirtPageDesc *p;
  195 +
  196 + virt_valid_tag++;
  197 +
  198 + if (virt_valid_tag == 0) {
  199 + virt_valid_tag = 1;
  200 + for(i = 0; i < L1_SIZE; i++) {
  201 + p = l1_virt_map[i];
  202 + if (p) {
  203 + for(j = 0; j < L2_SIZE; j++)
  204 + p[j].valid_tag = 0;
  205 + }
199 206 }
200   - p->flags = flags;
201 207 }
202   - spin_unlock(&tb_lock);
203 208 }
  209 +#else
  210 +static void virt_page_flush(void)
  211 +{
  212 +}
  213 +#endif
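
The generation-tag trick above makes virt_page_flush() O(1) in the common case: bumping virt_valid_tag instantly invalidates every VirtPageDesc, because lookups compare the stored tag; only on the rare wraparound to 0 must all entries really be cleared. A self-contained sketch of the idiom (names are illustrative, not the QEMU ones):

    #include <string.h>

    #define N_ENTRIES 1024

    struct tagged_entry { unsigned int tag; int payload; };

    static unsigned int cur_tag = 1;
    static struct tagged_entry entries[N_ENTRIES];

    static void flush_all(void)
    {
        if (++cur_tag == 0) {          /* tag wrapped: do a real O(n) sweep */
            memset(entries, 0, sizeof(entries));
            cur_tag = 1;
        }
    }

    static int lookup(int i, int *out)
    {
        if (entries[i].tag != cur_tag)
            return 0;                  /* entry predates the last flush */
        *out = entries[i].payload;
        return 1;
    }

    int main(void)
    {
        int v;
        entries[7].tag = cur_tag;
        entries[7].payload = 42;
        flush_all();                   /* O(1): just bumps cur_tag */
        return lookup(7, &v);          /* 0: entry is now stale */
    }
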
204 214  
205 215 void cpu_exec_init(void)
206 216 {
... ... @@ -211,6 +221,15 @@ void cpu_exec_init(void)
211 221 }
212 222 }
213 223  
  224 +static inline void invalidate_page_bitmap(PageDesc *p)
  225 +{
  226 + if (p->code_bitmap) {
  227 + free(p->code_bitmap);
  228 + p->code_bitmap = NULL;
  229 + }
  230 + p->code_write_count = 0;
  231 +}
  232 +
214 233 /* set to NULL all the 'first_tb' fields in all PageDescs */
215 234 static void page_flush_tb(void)
216 235 {
... ... @@ -220,8 +239,11 @@ static void page_flush_tb(void)
220 239 for(i = 0; i < L1_SIZE; i++) {
221 240 p = l1_map[i];
222 241 if (p) {
223   - for(j = 0; j < L2_SIZE; j++)
224   - p[j].first_tb = NULL;
  242 + for(j = 0; j < L2_SIZE; j++) {
  243 + p->first_tb = NULL;
  244 + invalidate_page_bitmap(p);
  245 + p++;
  246 + }
225 247 }
226 248 }
227 249 }
... ... @@ -244,7 +266,12 @@ void tb_flush(CPUState *env)
244 266 nb_tbs = 0;
245 267 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
246 268 tb_hash[i] = NULL;
  269 + virt_page_flush();
  270 +
  271 + for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
  272 + tb_phys_hash[i] = NULL;
247 273 page_flush_tb();
  274 +
248 275 code_gen_ptr = code_gen_buffer;
249 276 /* XXX: flush processor icache at this point if cache flush is
250 277 expensive */
... ... @@ -323,6 +350,23 @@ static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
323 350 }
324 351 }
325 352  
  353 +static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
  354 +{
  355 + TranslationBlock *tb1;
  356 + unsigned int n1;
  357 +
  358 + for(;;) {
  359 + tb1 = *ptb;
  360 + n1 = (long)tb1 & 3;
  361 + tb1 = (TranslationBlock *)((long)tb1 & ~3);
  362 + if (tb1 == tb) {
  363 + *ptb = tb1->page_next[n1];
  364 + break;
  365 + }
  366 + ptb = &tb1->page_next[n1];
  367 + }
  368 +}
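
tb_page_remove() above decodes which of a TB's (at most two) pages a given list link traverses from the two low bits of the pointer itself; those bits are free because TranslationBlock pointers are at least 4-byte aligned. A standalone sketch of the tagging (illustrative struct, not the real one):

    #include <stdint.h>
    #include <assert.h>

    struct node { struct node *page_next[2]; };

    static struct node *tag_ptr(struct node *n, unsigned idx)
    {
        return (struct node *)((uintptr_t)n | idx);   /* idx is 0 or 1 */
    }

    static unsigned ptr_tag(struct node *n) { return (uintptr_t)n & 3; }

    static struct node *untag_ptr(struct node *n)
    {
        return (struct node *)((uintptr_t)n & ~(uintptr_t)3);
    }

    int main(void)
    {
        struct node a = { { 0, 0 } };
        struct node *link = tag_ptr(&a, 1);   /* traverse via page_next[1] */
        assert(untag_ptr(link) == &a && ptr_tag(link) == 1);
        return 0;
    }
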
  369 +
326 370 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
327 371 {
328 372 TranslationBlock *tb1, **ptb;
... ... @@ -358,31 +402,27 @@ static inline void tb_reset_jump(TranslationBlock *tb, int n)
358 402 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
359 403 }
360 404  
361   -static inline void tb_invalidate(TranslationBlock *tb, int parity)
  405 +static inline void tb_invalidate(TranslationBlock *tb)
362 406 {
363   - PageDesc *p;
364   - unsigned int page_index1, page_index2;
365 407 unsigned int h, n1;
366   - TranslationBlock *tb1, *tb2;
  408 + TranslationBlock *tb1, *tb2, **ptb;
367 409  
368 410 tb_invalidated_flag = 1;
369 411  
370 412 /* remove the TB from the hash list */
371 413 h = tb_hash_func(tb->pc);
372   - tb_remove(&tb_hash[h], tb,
373   - offsetof(TranslationBlock, hash_next));
374   - /* remove the TB from the page list */
375   - page_index1 = tb->pc >> TARGET_PAGE_BITS;
376   - if ((page_index1 & 1) == parity) {
377   - p = page_find(page_index1);
378   - tb_remove(&p->first_tb, tb,
379   - offsetof(TranslationBlock, page_next[page_index1 & 1]));
380   - }
381   - page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
382   - if ((page_index2 & 1) == parity) {
383   - p = page_find(page_index2);
384   - tb_remove(&p->first_tb, tb,
385   - offsetof(TranslationBlock, page_next[page_index2 & 1]));
  414 + ptb = &tb_hash[h];
  415 + for(;;) {
  416 + tb1 = *ptb;
  417 + /* NOTE: the TB is not necessarily linked in the hash; its
  418 + absence means it is not currently used */
  419 + if (tb1 == NULL)
  420 + return;
  421 + if (tb1 == tb) {
  422 + *ptb = tb1->hash_next;
  423 + break;
  424 + }
  425 + ptb = &tb1->hash_next;
386 426 }
387 427  
388 428 /* suppress this TB from the two jump lists */
... ... @@ -404,66 +444,276 @@ static inline void tb_invalidate(TranslationBlock *tb, int parity)
404 444 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
405 445 }
406 446  
407   -/* invalidate all TBs which intersect with the target page starting at addr */
408   -void tb_invalidate_page(unsigned long address)
  447 +static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
409 448 {
410   - TranslationBlock *tb_next, *tb;
411   - unsigned int page_index;
412   - int parity1, parity2;
413 449 PageDesc *p;
414   -#ifdef DEBUG_TB_INVALIDATE
415   - printf("tb_invalidate_page: %lx\n", address);
  450 + unsigned int h;
  451 + target_ulong phys_pc;
  452 +
  453 + /* remove the TB from the hash list */
  454 + phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
  455 + h = tb_phys_hash_func(phys_pc);
  456 + tb_remove(&tb_phys_hash[h], tb,
  457 + offsetof(TranslationBlock, phys_hash_next));
  458 +
  459 + /* remove the TB from the page list */
  460 + if (tb->page_addr[0] != page_addr) {
  461 + p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
  462 + tb_page_remove(&p->first_tb, tb);
  463 + invalidate_page_bitmap(p);
  464 + }
  465 + if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
  466 + p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
  467 + tb_page_remove(&p->first_tb, tb);
  468 + invalidate_page_bitmap(p);
  469 + }
  470 +
  471 + tb_invalidate(tb);
  472 +}
  473 +
  474 +static inline void set_bits(uint8_t *tab, int start, int len)
  475 +{
  476 + int end, mask, end1;
  477 +
  478 + end = start + len;
  479 + tab += start >> 3;
  480 + mask = 0xff << (start & 7);
  481 + if ((start & ~7) == (end & ~7)) {
  482 + if (start < end) {
  483 + mask &= ~(0xff << (end & 7));
  484 + *tab |= mask;
  485 + }
  486 + } else {
  487 + *tab++ |= mask;
  488 + start = (start + 8) & ~7;
  489 + end1 = end & ~7;
  490 + while (start < end1) {
  491 + *tab++ = 0xff;
  492 + start += 8;
  493 + }
  494 + if (start < end) {
  495 + mask = ~(0xff << (end & 7));
  496 + *tab |= mask;
  497 + }
  498 + }
  499 +}
  500 +
  501 +static void build_page_bitmap(PageDesc *p)
  502 +{
  503 + int n, tb_start, tb_end;
  504 + TranslationBlock *tb;
  505 +
  506 + p->code_bitmap = malloc(TARGET_PAGE_SIZE / 8);
  507 + if (!p->code_bitmap)
  508 + return;
  509 + memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
  510 +
  511 + tb = p->first_tb;
  512 + while (tb != NULL) {
  513 + n = (long)tb & 3;
  514 + tb = (TranslationBlock *)((long)tb & ~3);
  515 + /* NOTE: this is subtle as a TB may span two physical pages */
  516 + if (n == 0) {
  517 + /* NOTE: tb_end may be after the end of the page, but
  518 + it is not a problem */
  519 + tb_start = tb->pc & ~TARGET_PAGE_MASK;
  520 + tb_end = tb_start + tb->size;
  521 + if (tb_end > TARGET_PAGE_SIZE)
  522 + tb_end = TARGET_PAGE_SIZE;
  523 + } else {
  524 + tb_start = 0;
  525 + tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
  526 + }
  527 + set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
  528 + tb = tb->page_next[n];
  529 + }
  530 +}
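
build_page_bitmap() above allocates one bit per byte of the guest page (TARGET_PAGE_SIZE / 8, i.e. 512 bytes for a 4 KB page) and marks the byte ranges occupied by each TB, so later small writes can be tested without walking the TB list. A quick standalone check of the set_bits() semantics, assuming the definition above is pasted into the same file:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t bitmap[4096 / 8];            /* 512 bytes for a 4 KB page */
        memset(bitmap, 0, sizeof(bitmap));
        set_bits(bitmap, 5, 7);              /* mark page bytes 5..11 */
        printf("%02x %02x\n", bitmap[0], bitmap[1]);   /* prints: e0 0f */
        return 0;
    }
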
  531 +
  532 +/* invalidate all TBs which intersect with the target physical page
  533 + starting in range [start, end[. NOTE: start and end must refer to
  534 + the same physical page */
  535 +static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end)
  536 +{
  537 + int n;
  538 + PageDesc *p;
  539 + TranslationBlock *tb, *tb_next;
  540 + target_ulong tb_start, tb_end;
  541 +
  542 + p = page_find(start >> TARGET_PAGE_BITS);
  543 + if (!p)
  544 + return;
  545 + if (!p->code_bitmap &&
  546 + ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
  547 + /* build code bitmap */
  548 + build_page_bitmap(p);
  549 + }
  550 +
  551 + /* we remove all the TBs in the range [start, end[ */
  552 + /* XXX: see if in some cases it could be faster to invalidate all the code */
  553 + tb = p->first_tb;
  554 + while (tb != NULL) {
  555 + n = (long)tb & 3;
  556 + tb = (TranslationBlock *)((long)tb & ~3);
  557 + tb_next = tb->page_next[n];
  558 + /* NOTE: this is subtle as a TB may span two physical pages */
  559 + if (n == 0) {
  560 + /* NOTE: tb_end may be after the end of the page, but
  561 + it is not a problem */
  562 + tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
  563 + tb_end = tb_start + tb->size;
  564 + } else {
  565 + tb_start = tb->page_addr[1];
  566 + tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
  567 + }
  568 + if (!(tb_end <= start || tb_start >= end)) {
  569 + tb_phys_invalidate(tb, -1);
  570 + }
  571 + tb = tb_next;
  572 + }
  573 +#if !defined(CONFIG_USER_ONLY)
  574 + /* if no code remaining, no need to continue to use slow writes */
  575 + if (!p->first_tb) {
  576 + invalidate_page_bitmap(p);
  577 + tlb_unprotect_code_phys(cpu_single_env, start);
  578 + }
416 579 #endif
  580 +}
417 581  
418   - page_index = address >> TARGET_PAGE_BITS;
419   - p = page_find(page_index);
420   - if (!p)
  582 +/* len must be <= 8 and start must be a multiple of len */
  583 +static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
  584 +{
  585 + PageDesc *p;
  586 + int offset, b;
  587 +
  588 + p = page_find(start >> TARGET_PAGE_BITS);
  589 + if (!p)
  590 + return;
  591 + if (p->code_bitmap) {
  592 + offset = start & ~TARGET_PAGE_MASK;
  593 + b = p->code_bitmap[offset >> 3] >> (offset & 7);
  594 + if (b & ((1 << len) - 1))
  595 + goto do_invalidate;
  596 + } else {
  597 + do_invalidate:
  598 + tb_invalidate_phys_page_range(start, start + len);
  599 + }
  600 +}
  601 +
  602 +/* invalidate all TBs which intersect with the target virtual page
  603 + starting in range [start, end[. This function is usually used when
  604 + the target processor flushes its I-cache. NOTE: start and end must
  605 + refer to the same physical page */
  606 +void tb_invalidate_page_range(target_ulong start, target_ulong end)
  607 +{
  608 + int n;
  609 + PageDesc *p;
  610 + TranslationBlock *tb, *tb_next;
  611 + target_ulong pc;
  612 + target_ulong phys_start;
  613 +
  614 +#if !defined(CONFIG_USER_ONLY)
  615 + {
  616 + VirtPageDesc *vp;
  617 + vp = virt_page_find(start >> TARGET_PAGE_BITS);
  618 + if (!vp)
  619 + return;
  620 + if (vp->valid_tag != virt_valid_tag)
  621 + return;
  622 + phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
  623 + }
  624 +#else
  625 + phys_start = start;
  626 +#endif
  627 + p = page_find(phys_start >> TARGET_PAGE_BITS);
  628 + if (!p)
421 629 return;
  630 + /* we remove all the TBs in the range [start, end[ */
  631 + /* XXX: see if in some cases it could be faster to invalidate all the code */
422 632 tb = p->first_tb;
423   - parity1 = page_index & 1;
424   - parity2 = parity1 ^ 1;
425 633 while (tb != NULL) {
426   - tb_next = tb->page_next[parity1];
427   - tb_invalidate(tb, parity2);
  634 + n = (long)tb & 3;
  635 + tb = (TranslationBlock *)((long)tb & ~3);
  636 + tb_next = tb->page_next[n];
  637 + pc = tb->pc;
  638 + if (!((pc + tb->size) <= start || pc >= end)) {
  639 + tb_phys_invalidate(tb, -1);
  640 + }
428 641 tb = tb_next;
429 642 }
  643 +#if !defined(CONFIG_USER_ONLY)
  644 + /* if no code remaining, no need to continue to use slow writes */
  645 + if (!p->first_tb)
  646 + tlb_unprotect_code(cpu_single_env, start);
  647 +#endif
  648 +}
  649 +
  650 +#if !defined(CONFIG_SOFTMMU)
  651 +static void tb_invalidate_phys_page(target_ulong addr)
  652 +{
  653 + int n;
  654 + PageDesc *p;
  655 + TranslationBlock *tb;
  656 +
  657 + addr &= TARGET_PAGE_MASK;
  658 + p = page_find(addr >> TARGET_PAGE_BITS);
  659 + if (!p)
  660 + return;
  661 + tb = p->first_tb;
  662 + while (tb != NULL) {
  663 + n = (long)tb & 3;
  664 + tb = (TranslationBlock *)((long)tb & ~3);
  665 + tb_phys_invalidate(tb, addr);
  666 + tb = tb->page_next[n];
  667 + }
430 668 p->first_tb = NULL;
431 669 }
  670 +#endif
432 671  
433 672 /* add the tb in the target page and protect it if necessary */
434   -static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
  673 +static inline void tb_alloc_page(TranslationBlock *tb,
  674 + unsigned int n, unsigned int page_addr)
435 675 {
436 676 PageDesc *p;
437   - unsigned long host_start, host_end, addr, page_addr;
438   - int prot;
  677 + TranslationBlock *last_first_tb;
  678 +
  679 + tb->page_addr[n] = page_addr;
  680 + p = page_find(page_addr >> TARGET_PAGE_BITS);
  681 + tb->page_next[n] = p->first_tb;
  682 + last_first_tb = p->first_tb;
  683 + p->first_tb = (TranslationBlock *)((long)tb | n);
  684 + invalidate_page_bitmap(p);
439 685  
440   - p = page_find_alloc(page_index);
441   - tb->page_next[page_index & 1] = p->first_tb;
442   - p->first_tb = tb;
  686 +#if defined(CONFIG_USER_ONLY)
443 687 if (p->flags & PAGE_WRITE) {
  688 + unsigned long host_start, host_end, addr;
  689 + int prot;
  690 +
444 691 /* force the host page as non writable (writes will have a
445 692 page fault + mprotect overhead) */
446   - page_addr = (page_index << TARGET_PAGE_BITS);
447 693 host_start = page_addr & host_page_mask;
448 694 host_end = host_start + host_page_size;
449 695 prot = 0;
450 696 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
451 697 prot |= page_get_flags(addr);
452   -#if !defined(CONFIG_SOFTMMU)
453 698 mprotect((void *)host_start, host_page_size,
454 699 (prot & PAGE_BITS) & ~PAGE_WRITE);
455   -#endif
456   -#if !defined(CONFIG_USER_ONLY)
457   - /* suppress soft TLB */
458   - /* XXX: must flush on all processor with same address space */
459   - tlb_flush_page_write(cpu_single_env, host_start);
460   -#endif
461 700 #ifdef DEBUG_TB_INVALIDATE
462 701 printf("protecting code page: 0x%08lx\n",
463 702 host_start);
464 703 #endif
465 704 p->flags &= ~PAGE_WRITE;
466 705 }
  706 +#else
  707 + /* if some code is already present, then the pages are already
  708 + protected, so we only need to handle the case where the first
  709 + TB is allocated in a physical page */
  710 + if (!last_first_tb) {
  711 + target_ulong virt_addr;
  712 +
  713 + virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
  714 + tlb_protect_code(cpu_single_env, virt_addr);
  715 + }
  716 +#endif
467 717 }
468 718  
469 719 /* Allocate a new translation block. Flush the translation buffer if
... ... @@ -480,21 +730,54 @@ TranslationBlock *tb_alloc(unsigned long pc)
480 730 return tb;
481 731 }
482 732  
483   -/* link the tb with the other TBs */
484   -void tb_link(TranslationBlock *tb)
  733 +/* add a new TB and link it to the physical page tables. phys_page2 is
  734 + (-1) to indicate that only one page contains the TB. */
  735 +void tb_link_phys(TranslationBlock *tb,
  736 + target_ulong phys_pc, target_ulong phys_page2)
485 737 {
486   - unsigned int page_index1, page_index2;
  738 + unsigned int h;
  739 + TranslationBlock **ptb;
  740 +
  741 + /* add in the physical hash table */
  742 + h = tb_phys_hash_func(phys_pc);
  743 + ptb = &tb_phys_hash[h];
  744 + tb->phys_hash_next = *ptb;
  745 + *ptb = tb;
487 746  
488 747 /* add in the page list */
489   - page_index1 = tb->pc >> TARGET_PAGE_BITS;
490   - tb_alloc_page(tb, page_index1);
491   - page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
492   - if (page_index2 != page_index1) {
493   - tb_alloc_page(tb, page_index2);
494   - }
  748 + tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
  749 + if (phys_page2 != -1)
  750 + tb_alloc_page(tb, 1, phys_page2);
  751 + else
  752 + tb->page_addr[1] = -1;
495 753 #ifdef DEBUG_TB_CHECK
496 754 tb_page_check();
497 755 #endif
  756 +}
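
tb_link_phys() above receives phys_page2 == -1 when the TB fits in one page; that decision belongs to the caller in the translator, which is not part of this file. A standalone model of the page-spanning test, with xlat_model() standing in for the real MMU translation (an assumed helper):

    #include <stdio.h>

    #define PAGE_BITS 12
    #define PAGE_MASK (~((1ul << PAGE_BITS) - 1))

    /* stand-in for the MMU translation of a code address */
    static unsigned long xlat_model(unsigned long va) { return va + 0x100000ul; }

    int main(void)
    {
        unsigned long pc = 0x8048ff0ul, size = 0x30ul; /* TB crosses a page end */
        unsigned long phys_pc = xlat_model(pc);
        unsigned long phys_page2 = (unsigned long)-1;
        unsigned long virt_page2 = (pc + size - 1) & PAGE_MASK;
        if (virt_page2 != (pc & PAGE_MASK))
            phys_page2 = xlat_model(virt_page2);       /* second page needed */
        /* tb_link_phys(tb, phys_pc, phys_page2); */
        printf("phys_pc=%#lx phys_page2=%#lx\n", phys_pc, phys_page2);
        return 0;
    }
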
  757 +
  758 +/* link the tb with the other TBs */
  759 +void tb_link(TranslationBlock *tb)
  760 +{
  761 +#if !defined(CONFIG_USER_ONLY)
  762 + {
  763 + VirtPageDesc *vp;
  764 + target_ulong addr;
  765 +
  766 + /* save the code memory mappings (needed to invalidate the code) */
  767 + addr = tb->pc & TARGET_PAGE_MASK;
  768 + vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
  769 + vp->phys_addr = tb->page_addr[0];
  770 + vp->valid_tag = virt_valid_tag;
  771 +
  772 + if (tb->page_addr[1] != -1) {
  773 + addr += TARGET_PAGE_SIZE;
  774 + vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
  775 + vp->phys_addr = tb->page_addr[1];
  776 + vp->valid_tag = virt_valid_tag;
  777 + }
  778 + }
  779 +#endif
  780 +
498 781 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
499 782 tb->jmp_next[0] = NULL;
500 783 tb->jmp_next[1] = NULL;
... ... @@ -506,69 +789,13 @@ void tb_link(TranslationBlock *tb)
506 789 tb_reset_jump(tb, 1);
507 790 }
508 791  
509   -/* called from signal handler: invalidate the code and unprotect the
510   - page. Return TRUE if the fault was succesfully handled. */
511   -int page_unprotect(unsigned long address)
  792 +/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
  793 + tb[1].tc_ptr. Return NULL if not found */
  794 +TranslationBlock *tb_find_pc(unsigned long tc_ptr)
512 795 {
513   - unsigned int page_index, prot, pindex;
514   - PageDesc *p, *p1;
515   - unsigned long host_start, host_end, addr;
516   -
517   - host_start = address & host_page_mask;
518   - page_index = host_start >> TARGET_PAGE_BITS;
519   - p1 = page_find(page_index);
520   - if (!p1)
521   - return 0;
522   - host_end = host_start + host_page_size;
523   - p = p1;
524   - prot = 0;
525   - for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
526   - prot |= p->flags;
527   - p++;
528   - }
529   - /* if the page was really writable, then we change its
530   - protection back to writable */
531   - if (prot & PAGE_WRITE_ORG) {
532   - pindex = (address - host_start) >> TARGET_PAGE_BITS;
533   - if (!(p1[pindex].flags & PAGE_WRITE)) {
534   -#if !defined(CONFIG_SOFTMMU)
535   - mprotect((void *)host_start, host_page_size,
536   - (prot & PAGE_BITS) | PAGE_WRITE);
537   -#endif
538   - p1[pindex].flags |= PAGE_WRITE;
539   - /* and since the content will be modified, we must invalidate
540   - the corresponding translated code. */
541   - tb_invalidate_page(address);
542   -#ifdef DEBUG_TB_CHECK
543   - tb_invalidate_check(address);
544   -#endif
545   - return 1;
546   - }
547   - }
548   - return 0;
549   -}
550   -
551   -/* call this function when system calls directly modify a memory area */
552   -void page_unprotect_range(uint8_t *data, unsigned long data_size)
553   -{
554   - unsigned long start, end, addr;
555   -
556   - start = (unsigned long)data;
557   - end = start + data_size;
558   - start &= TARGET_PAGE_MASK;
559   - end = TARGET_PAGE_ALIGN(end);
560   - for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
561   - page_unprotect(addr);
562   - }
563   -}
564   -
565   -/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
566   - tb[1].tc_ptr. Return NULL if not found */
567   -TranslationBlock *tb_find_pc(unsigned long tc_ptr)
568   -{
569   - int m_min, m_max, m;
570   - unsigned long v;
571   - TranslationBlock *tb;
  796 + int m_min, m_max, m;
  797 + unsigned long v;
  798 + TranslationBlock *tb;
572 799  
573 800 if (nb_tbs <= 0)
574 801 return NULL;
... ... @@ -655,7 +882,7 @@ int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
655 882 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
656 883 return -1;
657 884 env->breakpoints[env->nb_breakpoints++] = pc;
658   - tb_invalidate_page(pc);
  885 + tb_invalidate_page_range(pc, pc + 1);
659 886 return 0;
660 887 #else
661 888 return -1;
... ... @@ -676,7 +903,7 @@ int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
676 903 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
677 904 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
678 905 env->nb_breakpoints--;
679   - tb_invalidate_page(pc);
  906 + tb_invalidate_page_range(pc, pc + 1);
680 907 return 0;
681 908 #else
682 909 return -1;
... ... @@ -691,6 +918,7 @@ void cpu_single_step(CPUState *env, int enabled)
691 918 if (env->singlestep_enabled != enabled) {
692 919 env->singlestep_enabled = enabled;
693 920 /* must flush all the translated code to avoid inconsistencies */
  921 + /* XXX: only flush what is necessary */
694 922 tb_flush(env);
695 923 }
696 924 #endif
... ... @@ -706,7 +934,15 @@ void cpu_set_log(int log_flags)
706 934 perror(logfilename);
707 935 _exit(1);
708 936 }
  937 +#if !defined(CONFIG_SOFTMMU)
  938 + /* avoid glibc's internal mmap() use by setting the stdio buffer by hand */
  939 + {
  940 + static uint8_t logfile_buf[4096];
  941 + setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
  942 + }
  943 +#else
709 944 setvbuf(logfile, NULL, _IOLBF, 0);
  945 +#endif
710 946 }
711 947 }
712 948  
... ... @@ -747,54 +983,13 @@ void cpu_abort(CPUState *env, const char *fmt, ...)
747 983  
748 984 #if !defined(CONFIG_USER_ONLY)
749 985  
750   -/* unmap all maped pages and flush all associated code */
751   -static void page_unmap(CPUState *env)
752   -{
753   - PageDesc *pmap;
754   - int i;
755   -
756   - for(i = 0; i < L1_SIZE; i++) {
757   - pmap = l1_map[i];
758   - if (pmap) {
759   -#if !defined(CONFIG_SOFTMMU)
760   - PageDesc *p;
761   - unsigned long addr;
762   - int j, ret, j1;
763   -
764   - p = pmap;
765   - for(j = 0;j < L2_SIZE;) {
766   - if (p->flags & PAGE_VALID) {
767   - addr = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
768   - /* we try to find a range to make less syscalls */
769   - j1 = j;
770   - p++;
771   - j++;
772   - while (j < L2_SIZE && (p->flags & PAGE_VALID)) {
773   - p++;
774   - j++;
775   - }
776   - ret = munmap((void *)addr, (j - j1) << TARGET_PAGE_BITS);
777   - if (ret != 0) {
778   - fprintf(stderr, "Could not unmap page 0x%08lx\n", addr);
779   - exit(1);
780   - }
781   - } else {
782   - p++;
783   - j++;
784   - }
785   - }
786   -#endif
787   - free(pmap);
788   - l1_map[i] = NULL;
789   - }
790   - }
791   - tb_flush(env);
792   -}
793   -
794 986 void tlb_flush(CPUState *env)
795 987 {
796 988 int i;
797 989  
  990 +#if defined(DEBUG_TLB)
  991 + printf("tlb_flush:\n");
  992 +#endif
798 993 /* must reset current TB so that interrupts cannot modify the
799 994 links while we are modifying them */
800 995 env->current_tb = NULL;
... ... @@ -805,8 +1000,14 @@ void tlb_flush(CPUState *env)
805 1000 env->tlb_read[1][i].address = -1;
806 1001 env->tlb_write[1][i].address = -1;
807 1002 }
808   - /* XXX: avoid flushing the TBs */
809   - page_unmap(env);
  1003 +
  1004 + virt_page_flush();
  1005 + for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
  1006 + tb_hash[i] = NULL;
  1007 +
  1008 +#if !defined(CONFIG_SOFTMMU)
  1009 + munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
  1010 +#endif
810 1011 }
811 1012  
812 1013 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
... ... @@ -818,8 +1019,14 @@ static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
818 1019  
819 1020 void tlb_flush_page(CPUState *env, uint32_t addr)
820 1021 {
821   - int i, flags;
  1022 + int i, n;
  1023 + VirtPageDesc *vp;
  1024 + PageDesc *p;
  1025 + TranslationBlock *tb;
822 1026  
  1027 +#if defined(DEBUG_TLB)
  1028 + printf("tlb_flush_page: 0x%08x\n", addr);
  1029 +#endif
823 1030 /* must reset current TB so that interrupts cannot modify the
824 1031 links while we are modifying them */
825 1032 env->current_tb = NULL;
... ... @@ -831,25 +1038,240 @@ void tlb_flush_page(CPUState *env, uint32_t addr)
831 1038 tlb_flush_entry(&env->tlb_read[1][i], addr);
832 1039 tlb_flush_entry(&env->tlb_write[1][i], addr);
833 1040  
834   - flags = page_get_flags(addr);
835   - if (flags & PAGE_VALID) {
  1041 + /* remove from the virtual pc hash table all the TB at this
  1042 + virtual address */
  1043 +
  1044 + vp = virt_page_find(addr >> TARGET_PAGE_BITS);
  1045 + if (vp && vp->valid_tag == virt_valid_tag) {
  1046 + p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
  1047 + if (p) {
  1048 + /* we remove all the links to the TBs in this virtual page */
  1049 + tb = p->first_tb;
  1050 + while (tb != NULL) {
  1051 + n = (long)tb & 3;
  1052 + tb = (TranslationBlock *)((long)tb & ~3);
  1053 + if ((tb->pc & TARGET_PAGE_MASK) == addr ||
  1054 + ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
  1055 + tb_invalidate(tb);
  1056 + }
  1057 + tb = tb->page_next[n];
  1058 + }
  1059 + }
  1060 + }
  1061 +
836 1062 #if !defined(CONFIG_SOFTMMU)
  1063 + if (addr < MMAP_AREA_END)
837 1064 munmap((void *)addr, TARGET_PAGE_SIZE);
838 1065 #endif
839   - page_set_flags(addr, addr + TARGET_PAGE_SIZE, 0);
  1066 +}
  1067 +
  1068 +static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
  1069 +{
  1070 + if (addr == (tlb_entry->address &
  1071 + (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
  1072 + (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
  1073 + tlb_entry->address |= IO_MEM_CODE;
  1074 + tlb_entry->addend -= (unsigned long)phys_ram_base;
840 1075 }
841 1076 }
842 1077  
843   -/* make all write to page 'addr' trigger a TLB exception to detect
844   - self modifying code */
845   -void tlb_flush_page_write(CPUState *env, uint32_t addr)
  1078 +/* update the TLBs so that writes to code in the virtual page 'addr'
  1079 + can be detected */
  1080 +static void tlb_protect_code(CPUState *env, uint32_t addr)
846 1081 {
847 1082 int i;
848 1083  
849 1084 addr &= TARGET_PAGE_MASK;
850 1085 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
851   - tlb_flush_entry(&env->tlb_write[0][i], addr);
852   - tlb_flush_entry(&env->tlb_write[1][i], addr);
  1086 + tlb_protect_code1(&env->tlb_write[0][i], addr);
  1087 + tlb_protect_code1(&env->tlb_write[1][i], addr);
  1088 +#if !defined(CONFIG_SOFTMMU)
  1089 + /* NOTE: as we generated the code for this page, it is already at
  1090 + least readable */
  1091 + if (addr < MMAP_AREA_END)
  1092 + mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
  1093 +#endif
  1094 +}
  1095 +
  1096 +static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
  1097 +{
  1098 + if (addr == (tlb_entry->address &
  1099 + (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
  1100 + (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
  1101 + tlb_entry->address &= TARGET_PAGE_MASK;
  1102 + tlb_entry->addend += (unsigned long)phys_ram_base;
  1103 + }
  1104 +}
  1105 +
  1106 +/* update the TLB so that writes in virtual page 'addr' are no longer
  1107 + tested for self-modifying code */
  1108 +static void tlb_unprotect_code(CPUState *env, uint32_t addr)
  1109 +{
  1110 + int i;
  1111 +
  1112 + addr &= TARGET_PAGE_MASK;
  1113 + i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  1114 + tlb_unprotect_code1(&env->tlb_write[0][i], addr);
  1115 + tlb_unprotect_code1(&env->tlb_write[1][i], addr);
  1116 +}
  1117 +
  1118 +static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
  1119 + uint32_t phys_addr)
  1120 +{
  1121 + if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
  1122 + ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
  1123 + tlb_entry->address &= TARGET_PAGE_MASK;
  1124 + tlb_entry->addend += (unsigned long)phys_ram_base;
  1125 + }
  1126 +}
  1127 +
  1128 +/* update the TLB so that writes in physical page 'phys_addr' are no longer
  1129 + tested for self-modifying code */
  1130 +/* XXX: find a way to improve it */
  1131 +static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr)
  1132 +{
  1133 + int i;
  1134 +
  1135 + phys_addr &= TARGET_PAGE_MASK;
  1136 + for(i = 0; i < CPU_TLB_SIZE; i++)
  1137 + tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
  1138 + for(i = 0; i < CPU_TLB_SIZE; i++)
  1139 + tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
  1140 +}
  1141 +
  1142 +/* add a new TLB entry. At most a single entry for a given virtual
  1143 + address is permitted. */
  1144 +int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
  1145 + int is_user, int is_softmmu)
  1146 +{
  1147 + PageDesc *p;
  1148 + target_ulong pd;
  1149 + TranslationBlock *first_tb;
  1150 + unsigned int index;
  1151 + target_ulong address, addend;
  1152 + int ret;
  1153 +
  1154 + p = page_find(paddr >> TARGET_PAGE_BITS);
  1155 + if (!p) {
  1156 + pd = IO_MEM_UNASSIGNED;
  1157 + first_tb = NULL;
  1158 + } else {
  1159 + pd = p->phys_offset;
  1160 + first_tb = p->first_tb;
  1161 + }
  1162 +#if defined(DEBUG_TLB)
  1163 + printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
  1164 + vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
  1165 +#endif
  1166 +
  1167 + ret = 0;
  1168 +#if !defined(CONFIG_SOFTMMU)
  1169 + if (is_softmmu)
  1170 +#endif
  1171 + {
  1172 + if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
  1173 + /* IO memory case */
  1174 + address = vaddr | pd;
  1175 + addend = paddr;
  1176 + } else {
  1177 + /* standard memory */
  1178 + address = vaddr;
  1179 + addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
  1180 + }
  1181 +
  1182 + index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
  1183 + addend -= vaddr;
  1184 + if (prot & PROT_READ) {
  1185 + env->tlb_read[is_user][index].address = address;
  1186 + env->tlb_read[is_user][index].addend = addend;
  1187 + } else {
  1188 + env->tlb_read[is_user][index].address = -1;
  1189 + env->tlb_read[is_user][index].addend = -1;
  1190 + }
  1191 + if (prot & PROT_WRITE) {
  1192 + if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
  1193 + /* ROM: access is ignored (same as unassigned) */
  1194 + env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
  1195 + env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
  1196 + } else if (first_tb) {
  1197 + /* if code is present, we use a specific memory
  1198 + handler. It works only for physical memory access */
  1199 + env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
  1200 + env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
  1201 + } else {
  1202 + env->tlb_write[is_user][index].address = address;
  1203 + env->tlb_write[is_user][index].addend = addend;
  1204 + }
  1205 + } else {
  1206 + env->tlb_write[is_user][index].address = -1;
  1207 + env->tlb_write[is_user][index].addend = -1;
  1208 + }
  1209 + }
  1210 +#if !defined(CONFIG_SOFTMMU)
  1211 + else {
  1212 + if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
  1213 + /* IO access: no mapping is done as it will be handled by the
  1214 + soft MMU */
  1215 + if (!(env->hflags & HF_SOFTMMU_MASK))
  1216 + ret = 2;
  1217 + } else {
  1218 + void *map_addr;
  1219 + if (prot & PROT_WRITE) {
  1220 + if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || first_tb) {
  1221 + /* ROM: we act as if code were present */
  1222 + /* if code is present, we map the page read-only and save
  1223 + the original mapping */
  1224 + VirtPageDesc *vp;
  1225 +
  1226 + vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
  1227 + vp->phys_addr = pd;
  1228 + vp->prot = prot;
  1229 + vp->valid_tag = virt_valid_tag;
  1230 + prot &= ~PAGE_WRITE;
  1231 + }
  1232 + }
  1233 + map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
  1234 + MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
  1235 + if (map_addr == MAP_FAILED) {
  1236 + cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
  1237 + paddr, vaddr);
  1238 + }
  1239 + }
  1240 + }
  1241 +#endif
  1242 + return ret;
  1243 +}
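
The addend stored by tlb_set_page() above is what makes a TLB hit cheap: addend = phys_ram_base + phys_page - virt_page, so the host pointer is just the guest virtual address plus the addend. A standalone model with made-up values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t phys_ram_base = 0x40000000u;   /* assumed host mapping base */
        uint32_t  vaddr = 0x0804a123;            /* guest virtual address */
        uint32_t  pd    = 0x00123000;            /* phys_offset of its page */
        uintptr_t addend = phys_ram_base + (pd & ~0xfffu) - (vaddr & ~0xfffu);
        uintptr_t host   = vaddr + addend;       /* one add on every access */
        printf("host address = %#lx\n", (unsigned long)host); /* 0x40123123 */
        return 0;
    }
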
  1244 +
  1245 +/* called from signal handler: invalidate the code and unprotect the
  1246 + page. Return TRUE if the fault was succesfully handled. */
  1247 +int page_unprotect(unsigned long addr)
  1248 +{
  1249 +#if !defined(CONFIG_SOFTMMU)
  1250 + VirtPageDesc *vp;
  1251 +
  1252 +#if defined(DEBUG_TLB)
  1253 + printf("page_unprotect: addr=0x%08x\n", addr);
  1254 +#endif
  1255 + addr &= TARGET_PAGE_MASK;
  1256 + vp = virt_page_find(addr >> TARGET_PAGE_BITS);
  1257 + if (!vp)
  1258 + return 0;
  1259 + /* NOTE: in this case, validate_tag is _not_ tested as it
  1260 + validates only the code TLB */
  1261 + if (vp->valid_tag != virt_valid_tag)
  1262 + return 0;
  1263 + if (!(vp->prot & PAGE_WRITE))
  1264 + return 0;
  1265 +#if defined(DEBUG_TLB)
  1266 + printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
  1267 + addr, vp->phys_addr, vp->prot);
  1268 +#endif
  1269 + tb_invalidate_phys_page(vp->phys_addr);
  1270 + mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot);
  1271 + return 1;
  1272 +#else
  1273 + return 0;
  1274 +#endif
853 1275 }
854 1276  
855 1277 #else
... ... @@ -866,38 +1288,148 @@ void tlb_flush_page_write(CPUState *env, uint32_t addr)
866 1288 {
867 1289 }
868 1290  
869   -#endif /* defined(CONFIG_USER_ONLY) */
  1291 +int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
  1292 + int is_user, int is_softmmu)
  1293 +{
  1294 + return 0;
  1295 +}
870 1296  
871   -static inline unsigned long *physpage_find_alloc(unsigned int page)
  1297 +/* dump memory mappings */
  1298 +void page_dump(FILE *f)
872 1299 {
873   - unsigned long **lp, *p;
874   - unsigned int index, i;
  1300 + unsigned long start, end;
  1301 + int i, j, prot, prot1;
  1302 + PageDesc *p;
875 1303  
876   - index = page >> TARGET_PAGE_BITS;
877   - lp = &l1_physmap[index >> L2_BITS];
878   - p = *lp;
879   - if (!p) {
880   - /* allocate if not found */
881   - p = malloc(sizeof(unsigned long) * L2_SIZE);
882   - for(i = 0; i < L2_SIZE; i++)
883   - p[i] = IO_MEM_UNASSIGNED;
884   - *lp = p;
  1304 + fprintf(f, "%-8s %-8s %-8s %s\n",
  1305 + "start", "end", "size", "prot");
  1306 + start = -1;
  1307 + end = -1;
  1308 + prot = 0;
  1309 + for(i = 0; i <= L1_SIZE; i++) {
  1310 + if (i < L1_SIZE)
  1311 + p = l1_map[i];
  1312 + else
  1313 + p = NULL;
  1314 + for(j = 0;j < L2_SIZE; j++) {
  1315 + if (!p)
  1316 + prot1 = 0;
  1317 + else
  1318 + prot1 = p[j].flags;
  1319 + if (prot1 != prot) {
  1320 + end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
  1321 + if (start != -1) {
  1322 + fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
  1323 + start, end, end - start,
  1324 + prot & PAGE_READ ? 'r' : '-',
  1325 + prot & PAGE_WRITE ? 'w' : '-',
  1326 + prot & PAGE_EXEC ? 'x' : '-');
  1327 + }
  1328 + if (prot1 != 0)
  1329 + start = end;
  1330 + else
  1331 + start = -1;
  1332 + prot = prot1;
  1333 + }
  1334 + if (!p)
  1335 + break;
  1336 + }
885 1337 }
886   - return p + (index & (L2_SIZE - 1));
887 1338 }
888 1339  
889   -/* return NULL if no page defined (unused memory) */
890   -unsigned long physpage_find(unsigned long page)
  1340 +int page_get_flags(unsigned long address)
891 1341 {
892   - unsigned long *p;
893   - unsigned int index;
894   - index = page >> TARGET_PAGE_BITS;
895   - p = l1_physmap[index >> L2_BITS];
  1342 + PageDesc *p;
  1343 +
  1344 + p = page_find(address >> TARGET_PAGE_BITS);
896 1345 if (!p)
897   - return IO_MEM_UNASSIGNED;
898   - return p[index & (L2_SIZE - 1)];
  1346 + return 0;
  1347 + return p->flags;
899 1348 }
900 1349  
  1350 +/* modify the flags of a page and invalidate the code if
  1351 + necessary. The flag PAGE_WRITE_ORG is positioned automatically
  1352 + depending on PAGE_WRITE */
  1353 +void page_set_flags(unsigned long start, unsigned long end, int flags)
  1354 +{
  1355 + PageDesc *p;
  1356 + unsigned long addr;
  1357 +
  1358 + start = start & TARGET_PAGE_MASK;
  1359 + end = TARGET_PAGE_ALIGN(end);
  1360 + if (flags & PAGE_WRITE)
  1361 + flags |= PAGE_WRITE_ORG;
  1362 + spin_lock(&tb_lock);
  1363 + for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
  1364 + p = page_find_alloc(addr >> TARGET_PAGE_BITS);
  1365 + /* if the write protection is set, then we invalidate the code
  1366 + inside */
  1367 + if (!(p->flags & PAGE_WRITE) &&
  1368 + (flags & PAGE_WRITE) &&
  1369 + p->first_tb) {
  1370 + tb_invalidate_phys_page(addr);
  1371 + }
  1372 + p->flags = flags;
  1373 + }
  1374 + spin_unlock(&tb_lock);
  1375 +}
  1376 +
  1377 +/* called from signal handler: invalidate the code and unprotect the
  1378 + page. Return TRUE if the fault was successfully handled. */
  1379 +int page_unprotect(unsigned long address)
  1380 +{
  1381 + unsigned int page_index, prot, pindex;
  1382 + PageDesc *p, *p1;
  1383 + unsigned long host_start, host_end, addr;
  1384 +
  1385 + host_start = address & host_page_mask;
  1386 + page_index = host_start >> TARGET_PAGE_BITS;
  1387 + p1 = page_find(page_index);
  1388 + if (!p1)
  1389 + return 0;
  1390 + host_end = host_start + host_page_size;
  1391 + p = p1;
  1392 + prot = 0;
  1393 + for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
  1394 + prot |= p->flags;
  1395 + p++;
  1396 + }
  1397 + /* if the page was really writable, then we change its
  1398 + protection back to writable */
  1399 + if (prot & PAGE_WRITE_ORG) {
  1400 + pindex = (address - host_start) >> TARGET_PAGE_BITS;
  1401 + if (!(p1[pindex].flags & PAGE_WRITE)) {
  1402 + mprotect((void *)host_start, host_page_size,
  1403 + (prot & PAGE_BITS) | PAGE_WRITE);
  1404 + p1[pindex].flags |= PAGE_WRITE;
  1405 + /* and since the content will be modified, we must invalidate
  1406 + the corresponding translated code. */
  1407 + tb_invalidate_phys_page(address);
  1408 +#ifdef DEBUG_TB_CHECK
  1409 + tb_invalidate_check(address);
  1410 +#endif
  1411 + return 1;
  1412 + }
  1413 + }
  1414 + return 0;
  1415 +}
  1416 +
  1417 +/* call this function when system calls directly modify a memory area */
  1418 +void page_unprotect_range(uint8_t *data, unsigned long data_size)
  1419 +{
  1420 + unsigned long start, end, addr;
  1421 +
  1422 + start = (unsigned long)data;
  1423 + end = start + data_size;
  1424 + start &= TARGET_PAGE_MASK;
  1425 + end = TARGET_PAGE_ALIGN(end);
  1426 + for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
  1427 + page_unprotect(addr);
  1428 + }
  1429 +}
  1430 +
  1431 +#endif /* defined(CONFIG_USER_ONLY) */
  1432 +
901 1433 /* register physical memory. 'size' must be a multiple of the target
902 1434 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
903 1435 io memory page */
... ... @@ -905,13 +1437,13 @@ void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
905 1437 long phys_offset)
906 1438 {
907 1439 unsigned long addr, end_addr;
908   - unsigned long *p;
  1440 + PageDesc *p;
909 1441  
910 1442 end_addr = start_addr + size;
911 1443 for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
912   - p = physpage_find_alloc(addr);
913   - *p = phys_offset;
914   - if ((phys_offset & ~TARGET_PAGE_MASK) == 0)
  1444 + p = page_find_alloc(addr >> TARGET_PAGE_BITS);
  1445 + p->phys_offset = phys_offset;
  1446 + if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
915 1447 phys_offset += TARGET_PAGE_SIZE;
916 1448 }
917 1449 }
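
Callers register RAM and ROM by OR-ing flags into phys_offset; an illustrative fragment (addresses and names are invented for the example; the real calls live in the machine setup code, not in this file):

    cpu_register_physical_memory(0x00000000, ram_size, 0);
    cpu_register_physical_memory(0x000f0000, 0x10000,
                                 bios_offset | IO_MEM_ROM);

A plain RAM page stores its phys_offset directly; with IO_MEM_ROM set, reads still go to RAM through the offset while writes fall into the ignore handler registered in io_mem_init() below.
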
... ... @@ -937,11 +1469,51 @@ static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
937 1469 unassigned_mem_writeb,
938 1470 };
939 1471  
  1472 +/* self-modifying code support in soft MMU mode: writes to a page
  1473 + containing code come to these functions */
  1474 +
  1475 +static void code_mem_writeb(uint32_t addr, uint32_t val)
  1476 +{
  1477 +#if !defined(CONFIG_USER_ONLY)
  1478 + tb_invalidate_phys_page_fast(addr, 1);
  1479 +#endif
  1480 + stb_raw(phys_ram_base + addr, val);
  1481 +}
  1482 +
  1483 +static void code_mem_writew(uint32_t addr, uint32_t val)
  1484 +{
  1485 +#if !defined(CONFIG_USER_ONLY)
  1486 + tb_invalidate_phys_page_fast(addr, 2);
  1487 +#endif
  1488 + stw_raw(phys_ram_base + addr, val);
  1489 +}
  1490 +
  1491 +static void code_mem_writel(uint32_t addr, uint32_t val)
  1492 +{
  1493 +#if !defined(CONFIG_USER_ONLY)
  1494 + tb_invalidate_phys_page_fast(addr, 4);
  1495 +#endif
  1496 + stl_raw(phys_ram_base + addr, val);
  1497 +}
  1498 +
  1499 +static CPUReadMemoryFunc *code_mem_read[3] = {
  1500 + NULL, /* never used */
  1501 + NULL, /* never used */
  1502 + NULL, /* never used */
  1503 +};
  1504 +
  1505 +static CPUWriteMemoryFunc *code_mem_write[3] = {
  1506 + code_mem_writeb,
  1507 + code_mem_writew,
  1508 + code_mem_writel,
  1509 +};
940 1510  
941 1511 static void io_mem_init(void)
942 1512 {
943   - io_mem_nb = 1;
944   - cpu_register_io_memory(0, unassigned_mem_read, unassigned_mem_write);
  1513 + cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
  1514 + cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
  1515 + cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
  1516 + io_mem_nb = 4;
945 1517 }
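
io_mem_init() above reserves the first io slots so that IO_MEM_ROM, IO_MEM_UNASSIGNED and IO_MEM_CODE can be used directly as address tags. The dispatch that routes a tagged write to code_mem_write*() lives in the softmmu store helpers, which are not in this file; a standalone model of the index extraction (the IO_MEM_CODE value here is assumed for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define IO_SHIFT 3
    #define IO_NB    64
    #define PG_MASK  (~0xfffu)

    typedef void write_fn(uint32_t addr, uint32_t val);
    static void demo_writeb(uint32_t addr, uint32_t val)
    {
        printf("code page write: addr=%#x val=%#x\n", addr, val);
    }
    static write_fn *io_write_model[IO_NB][4];

    int main(void)
    {
        unsigned io_mem_code = 3u << IO_SHIFT;             /* assumed encoding */
        io_write_model[io_mem_code >> IO_SHIFT][0] = demo_writeb;

        uint32_t tlb_address = 0x00008000 | io_mem_code;   /* tagged entry */
        unsigned idx = (tlb_address & ~PG_MASK) >> IO_SHIFT;
        io_write_model[idx][0](0x8004, 0x90);              /* size index 0 = byte */
        return 0;
    }
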
946 1518  
947 1519 /* mem_read and mem_write are arrays of functions containing the
... ...