Commit 436d8b892a84eed2144030a8a07affb94b5f15d7
1 parent
dc6f57fd
correct value for ADDSEG in real mode (fixes GRUB boot) - update static protected mode state - use generic tlb_set_page() git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@506 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 14 additions and 77 deletions
target-i386/helper2.c
... | ... | @@ -181,14 +181,9 @@ void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags) |
181 | 181 | |
182 | 182 | /* called when cr3 or PG bit are modified */ |
183 | 183 | static int last_pg_state = -1; |
184 | -static int last_pe_state = 0; | |
185 | 184 | static uint32_t a20_mask; |
186 | 185 | int a20_enabled; |
187 | 186 | |
188 | -int phys_ram_size; | |
189 | -int phys_ram_fd; | |
190 | -uint8_t *phys_ram_base; | |
191 | - | |
192 | 187 | void cpu_x86_set_a20(CPUX86State *env, int a20_state) |
193 | 188 | { |
194 | 189 | a20_state = (a20_state != 0); |
... | ... | @@ -223,11 +218,11 @@ void cpu_x86_update_cr0(CPUX86State *env) |
223 | 218 | tlb_flush(env); |
224 | 219 | last_pg_state = pg_state; |
225 | 220 | } |
226 | - pe_state = env->cr[0] & CR0_PE_MASK; | |
227 | - if (last_pe_state != pe_state) { | |
228 | - tb_flush(env); | |
229 | - last_pe_state = pe_state; | |
230 | - } | |
221 | + /* update PE flag in hidden flags */ | |
222 | + pe_state = (env->cr[0] & CR0_PE_MASK); | |
223 | + env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT); | |
224 | + /* ensure that ADDSEG is always set in real mode */ | |
225 | + env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT); | |
231 | 226 | } |
232 | 227 | |
233 | 228 | void cpu_x86_update_cr3(CPUX86State *env) |
... | ... | @@ -267,9 +262,9 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, |
267 | 262 | uint8_t *pde_ptr, *pte_ptr; |
268 | 263 | uint32_t pde, pte, virt_addr; |
269 | 264 | int error_code, is_dirty, prot, page_size, ret; |
270 | - unsigned long pd; | |
265 | + unsigned long paddr, vaddr, page_offset; | |
271 | 266 | |
272 | -#ifdef DEBUG_MMU | |
267 | +#if defined(DEBUG_MMU) | |
273 | 268 | printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n", |
274 | 269 | addr, is_write, is_user, env->eip); |
275 | 270 | #endif |
... | ... | @@ -366,72 +361,14 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, |
366 | 361 | |
367 | 362 | do_mapping: |
368 | 363 | pte = pte & a20_mask; |
369 | -#if !defined(CONFIG_SOFTMMU) | |
370 | - if (is_softmmu) | |
371 | -#endif | |
372 | - { | |
373 | - unsigned long paddr, vaddr, address, addend, page_offset; | |
374 | - int index; | |
375 | 364 | |
376 | - /* software MMU case. Even if 4MB pages, we map only one 4KB | |
377 | - page in the cache to avoid filling it too fast */ | |
378 | - page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); | |
379 | - paddr = (pte & TARGET_PAGE_MASK) + page_offset; | |
380 | - vaddr = virt_addr + page_offset; | |
381 | - index = (addr >> 12) & (CPU_TLB_SIZE - 1); | |
382 | - pd = physpage_find(paddr); | |
383 | - if (pd & 0xfff) { | |
384 | - /* IO memory case */ | |
385 | - address = vaddr | pd; | |
386 | - addend = paddr; | |
387 | - } else { | |
388 | - /* standard memory */ | |
389 | - address = vaddr; | |
390 | - addend = (unsigned long)phys_ram_base + pd; | |
391 | - } | |
392 | - addend -= vaddr; | |
393 | - env->tlb_read[is_user][index].address = address; | |
394 | - env->tlb_read[is_user][index].addend = addend; | |
395 | - if (prot & PROT_WRITE) { | |
396 | - env->tlb_write[is_user][index].address = address; | |
397 | - env->tlb_write[is_user][index].addend = addend; | |
398 | - } else { | |
399 | - env->tlb_write[is_user][index].address = -1; | |
400 | - env->tlb_write[is_user][index].addend = -1; | |
401 | - } | |
402 | - page_set_flags(vaddr, vaddr + TARGET_PAGE_SIZE, | |
403 | - PAGE_VALID | PAGE_EXEC | prot); | |
404 | - ret = 0; | |
405 | - } | |
406 | -#if !defined(CONFIG_SOFTMMU) | |
407 | - else { | |
408 | - ret = 0; | |
409 | - /* XXX: incorrect for 4MB pages */ | |
410 | - pd = physpage_find(pte & ~0xfff); | |
411 | - if ((pd & 0xfff) != 0) { | |
412 | - /* IO access: no mapping is done as it will be handled by the | |
413 | - soft MMU */ | |
414 | - if (!(env->hflags & HF_SOFTMMU_MASK)) | |
415 | - ret = 2; | |
416 | - } else { | |
417 | - void *map_addr; | |
418 | - map_addr = mmap((void *)virt_addr, page_size, prot, | |
419 | - MAP_SHARED | MAP_FIXED, phys_ram_fd, pd); | |
420 | - if (map_addr == MAP_FAILED) { | |
421 | - fprintf(stderr, | |
422 | - "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n", | |
423 | - pte & ~0xfff, virt_addr); | |
424 | - exit(1); | |
425 | - } | |
426 | -#ifdef DEBUG_MMU | |
427 | - printf("mmaping 0x%08x to virt 0x%08x pse=%d\n", | |
428 | - pte & ~0xfff, virt_addr, (page_size != 4096)); | |
429 | -#endif | |
430 | - page_set_flags(virt_addr, virt_addr + page_size, | |
431 | - PAGE_VALID | PAGE_EXEC | prot); | |
432 | - } | |
433 | - } | |
434 | -#endif | |
365 | + /* Even if 4MB pages, we map only one 4KB page in the cache to | |
366 | + avoid filling it too fast */ | |
367 | + page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); | |
368 | + paddr = (pte & TARGET_PAGE_MASK) + page_offset; | |
369 | + vaddr = virt_addr + page_offset; | |
370 | + | |
371 | + ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu); | |
435 | 372 | return ret; |
436 | 373 | do_fault_protect: |
437 | 374 | error_code = PG_ERROR_P_MASK; | ... | ... |