Commit 8df1cd076cc14d1d4fc456c6d7d1ceb257781942

Authored by bellard
1 parent bb05683b

physical memory access functions


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1249 c046a42c-6fe2-441c-8c8c-71466251a162
cpu-all.h
@@ -725,6 +725,9 @@ static inline void cpu_physical_memory_write(target_phys_addr_t addr,
725 { 725 {
726 cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1); 726 cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
727 } 727 }
  728 +uint32_t ldl_phys(target_phys_addr_t addr);
  729 +void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
  730 +void stl_phys(target_phys_addr_t addr, uint32_t val);
728 731
729 int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 732 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
730 uint8_t *buf, int len, int is_write); 733 uint8_t *buf, int len, int is_write);
@@ -2032,6 +2032,21 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2032 addr += l; 2032 addr += l;
2033 } 2033 }
2034 } 2034 }
  2035 +
/* never used: stub so callers link in this build configuration
   (presumably the CONFIG_USER_ONLY branch — confirm against the
   enclosing #if, which is outside this hunk). Always reads 0. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return 0;
}
  2041 +
/* never used: no-op stub matching the full softmmu implementation
   below the #else — stores to physical memory are meaningless here. */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
}
  2045 +
/* never used: no-op stub matching the full softmmu implementation
   below the #else — stores to physical memory are meaningless here. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
}
  2049 +
2035 #else 2050 #else
2036 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 2051 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2037 int len, int is_write) 2052 int len, int is_write)
@@ -2118,6 +2133,96 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2118 addr += l; 2133 addr += l;
2119 } 2134 }
2120 } 2135 }
  2136 +
/* Load a 32-bit value from guest physical address 'addr', dispatching
   to either RAM or a registered I/O read handler.
   warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    /* look up the physical page descriptor; pages with no descriptor
       are treated as unassigned memory */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
        /* I/O case: the low bits of phys_offset encode the I/O slot;
           index [2] selects the 4-byte access handler (val is uint32_t) */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case: read directly through the host mapping of guest RAM */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
  2166 +
/* Store a 32-bit value to guest physical address 'addr' WITHOUT
   updating dirty tracking.
   warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* look up the physical page descriptor; unmapped -> unassigned I/O */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        /* any non-zero low bits mean non-plain-RAM: dispatch to the
           registered 4-byte I/O write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        /* plain RAM: write through the host mapping, deliberately
           skipping tb_invalidate/dirty-bit bookkeeping (see above) */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
  2193 +
/* Store a 32-bit value to guest physical address 'addr', with full
   dirty tracking and translated-code invalidation for the RAM case.
   warning: addr must be aligned */
/* XXX: optimize code invalidation test */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* look up the physical page descriptor; unmapped -> unassigned I/O */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        /* non-plain-RAM: dispatch to the 4-byte I/O write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        /* invalidate code: any translated block overlapping the 4
           stored bytes must be retranslated */
        tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
        /* set dirty bit */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
    }
}
  2225 +
2121 #endif 2226 #endif
2122 2227
2123 /* virtual memory access for debug */ 2228 /* virtual memory access for debug */
target-i386/helper2.c
@@ -490,34 +490,26 @@ void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
490 tlb_flush_page(env, addr); 490 tlb_flush_page(env, addr);
491 } 491 }
492 492
493 -static inline uint8_t *get_phys_mem_ptr(target_phys_addr_t addr)  
494 -{  
495 - /* XXX: incorrect */  
496 - return phys_ram_base + addr;  
497 -} 493 +#if defined(CONFIG_USER_ONLY)
498 494
499 -/* WARNING: addr must be aligned */  
500 -uint32_t ldl_phys_aligned(target_phys_addr_t addr) 495 +int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
  496 + int is_write, int is_user, int is_softmmu)
501 { 497 {
502 - uint8_t *ptr;  
503 - uint32_t val;  
504 - ptr = get_phys_mem_ptr(addr);  
505 - if (!ptr)  
506 - val = 0;  
507 - else  
508 - val = ldl_raw(ptr);  
509 - return val; 498 + /* user mode only emulation */
  499 + is_write &= 1;
  500 + env->cr[2] = addr;
  501 + env->error_code = (is_write << PG_ERROR_W_BIT);
  502 + env->error_code |= PG_ERROR_U_MASK;
  503 + return 1;
510 } 504 }
511 505
512 -void stl_phys_aligned(target_phys_addr_t addr, uint32_t val) 506 +target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
513 { 507 {
514 - uint8_t *ptr;  
515 - ptr = get_phys_mem_ptr(addr);  
516 - if (!ptr)  
517 - return;  
518 - stl_raw(ptr, val); 508 + return addr;
519 } 509 }
520 510
  511 +#else
  512 +
521 /* return value: 513 /* return value:
522 -1 = cannot handle fault 514 -1 = cannot handle fault
523 0 = nothing more to do 515 0 = nothing more to do
@@ -539,12 +531,6 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
539 #endif 531 #endif
540 is_write &= 1; 532 is_write &= 1;
541 533
542 - if (env->user_mode_only) {  
543 - /* user mode only emulation */  
544 - error_code = 0;  
545 - goto do_fault;  
546 - }  
547 -  
548 if (!(env->cr[0] & CR0_PG_MASK)) { 534 if (!(env->cr[0] & CR0_PG_MASK)) {
549 pte = addr; 535 pte = addr;
550 virt_addr = addr & TARGET_PAGE_MASK; 536 virt_addr = addr & TARGET_PAGE_MASK;
@@ -553,7 +539,6 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
553 goto do_mapping; 539 goto do_mapping;
554 } 540 }
555 541
556 -  
557 if (env->cr[4] & CR4_PAE_MASK) { 542 if (env->cr[4] & CR4_PAE_MASK) {
558 /* XXX: we only use 32 bit physical addresses */ 543 /* XXX: we only use 32 bit physical addresses */
559 #ifdef TARGET_X86_64 544 #ifdef TARGET_X86_64
@@ -572,33 +557,33 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
572 557
573 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 558 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
574 env->a20_mask; 559 env->a20_mask;
575 - pml4e = ldl_phys_aligned(pml4e_addr); 560 + pml4e = ldl_phys(pml4e_addr);
576 if (!(pml4e & PG_PRESENT_MASK)) { 561 if (!(pml4e & PG_PRESENT_MASK)) {
577 error_code = 0; 562 error_code = 0;
578 goto do_fault; 563 goto do_fault;
579 } 564 }
580 if (!(pml4e & PG_ACCESSED_MASK)) { 565 if (!(pml4e & PG_ACCESSED_MASK)) {
581 pml4e |= PG_ACCESSED_MASK; 566 pml4e |= PG_ACCESSED_MASK;
582 - stl_phys_aligned(pml4e_addr, pml4e); 567 + stl_phys_notdirty(pml4e_addr, pml4e);
583 } 568 }
584 569
585 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & 570 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
586 env->a20_mask; 571 env->a20_mask;
587 - pdpe = ldl_phys_aligned(pdpe_addr); 572 + pdpe = ldl_phys(pdpe_addr);
588 if (!(pdpe & PG_PRESENT_MASK)) { 573 if (!(pdpe & PG_PRESENT_MASK)) {
589 error_code = 0; 574 error_code = 0;
590 goto do_fault; 575 goto do_fault;
591 } 576 }
592 if (!(pdpe & PG_ACCESSED_MASK)) { 577 if (!(pdpe & PG_ACCESSED_MASK)) {
593 pdpe |= PG_ACCESSED_MASK; 578 pdpe |= PG_ACCESSED_MASK;
594 - stl_phys_aligned(pdpe_addr, pdpe); 579 + stl_phys_notdirty(pdpe_addr, pdpe);
595 } 580 }
596 } else 581 } else
597 #endif 582 #endif
598 { 583 {
599 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & 584 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
600 env->a20_mask; 585 env->a20_mask;
601 - pdpe = ldl_phys_aligned(pdpe_addr); 586 + pdpe = ldl_phys(pdpe_addr);
602 if (!(pdpe & PG_PRESENT_MASK)) { 587 if (!(pdpe & PG_PRESENT_MASK)) {
603 error_code = 0; 588 error_code = 0;
604 goto do_fault; 589 goto do_fault;
@@ -607,7 +592,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
607 592
608 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) & 593 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
609 env->a20_mask; 594 env->a20_mask;
610 - pde = ldl_phys_aligned(pde_addr); 595 + pde = ldl_phys(pde_addr);
611 if (!(pde & PG_PRESENT_MASK)) { 596 if (!(pde & PG_PRESENT_MASK)) {
612 error_code = 0; 597 error_code = 0;
613 goto do_fault; 598 goto do_fault;
@@ -620,7 +605,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
620 /* 4 KB page */ 605 /* 4 KB page */
621 if (!(pde & PG_ACCESSED_MASK)) { 606 if (!(pde & PG_ACCESSED_MASK)) {
622 pde |= PG_ACCESSED_MASK; 607 pde |= PG_ACCESSED_MASK;
623 - stl_phys_aligned(pde_addr, pde); 608 + stl_phys_notdirty(pde_addr, pde);
624 } 609 }
625 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) & 610 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
626 env->a20_mask; 611 env->a20_mask;
@@ -630,7 +615,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
630 /* page directory entry */ 615 /* page directory entry */
631 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & 616 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
632 env->a20_mask; 617 env->a20_mask;
633 - pde = ldl_phys_aligned(pde_addr); 618 + pde = ldl_phys(pde_addr);
634 if (!(pde & PG_PRESENT_MASK)) { 619 if (!(pde & PG_PRESENT_MASK)) {
635 error_code = 0; 620 error_code = 0;
636 goto do_fault; 621 goto do_fault;
@@ -654,7 +639,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
654 pde |= PG_ACCESSED_MASK; 639 pde |= PG_ACCESSED_MASK;
655 if (is_dirty) 640 if (is_dirty)
656 pde |= PG_DIRTY_MASK; 641 pde |= PG_DIRTY_MASK;
657 - stl_phys_aligned(pde_addr, pde); 642 + stl_phys_notdirty(pde_addr, pde);
658 } 643 }
659 644
660 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */ 645 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
@@ -663,14 +648,14 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
663 } else { 648 } else {
664 if (!(pde & PG_ACCESSED_MASK)) { 649 if (!(pde & PG_ACCESSED_MASK)) {
665 pde |= PG_ACCESSED_MASK; 650 pde |= PG_ACCESSED_MASK;
666 - stl_phys_aligned(pde_addr, pde); 651 + stl_phys_notdirty(pde_addr, pde);
667 } 652 }
668 653
669 /* page directory entry */ 654 /* page directory entry */
670 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & 655 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
671 env->a20_mask; 656 env->a20_mask;
672 handle_4k_page: 657 handle_4k_page:
673 - pte = ldl_phys_aligned(pte_addr); 658 + pte = ldl_phys(pte_addr);
674 if (!(pte & PG_PRESENT_MASK)) { 659 if (!(pte & PG_PRESENT_MASK)) {
675 error_code = 0; 660 error_code = 0;
676 goto do_fault; 661 goto do_fault;
@@ -692,7 +677,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
692 pte |= PG_ACCESSED_MASK; 677 pte |= PG_ACCESSED_MASK;
693 if (is_dirty) 678 if (is_dirty)
694 pte |= PG_DIRTY_MASK; 679 pte |= PG_DIRTY_MASK;
695 - stl_phys_aligned(pte_addr, pte); 680 + stl_phys_notdirty(pte_addr, pte);
696 } 681 }
697 page_size = 4096; 682 page_size = 4096;
698 virt_addr = addr & ~0xfff; 683 virt_addr = addr & ~0xfff;
@@ -734,12 +719,6 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
734 return 1; 719 return 1;
735 } 720 }
736 721
737 -#if defined(CONFIG_USER_ONLY)  
738 -target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)  
739 -{  
740 - return addr;  
741 -}  
742 -#else  
743 target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr) 722 target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
744 { 723 {
745 uint32_t pde_addr, pte_addr; 724 uint32_t pde_addr, pte_addr;
@@ -762,13 +741,13 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
762 741
763 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 742 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
764 env->a20_mask; 743 env->a20_mask;
765 - pml4e = ldl_phys_aligned(pml4e_addr); 744 + pml4e = ldl_phys(pml4e_addr);
766 if (!(pml4e & PG_PRESENT_MASK)) 745 if (!(pml4e & PG_PRESENT_MASK))
767 return -1; 746 return -1;
768 747
769 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & 748 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
770 env->a20_mask; 749 env->a20_mask;
771 - pdpe = ldl_phys_aligned(pdpe_addr); 750 + pdpe = ldl_phys(pdpe_addr);
772 if (!(pdpe & PG_PRESENT_MASK)) 751 if (!(pdpe & PG_PRESENT_MASK))
773 return -1; 752 return -1;
774 } else 753 } else
@@ -776,14 +755,14 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
776 { 755 {
777 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & 756 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
778 env->a20_mask; 757 env->a20_mask;
779 - pdpe = ldl_phys_aligned(pdpe_addr); 758 + pdpe = ldl_phys(pdpe_addr);
780 if (!(pdpe & PG_PRESENT_MASK)) 759 if (!(pdpe & PG_PRESENT_MASK))
781 return -1; 760 return -1;
782 } 761 }
783 762
784 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) & 763 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
785 env->a20_mask; 764 env->a20_mask;
786 - pde = ldl_phys_aligned(pde_addr); 765 + pde = ldl_phys(pde_addr);
787 if (!(pde & PG_PRESENT_MASK)) { 766 if (!(pde & PG_PRESENT_MASK)) {
788 return -1; 767 return -1;
789 } 768 }
@@ -796,7 +775,7 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
796 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) & 775 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
797 env->a20_mask; 776 env->a20_mask;
798 page_size = 4096; 777 page_size = 4096;
799 - pte = ldl_phys_aligned(pte_addr); 778 + pte = ldl_phys(pte_addr);
800 } 779 }
801 } else { 780 } else {
802 if (!(env->cr[0] & CR0_PG_MASK)) { 781 if (!(env->cr[0] & CR0_PG_MASK)) {
@@ -805,7 +784,7 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
805 } else { 784 } else {
806 /* page directory entry */ 785 /* page directory entry */
807 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask; 786 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
808 - pde = ldl_phys_aligned(pde_addr); 787 + pde = ldl_phys(pde_addr);
809 if (!(pde & PG_PRESENT_MASK)) 788 if (!(pde & PG_PRESENT_MASK))
810 return -1; 789 return -1;
811 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { 790 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
@@ -814,7 +793,7 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
814 } else { 793 } else {
815 /* page directory entry */ 794 /* page directory entry */
816 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask; 795 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
817 - pte = ldl_phys_aligned(pte_addr); 796 + pte = ldl_phys(pte_addr);
818 if (!(pte & PG_PRESENT_MASK)) 797 if (!(pte & PG_PRESENT_MASK))
819 return -1; 798 return -1;
820 page_size = 4096; 799 page_size = 4096;
@@ -827,7 +806,7 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
827 paddr = (pte & TARGET_PAGE_MASK) + page_offset; 806 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
828 return paddr; 807 return paddr;
829 } 808 }
830 -#endif 809 +#endif /* !CONFIG_USER_ONLY */
831 810
832 #if defined(USE_CODE_COPY) 811 #if defined(USE_CODE_COPY)
833 struct fpstate { 812 struct fpstate {
target-ppc/helper.c
@@ -129,8 +129,8 @@ static int find_pte (uint32_t *RPN, int *prot, uint32_t base, uint32_t va,
129 int ret = -1; /* No entry found */ 129 int ret = -1; /* No entry found */
130 130
131 for (i = 0; i < 8; i++) { 131 for (i = 0; i < 8; i++) {
132 - pte0 = ldl_raw(phys_ram_base + base + (i * 8));  
133 - pte1 = ldl_raw(phys_ram_base + base + (i * 8) + 4); 132 + pte0 = ldl_phys(base + (i * 8));
  133 + pte1 = ldl_phys(base + (i * 8) + 4);
134 #if defined (DEBUG_MMU) 134 #if defined (DEBUG_MMU)
135 if (loglevel > 0) { 135 if (loglevel > 0) {
136 fprintf(logfile, "Load pte from 0x%08x => 0x%08x 0x%08x " 136 fprintf(logfile, "Load pte from 0x%08x => 0x%08x 0x%08x "
@@ -220,7 +220,7 @@ static int find_pte (uint32_t *RPN, int *prot, uint32_t base, uint32_t va,
220 } 220 }
221 } 221 }
222 if (store) { 222 if (store) {
223 - stl_raw(phys_ram_base + base + (good * 8) + 4, keep); 223 + stl_phys_notdirty(base + (good * 8) + 4, keep);
224 } 224 }
225 } 225 }
226 226