Commit 0f459d16c378128a586cf161526721a81b629405

Author: pbrook
Parent: f227f17d

Clean up MMIO TLB handling.

The IO index is now stored in its own field, instead of being wedged
into the vaddr field.  This eliminates the ROMD and watchpoint host
pointer weirdness.  The IO index space is expanded by 1 bit, and
several additional bits are made available in the TLB vaddr field.


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4704 c046a42c-6fe2-441c-8c8c-71466251a162
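In the new scheme the TLB entry's vaddr field carries only three low flag bits (TLB_INVALID_MASK, TLB_NOTDIRTY, TLB_MMIO) and the IO handler index moves to a separate per-entry iotlb array. A minimal standalone sketch of the fast-path test this layout enables follows; it is illustrative only, assuming 32-bit addresses and a 4 KB page size:

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK (~((uint32_t)((1u << TARGET_PAGE_BITS) - 1)))
    #define TLB_INVALID_MASK (1u << 3)
    #define TLB_NOTDIRTY     (1u << 4)
    #define TLB_MMIO         (1u << 5)

    typedef struct {
        uint32_t addr_write;  /* page vaddr | flag bits */
        uint32_t iotlb;       /* separate field: handler index or ram offset */
    } Entry;

    int main(void)
    {
        Entry ram  = { .addr_write = 0x4000 };            /* all flags zero */
        Entry mmio = { .addr_write = 0x5000 | TLB_MMIO }; /* slow path */
        uint32_t addr = 0x4000;

        /* TLB hit test, as in softmmu_template.h below. */
        int hit = (addr & TARGET_PAGE_MASK) ==
                  (ram.addr_write & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
        /* Fast RAM access only when no flag bit is set. */
        int fast = hit && (ram.addr_write & ~TARGET_PAGE_MASK) == 0;
        printf("hit=%d fast=%d mmio_flags=%#x\n", hit, fast,
               (unsigned)(mmio.addr_write & ~TARGET_PAGE_MASK));
        return 0;
    }

Because all three flags live below the page mask, one compare against the page-masked address both validates the entry and routes any non-plain-RAM access onto the slow path.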
cpu-all.h
@@ -797,7 +797,7 @@ extern CPUState *cpu_single_env;
 void cpu_interrupt(CPUState *s, int mask);
 void cpu_reset_interrupt(CPUState *env, int mask);
 
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr);
+int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type);
 int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
 void cpu_watchpoint_remove_all(CPUState *env);
 int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
@@ -868,21 +868,34 @@ extern uint8_t *phys_ram_dirty;
 extern ram_addr_t ram_size;
 
 /* physical memory access */
-#define TLB_INVALID_MASK (1 << 3)
-#define IO_MEM_SHIFT 4
+
+/* MMIO pages are identified by a combination of an IO device index and
+   3 flags.  The ROMD code stores the page ram offset in the iotlb entry,
+   so only a limited number of ids are available. */
+
+#define IO_MEM_SHIFT 3
 #define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))
 
 #define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
 #define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
 #define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
-#define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */
-/* acts like a ROM when read and like a device when written. As an
-   exception, the write memory callback gets the ram offset instead of
-   the physical address */
+#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)
+
+/* Acts like a ROM when read and like a device when written. */
 #define IO_MEM_ROMD (1)
 #define IO_MEM_SUBPAGE (2)
 #define IO_MEM_SUBWIDTH (4)
 
+/* Flags stored in the low bits of the TLB virtual address.  These are
+   defined so that fast path ram access is all zeros. */
+/* Zero if TLB entry is valid. */
+#define TLB_INVALID_MASK (1 << 3)
+/* Set if TLB entry references a clean RAM page.  The iotlb entry will
+   contain the page physical address. */
+#define TLB_NOTDIRTY (1 << 4)
+/* Set if TLB entry is an IO callback. */
+#define TLB_MMIO (1 << 5)
+
 typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
 typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
 
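A quick arithmetic check of the constants above (a TARGET_PAGE_BITS of 12 assumed): lowering IO_MEM_SHIFT from 4 to 3 doubles IO_MEM_NB_ENTRIES, which is the extra IO index bit mentioned in the commit message.

    #include <assert.h>

    #define TARGET_PAGE_BITS 12

    int main(void)
    {
        /* Old layout: shift 4 -> 256 handler slots below the page size. */
        assert((1 << (TARGET_PAGE_BITS - 4)) == 256);
        /* New layout: shift 3 -> 512 slots, one extra bit of IO index. */
        assert((1 << (TARGET_PAGE_BITS - 3)) == 512);
        return 0;
    }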
cpu-defs.h
@@ -106,16 +106,17 @@ typedef uint64_t target_phys_addr_t;
 #endif
 
 typedef struct CPUTLBEntry {
-    /* bit 31 to TARGET_PAGE_BITS : virtual address
-       bit TARGET_PAGE_BITS-1..IO_MEM_SHIFT : if non zero, memory io
-       zone number
+    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
+       bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
+       go directly to ram.
        bit 3 : indicates that the entry is invalid
        bit 2..0 : zero
     */
     target_ulong addr_read;
     target_ulong addr_write;
     target_ulong addr_code;
-    /* addend to virtual address to get physical address */
+    /* Addend to virtual address to get physical address.  IO accesses
+       use the corresponding iotlb value. */
 #if TARGET_PHYS_ADDR_BITS == 64
     /* on i386 Linux make sure it is aligned */
     target_phys_addr_t addend __attribute__((aligned(8)));
@@ -143,6 +144,7 @@ typedef struct CPUTLBEntry {
     int halted; /* TRUE if the CPU is in suspend state */ \
     /* The meaning of the MMU modes is defined in the target code. */ \
     CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
+    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
     /* buffer for temporaries in the code generator */ \
     long temp_buf[CPU_TEMP_BUF_NLONGS]; \
@@ -155,7 +157,7 @@ typedef struct CPUTLBEntry {
     \
     struct { \
         target_ulong vaddr; \
-        target_phys_addr_t addend; \
+        int type; /* PAGE_READ/PAGE_WRITE */ \
     } watchpoint[MAX_WATCHPOINTS]; \
     int nb_watchpoints; \
     int watchpoint_hit; \
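The new iotlb array mirrors tlb_table: both are indexed by the same hash of the virtual address, so a TLB hit can locate its IO information without overloading addend. A sketch of the shared index computation (sizes illustrative):

    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define CPU_TLB_SIZE 256

    static int tlb_index(unsigned long vaddr)
    {
        return (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    }

    int main(void)
    {
        /* tlb_table[mmu_idx][i] and iotlb[mmu_idx][i] describe the same page. */
        printf("index for 0x12345678: %d\n", tlb_index(0x12345678UL));
        return 0;
    }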
exec.c
@@ -121,7 +121,7 @@ typedef struct PageDesc {
 } PageDesc;
 
 typedef struct PhysPageDesc {
-    /* offset in host memory of the page + io_index in the low 12 bits */
+    /* offset in host memory of the page + io_index in the low bits */
     ram_addr_t phys_offset;
 } PhysPageDesc;
 
@@ -1188,7 +1188,7 @@ static void breakpoint_invalidate(CPUState *env, target_ulong pc)
 #endif
 
 /* Add a watchpoint. */
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
+int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
 {
     int i;
 
@@ -1201,6 +1201,7 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
 
     i = env->nb_watchpoints++;
     env->watchpoint[i].vaddr = addr;
+    env->watchpoint[i].type = type;
     tlb_flush_page(env, addr);
     /* FIXME: This flush is needed because of the hack to make memory ops
        terminate the TB. It can be removed once the proper IO trap and
@@ -1617,7 +1618,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
         if ((addr - start) < length) {
-            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
+            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
         }
     }
 }
@@ -1681,7 +1682,7 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
             tlb_entry->addend - (unsigned long)phys_ram_base;
         if (!cpu_physical_memory_is_dirty(ram_addr)) {
-            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
+            tlb_entry->addr_write |= TLB_NOTDIRTY;
         }
     }
 }
@@ -1704,33 +1705,26 @@ void cpu_tlb_update_dirty(CPUState *env)
 #endif
 }
 
-static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
-                                  unsigned long start)
+static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
 {
-    unsigned long addr;
-    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
-        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
-        if (addr == start) {
-            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
-        }
-    }
+    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
+        tlb_entry->addr_write = vaddr;
 }
 
-/* update the TLB corresponding to virtual page vaddr and phys addr
-   addr so that it is no longer dirty */
-static inline void tlb_set_dirty(CPUState *env,
-                                 unsigned long addr, target_ulong vaddr)
+/* update the TLB corresponding to virtual page vaddr
+   so that it is no longer dirty */
+static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
 {
     int i;
 
-    addr &= TARGET_PAGE_MASK;
+    vaddr &= TARGET_PAGE_MASK;
     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_set_dirty1(&env->tlb_table[0][i], addr);
-    tlb_set_dirty1(&env->tlb_table[1][i], addr);
+    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
+    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
 #if (NB_MMU_MODES >= 3)
-    tlb_set_dirty1(&env->tlb_table[2][i], addr);
+    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
 #if (NB_MMU_MODES == 4)
-    tlb_set_dirty1(&env->tlb_table[3][i], addr);
+    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
 #endif
 #endif
 }
@@ -1747,10 +1741,12 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
     unsigned long pd;
     unsigned int index;
     target_ulong address;
+    target_ulong code_address;
     target_phys_addr_t addend;
     int ret;
     CPUTLBEntry *te;
     int i;
+    target_phys_addr_t iotlb;
 
     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
     if (!p) {
@@ -1764,64 +1760,69 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
 #endif
 
     ret = 0;
-    {
-        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
-            /* IO memory case */
-            address = vaddr | pd;
-            addend = paddr;
-        } else {
-            /* standard memory */
-            address = vaddr;
-            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
-        }
-
-        /* Make accesses to pages with watchpoints go via the
-           watchpoint trap routines.  */
-        for (i = 0; i < env->nb_watchpoints; i++) {
-            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
-                if (address & ~TARGET_PAGE_MASK) {
-                    env->watchpoint[i].addend = 0;
-                    address = vaddr | io_mem_watch;
-                } else {
-                    env->watchpoint[i].addend = pd - paddr +
-                        (unsigned long) phys_ram_base;
-                    /* TODO: Figure out how to make read watchpoints coexist
-                       with code.  */
-                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
-                }
-            }
-        }
-
-        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-        addend -= vaddr;
-        te = &env->tlb_table[mmu_idx][index];
-        te->addend = addend;
-        if (prot & PAGE_READ) {
-            te->addr_read = address;
-        } else {
-            te->addr_read = -1;
-        }
-
-        if (prot & PAGE_EXEC) {
-            te->addr_code = address;
-        } else {
-            te->addr_code = -1;
-        }
-        if (prot & PAGE_WRITE) {
-            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
-                (pd & IO_MEM_ROMD)) {
-                /* write access calls the I/O callback */
-                te->addr_write = vaddr |
-                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
-            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
-                       !cpu_physical_memory_is_dirty(pd)) {
-                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
-            } else {
-                te->addr_write = address;
-            }
-        } else {
-            te->addr_write = -1;
-        }
-    }
+    address = vaddr;
+    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
+        /* IO memory case (romd handled later) */
+        address |= TLB_MMIO;
+    }
+    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
+    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
+        /* Normal RAM.  */
+        iotlb = pd & TARGET_PAGE_MASK;
+        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
+            iotlb |= IO_MEM_NOTDIRTY;
+        else
+            iotlb |= IO_MEM_ROM;
+    } else {
+        /* IO handlers are currently passed a physical address.
+           It would be nice to pass an offset from the base address
+           of that region.  This would avoid having to special case RAM,
+           and avoid full address decoding in every device.
+           We can't use the high bits of pd for this because
+           IO_MEM_ROMD uses these as a ram address.  */
+        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
+    }
+
+    code_address = address;
+    /* Make accesses to pages with watchpoints go via the
+       watchpoint trap routines.  */
+    for (i = 0; i < env->nb_watchpoints; i++) {
+        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
+            iotlb = io_mem_watch + paddr;
+            /* TODO: The memory case can be optimized by not trapping
+               reads of pages with a write breakpoint.  */
+            address |= TLB_MMIO;
+        }
+    }
+
+    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    env->iotlb[mmu_idx][index] = iotlb - vaddr;
+    te = &env->tlb_table[mmu_idx][index];
+    te->addend = addend - vaddr;
+    if (prot & PAGE_READ) {
+        te->addr_read = address;
+    } else {
+        te->addr_read = -1;
+    }
+
+    if (prot & PAGE_EXEC) {
+        te->addr_code = code_address;
+    } else {
+        te->addr_code = -1;
+    }
+    if (prot & PAGE_WRITE) {
+        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
+            (pd & IO_MEM_ROMD)) {
+            /* Write access calls the I/O callback.  */
+            te->addr_write = address | TLB_MMIO;
+        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
+                   !cpu_physical_memory_is_dirty(pd)) {
+            te->addr_write = address | TLB_NOTDIRTY;
+        } else {
+            te->addr_write = address;
+        }
+    } else {
+        te->addr_write = -1;
+    }
     return ret;
 }
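The iotlb value computed above encodes two different things: for RAM and ROM pages it is the ram offset tagged with a hardcoded handler id, while for device pages it is the handler index plus the physical page address. A condensed standalone model of that branch, reusing the cpu-all.h constants with illustrative inputs:

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK (~((uint32_t)((1u << TARGET_PAGE_BITS) - 1)))
    #define IO_MEM_SHIFT 3
    #define IO_MEM_RAM      (0 << IO_MEM_SHIFT)
    #define IO_MEM_ROM      (1 << IO_MEM_SHIFT)
    #define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)

    static uint32_t make_iotlb(uint32_t pd, uint32_t paddr)
    {
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* RAM or ROM: keep the ram offset, tag with a fixed handler. */
            uint32_t iotlb = pd & TARGET_PAGE_MASK;
            return iotlb | ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM
                            ? IO_MEM_NOTDIRTY : IO_MEM_ROM);
        }
        /* Device: handler index bits plus the page physical address. */
        return (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    int main(void)
    {
        printf("ram:    %#x\n",
               (unsigned)make_iotlb(0x00042000 | IO_MEM_RAM, 0x00042000));
        printf("device: %#x\n",
               (unsigned)make_iotlb(5 << IO_MEM_SHIFT, 0x10000000));
        return 0;
    }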
@@ -2181,11 +2182,10 @@ static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
     unassigned_mem_writeb,
 };
 
-static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
+                                uint32_t val)
 {
-    unsigned long ram_addr;
     int dirty_flags;
-    ram_addr = addr - (unsigned long)phys_ram_base;
     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)
@@ -2193,7 +2193,7 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t
         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
 #endif
     }
-    stb_p((uint8_t *)(long)addr, val);
+    stb_p(phys_ram_base + ram_addr, val);
 #ifdef USE_KQEMU
     if (cpu_single_env->kqemu_enabled &&
         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -2204,14 +2204,13 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
+        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
 }
 
-static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
+                                uint32_t val)
 {
-    unsigned long ram_addr;
     int dirty_flags;
-    ram_addr = addr - (unsigned long)phys_ram_base;
     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)
@@ -2219,7 +2218,7 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t
         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
 #endif
     }
-    stw_p((uint8_t *)(long)addr, val);
+    stw_p(phys_ram_base + ram_addr, val);
 #ifdef USE_KQEMU
     if (cpu_single_env->kqemu_enabled &&
         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -2230,14 +2229,13 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
+        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
 }
 
-static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
+                                uint32_t val)
 {
-    unsigned long ram_addr;
     int dirty_flags;
-    ram_addr = addr - (unsigned long)phys_ram_base;
     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)
@@ -2245,7 +2243,7 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t
         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
 #endif
     }
-    stl_p((uint8_t *)(long)addr, val);
+    stl_p(phys_ram_base + ram_addr, val);
 #ifdef USE_KQEMU
     if (cpu_single_env->kqemu_enabled &&
         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -2256,7 +2254,7 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
+        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
 }
 
 static CPUReadMemoryFunc *error_mem_read[3] = {
@@ -2271,67 +2269,63 @@ static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
     notdirty_mem_writel,
 };
 
+/* Generate a debug exception if a watchpoint has been hit. */
+static void check_watchpoint(int offset, int flags)
+{
+    CPUState *env = cpu_single_env;
+    target_ulong vaddr;
+    int i;
+
+    vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
+    for (i = 0; i < env->nb_watchpoints; i++) {
+        if (vaddr == env->watchpoint[i].vaddr
+            && (env->watchpoint[i].type & flags)) {
+            env->watchpoint_hit = i + 1;
+            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
+            break;
+        }
+    }
+}
+
 /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
    so these check for a hit then pass through to the normal out-of-line
    phys routines.  */
 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
 {
+    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
     return ldub_phys(addr);
 }
 
 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
 {
+    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
     return lduw_phys(addr);
 }
 
 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
 {
+    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
     return ldl_phys(addr);
 }
 
-/* Generate a debug exception if a watchpoint has been hit.
-   Returns the real physical address of the access.  addr will be a host
-   address in case of a RAM location.  */
-static target_ulong check_watchpoint(target_phys_addr_t addr)
-{
-    CPUState *env = cpu_single_env;
-    target_ulong watch;
-    target_ulong retaddr;
-    int i;
-
-    retaddr = addr;
-    for (i = 0; i < env->nb_watchpoints; i++) {
-        watch = env->watchpoint[i].vaddr;
-        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
-            retaddr = addr - env->watchpoint[i].addend;
-            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
-                cpu_single_env->watchpoint_hit = i + 1;
-                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
-                break;
-            }
-        }
-    }
-    return retaddr;
-}
-
 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
 {
-    addr = check_watchpoint(addr);
+    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
     stb_phys(addr, val);
 }
 
 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
 {
-    addr = check_watchpoint(addr);
+    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
 }
 
 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
 {
-    addr = check_watchpoint(addr);
+    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
     stl_phys(addr, val);
 }
 
@@ -2501,7 +2495,7 @@ static void io_mem_init(void)
     cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
     io_mem_nb = 5;
 
-    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
+    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                           watch_mem_write, NULL);
     /* alloc dirty bits array */
     phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
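The new check_watchpoint requires an exact virtual-address match and an overlap between the stored watchpoint type and the access kind. A toy version, with PAGE_READ/PAGE_WRITE values assumed to be the usual cpu-all.h ones:

    #include <stdio.h>

    #define PAGE_READ  0x0001  /* assumed cpu-all.h values */
    #define PAGE_WRITE 0x0002

    struct watchpoint { unsigned long vaddr; int type; };

    static int watch_hits(const struct watchpoint *w, unsigned long vaddr,
                          int access_flags)
    {
        /* Hit only on an exact address match AND a type overlap. */
        return vaddr == w->vaddr && (w->type & access_flags);
    }

    int main(void)
    {
        struct watchpoint w = { 0x1000, PAGE_WRITE };
        printf("read  hit: %d\n", watch_hits(&w, 0x1000, PAGE_READ));   /* 0 */
        printf("write hit: %d\n", watch_hits(&w, 0x1000, PAGE_WRITE));  /* 1 */
        return 0;
    }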
gdbstub.c
@@ -1117,21 +1117,37 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
         if (*p == ',')
             p++;
         len = strtoull(p, (char **)&p, 16);
-        if (type == 0 || type == 1) {
+        switch (type) {
+        case 0:
+        case 1:
             if (cpu_breakpoint_insert(env, addr) < 0)
                 goto breakpoint_error;
             put_packet(s, "OK");
+            break;
 #ifndef CONFIG_USER_ONLY
-        } else if (type == 2) {
-            if (cpu_watchpoint_insert(env, addr) < 0)
+        case 2:
+            type = PAGE_WRITE;
+            goto insert_watchpoint;
+        case 3:
+            type = PAGE_READ;
+            goto insert_watchpoint;
+        case 4:
+            type = PAGE_READ | PAGE_WRITE;
+        insert_watchpoint:
+            if (cpu_watchpoint_insert(env, addr, type) < 0)
                 goto breakpoint_error;
             put_packet(s, "OK");
+            break;
 #endif
-        } else {
-        breakpoint_error:
-            put_packet(s, "E22");
+        default:
+            put_packet(s, "");
+            break;
         }
         break;
+    breakpoint_error:
+        put_packet(s, "E22");
+        break;
+
     case 'z':
         type = strtoul(p, (char **)&p, 16);
         if (*p == ',')
@@ -1144,12 +1160,12 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
             cpu_breakpoint_remove(env, addr);
             put_packet(s, "OK");
 #ifndef CONFIG_USER_ONLY
-        } else if (type == 2) {
+        } else if (type >= 2 && type <= 4) {
             cpu_watchpoint_remove(env, addr);
             put_packet(s, "OK");
 #endif
         } else {
-            goto breakpoint_error;
+            put_packet(s, "");
         }
         break;
     case 'q':
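The switch added above maps the GDB remote protocol's Z2/Z3/Z4 watchpoint packet types (write, read, access) onto the per-watchpoint flags; the same mapping in isolation, with the flag values again assumed:

    #include <stdio.h>

    #define PAGE_READ  0x0001  /* assumed cpu-all.h values */
    #define PAGE_WRITE 0x0002

    static int watch_flags_for_ztype(int type)
    {
        switch (type) {
        case 2:  return PAGE_WRITE;              /* Z2: write watchpoint */
        case 3:  return PAGE_READ;               /* Z3: read watchpoint */
        case 4:  return PAGE_READ | PAGE_WRITE;  /* Z4: access watchpoint */
        default: return 0;                       /* unsupported type */
        }
    }

    int main(void)
    {
        printf("Z4 -> %#x\n", (unsigned)watch_flags_for_ztype(4));
        return 0;
    }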
hw/pflash_cfi01.c
@@ -202,14 +202,8 @@ static void pflash_write (pflash_t *pfl, target_ulong offset, uint32_t value,
     uint8_t *p;
     uint8_t cmd;
 
-    /* WARNING: when the memory area is in ROMD mode, the offset is a
-       ram offset, not a physical address */
     cmd = value;
-
-    if (pfl->wcycle == 0)
-        offset -= (target_ulong)(long)pfl->storage;
-    else
-        offset -= pfl->base;
+    offset -= pfl->base;
 
     DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d wcycle 0x%x\n",
             __func__, offset, value, width, pfl->wcycle);
hw/pflash_cfi02.c
@@ -112,13 +112,12 @@ static uint32_t pflash_read (pflash_t *pfl, uint32_t offset, int width)
 
     DPRINTF("%s: offset " TARGET_FMT_lx "\n", __func__, offset);
     ret = -1;
+    offset -= pfl->base;
     if (pfl->rom_mode) {
-        offset -= (uint32_t)(long)pfl->storage;
         /* Lazy reset to ROMD mode */
         if (pfl->wcycle == 0)
             pflash_register_memory(pfl, 1);
-    } else
-        offset -= pfl->base;
+    }
     offset &= pfl->chip_len - 1;
     boff = offset & 0xFF;
     if (pfl->width == 2)
@@ -242,12 +241,7 @@ static void pflash_write (pflash_t *pfl, uint32_t offset, uint32_t value,
     }
     DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d %d\n", __func__,
             offset, value, width, pfl->wcycle);
-    /* WARNING: when the memory area is in ROMD mode, the offset is a
-       ram offset, not a physical address */
-    if (pfl->rom_mode)
-        offset -= (uint32_t)(long)pfl->storage;
-    else
-        offset -= pfl->base;
+    offset -= pfl->base;
     offset &= pfl->chip_len - 1;
 
     DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d\n", __func__,
softmmu_template.h
@@ -51,12 +51,13 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                         int mmu_idx,
                                                         void *retaddr);
 static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
-                                              target_ulong tlb_addr)
+                                              target_ulong addr)
 {
     DATA_TYPE res;
     int index;
+    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
 
-    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
 #if SHIFT <= 2
     res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
 #else
@@ -81,7 +82,7 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
     DATA_TYPE res;
     int index;
     target_ulong tlb_addr;
-    target_phys_addr_t physaddr;
+    target_phys_addr_t addend;
     void *retaddr;
 
     /* test if there is match for unaligned or IO access */
@@ -90,12 +91,12 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
  redo:
     tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
     if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        physaddr = addr + env->tlb_table[mmu_idx][index].addend;
         if (tlb_addr & ~TARGET_PAGE_MASK) {
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
+            addend = env->iotlb[mmu_idx][index];
+            res = glue(io_read, SUFFIX)(addend, addr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
             /* slow unaligned access (it spans two pages or IO) */
         do_unaligned_access:
@@ -113,7 +114,8 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                 do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
             }
 #endif
-            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
+            addend = env->tlb_table[mmu_idx][index].addend;
+            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
         }
     } else {
         /* the page is not in the TLB : fill it */
@@ -135,19 +137,19 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
 {
     DATA_TYPE res, res1, res2;
     int index, shift;
-    target_phys_addr_t physaddr;
+    target_phys_addr_t addend;
     target_ulong tlb_addr, addr1, addr2;
 
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
     if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        physaddr = addr + env->tlb_table[mmu_idx][index].addend;
         if (tlb_addr & ~TARGET_PAGE_MASK) {
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
+            addend = env->iotlb[mmu_idx][index];
+            res = glue(io_read, SUFFIX)(addend, addr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
             /* slow unaligned access (it spans two pages) */
@@ -166,7 +168,8 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
             res = (DATA_TYPE)res;
         } else {
             /* unaligned/aligned access in the same page */
-            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
+            addend = env->tlb_table[mmu_idx][index].addend;
+            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
         }
     } else {
         /* the page is not in the TLB : fill it */
@@ -185,13 +188,14 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
 
 static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                           DATA_TYPE val,
-                                          target_ulong tlb_addr,
+                                          target_ulong addr,
                                           void *retaddr)
 {
     int index;
+    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
 
-    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
-    env->mem_write_vaddr = tlb_addr;
+    env->mem_write_vaddr = addr;
     env->mem_write_pc = (unsigned long)retaddr;
 #if SHIFT <= 2
     io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
@@ -213,7 +217,7 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                  DATA_TYPE val,
                                                  int mmu_idx)
 {
-    target_phys_addr_t physaddr;
+    target_phys_addr_t addend;
     target_ulong tlb_addr;
     void *retaddr;
     int index;
@@ -222,13 +226,13 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
  redo:
     tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        physaddr = addr + env->tlb_table[mmu_idx][index].addend;
         if (tlb_addr & ~TARGET_PAGE_MASK) {
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
             retaddr = GETPC();
-            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
+            addend = env->iotlb[mmu_idx][index];
+            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
             retaddr = GETPC();
@@ -245,7 +249,8 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                 do_unaligned_access(addr, 1, mmu_idx, retaddr);
             }
 #endif
-            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
+            addend = env->tlb_table[mmu_idx][index].addend;
+            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
         }
     } else {
         /* the page is not in the TLB : fill it */
@@ -265,7 +270,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                    int mmu_idx,
                                                    void *retaddr)
 {
-    target_phys_addr_t physaddr;
+    target_phys_addr_t addend;
     target_ulong tlb_addr;
     int index, i;
 
@@ -273,12 +278,12 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
  redo:
     tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        physaddr = addr + env->tlb_table[mmu_idx][index].addend;
         if (tlb_addr & ~TARGET_PAGE_MASK) {
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
+            addend = env->iotlb[mmu_idx][index];
+            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
             /* XXX: not efficient, but simple */
@@ -295,7 +300,8 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
         }
     } else {
         /* aligned/unaligned access in the same page */
-        glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
+        addend = env->tlb_table[mmu_idx][index].addend;
+        glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
     }
 } else {
     /* the page is not in the TLB : fill it */
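Since the stored iotlb value is installed as iotlb - vaddr with vaddr page aligned, io_read and io_write can recover both the handler index (from the low bits of the stored value) and the guest physical address (by adding back the access address). A worked standalone example of that recovery, with all addresses illustrative and 4 KB pages assumed:

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK (~((uint32_t)((1u << TARGET_PAGE_BITS) - 1)))
    #define IO_MEM_SHIFT 3
    #define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

    int main(void)
    {
        uint32_t vaddr_page = 0x00004000;          /* guest virtual page */
        uint32_t paddr_page = 0x10000000;          /* device physical page */
        uint32_t io_index   = 5 << IO_MEM_SHIFT;   /* registered handler */

        /* What tlb_set_page_exec stores in env->iotlb[mmu_idx][index]. */
        uint32_t stored = (io_index + paddr_page) - vaddr_page;

        /* What io_read/io_write recover, given the access address. */
        uint32_t addr    = vaddr_page + 0x123;
        uint32_t handler = (stored >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        uint32_t paddr   = (stored & TARGET_PAGE_MASK) + addr;

        printf("handler=%u paddr=%#x\n",
               (unsigned)handler, (unsigned)paddr);  /* 5, 0x10000123 */
        return 0;
    }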