Commit 0f459d16c378128a586cf161526721a81b629405

Authored by pbrook
1 parent f227f17d

Clean up MMIO TLB handling.

The IO index is now stored in its own field, instead of being wedged
into the vaddr field.  This eliminates the ROMD and watchpoint host
pointer weirdness.  The IO index space is expanded by 1 bit, and
several additional bits are made available in the TLB vaddr field.


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4704 c046a42c-6fe2-441c-8c8c-71466251a162
cpu-all.h
... ... @@ -797,7 +797,7 @@ extern CPUState *cpu_single_env;
797 797 void cpu_interrupt(CPUState *s, int mask);
798 798 void cpu_reset_interrupt(CPUState *env, int mask);
799 799  
800   -int cpu_watchpoint_insert(CPUState *env, target_ulong addr);
  800 +int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type);
801 801 int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
802 802 void cpu_watchpoint_remove_all(CPUState *env);
803 803 int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
... ... @@ -868,21 +868,34 @@ extern uint8_t *phys_ram_dirty;
868 868 extern ram_addr_t ram_size;
869 869  
870 870 /* physical memory access */
871   -#define TLB_INVALID_MASK (1 << 3)
872   -#define IO_MEM_SHIFT 4
  871 +
  872 +/* MMIO pages are identified by a combination of an IO device index and
  873 + 3 flags. The ROMD code stores the page ram offset in the iotlb entry,
  874 + so only a limited number of ids are available. */
  875 +
  876 +#define IO_MEM_SHIFT 3
873 877 #define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))
874 878  
875 879 #define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
876 880 #define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
877 881 #define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
878   -#define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */
879   -/* acts like a ROM when read and like a device when written. As an
880   - exception, the write memory callback gets the ram offset instead of
881   - the physical address */
  882 +#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)
  883 +
  884 +/* Acts like a ROM when read and like a device when written. */
882 885 #define IO_MEM_ROMD (1)
883 886 #define IO_MEM_SUBPAGE (2)
884 887 #define IO_MEM_SUBWIDTH (4)
885 888  
  889 +/* Flags stored in the low bits of the TLB virtual address. These are
  890 + defined so that fast path ram access is all zeros. */
  891 +/* Zero if TLB entry is valid. */
  892 +#define TLB_INVALID_MASK (1 << 3)
  893 +/* Set if TLB entry references a clean RAM page. The iotlb entry will
  894 + contain the page physical address. */
  895 +#define TLB_NOTDIRTY (1 << 4)
  896 +/* Set if TLB entry is an IO callback. */
  897 +#define TLB_MMIO (1 << 5)
  898 +
886 899 typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
887 900 typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
888 901  
... ...
cpu-defs.h
... ... @@ -106,16 +106,17 @@ typedef uint64_t target_phys_addr_t;
106 106 #endif
107 107  
108 108 typedef struct CPUTLBEntry {
109   - /* bit 31 to TARGET_PAGE_BITS : virtual address
110   - bit TARGET_PAGE_BITS-1..IO_MEM_SHIFT : if non zero, memory io
111   - zone number
  109 + /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
  110 + bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
  111 + go directly to ram.
112 112 bit 3 : indicates that the entry is invalid
113 113 bit 2..0 : zero
114 114 */
115 115 target_ulong addr_read;
116 116 target_ulong addr_write;
117 117 target_ulong addr_code;
118   - /* addend to virtual address to get physical address */
  118 +    /* Addend to virtual address to get physical address. IO accesses
  119 +       use the corresponding iotlb value. */
119 120 #if TARGET_PHYS_ADDR_BITS == 64
120 121 /* on i386 Linux make sure it is aligned */
121 122 target_phys_addr_t addend __attribute__((aligned(8)));
... ... @@ -143,6 +144,7 @@ typedef struct CPUTLBEntry {
143 144 int halted; /* TRUE if the CPU is in suspend state */ \
144 145 /* The meaning of the MMU modes is defined in the target code. */ \
145 146 CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
  147 + target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
146 148 struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
147 149 /* buffer for temporaries in the code generator */ \
148 150 long temp_buf[CPU_TEMP_BUF_NLONGS]; \
... ... @@ -155,7 +157,7 @@ typedef struct CPUTLBEntry {
155 157 \
156 158 struct { \
157 159 target_ulong vaddr; \
158   - target_phys_addr_t addend; \
  160 + int type; /* PAGE_READ/PAGE_WRITE */ \
159 161 } watchpoint[MAX_WATCHPOINTS]; \
160 162 int nb_watchpoints; \
161 163 int watchpoint_hit; \
... ...
... ... @@ -121,7 +121,7 @@ typedef struct PageDesc {
121 121 } PageDesc;
122 122  
123 123 typedef struct PhysPageDesc {
124   - /* offset in host memory of the page + io_index in the low 12 bits */
  124 + /* offset in host memory of the page + io_index in the low bits */
125 125 ram_addr_t phys_offset;
126 126 } PhysPageDesc;
127 127  
... ... @@ -1188,7 +1188,7 @@ static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1188 1188 #endif
1189 1189  
1190 1190 /* Add a watchpoint. */
1191   -int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
  1191 +int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1192 1192 {
1193 1193 int i;
1194 1194  
... ... @@ -1201,6 +1201,7 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1201 1201  
1202 1202 i = env->nb_watchpoints++;
1203 1203 env->watchpoint[i].vaddr = addr;
  1204 + env->watchpoint[i].type = type;
1204 1205 tlb_flush_page(env, addr);
1205 1206 /* FIXME: This flush is needed because of the hack to make memory ops
1206 1207 terminate the TB. It can be removed once the proper IO trap and
... ... @@ -1617,7 +1618,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1617 1618 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1618 1619 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1619 1620 if ((addr - start) < length) {
1620   - tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
  1621 + tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1621 1622 }
1622 1623 }
1623 1624 }
... ... @@ -1681,7 +1682,7 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1681 1682 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1682 1683 tlb_entry->addend - (unsigned long)phys_ram_base;
1683 1684 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1684   - tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
  1685 + tlb_entry->addr_write |= TLB_NOTDIRTY;
1685 1686 }
1686 1687 }
1687 1688 }
... ... @@ -1704,33 +1705,26 @@ void cpu_tlb_update_dirty(CPUState *env)
1704 1705 #endif
1705 1706 }
1706 1707  
1707   -static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1708   - unsigned long start)
  1708 +static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1709 1709 {
1710   - unsigned long addr;
1711   - if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1712   - addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1713   - if (addr == start) {
1714   - tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1715   - }
1716   - }
  1710 + if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
  1711 + tlb_entry->addr_write = vaddr;
1717 1712 }
1718 1713  
1719   -/* update the TLB corresponding to virtual page vaddr and phys addr
1720   - addr so that it is no longer dirty */
1721   -static inline void tlb_set_dirty(CPUState *env,
1722   - unsigned long addr, target_ulong vaddr)
  1714 +/* update the TLB corresponding to virtual page vaddr
  1715 + so that it is no longer dirty */
  1716 +static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1723 1717 {
1724 1718 int i;
1725 1719  
1726   - addr &= TARGET_PAGE_MASK;
  1720 + vaddr &= TARGET_PAGE_MASK;
1727 1721 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1728   - tlb_set_dirty1(&env->tlb_table[0][i], addr);
1729   - tlb_set_dirty1(&env->tlb_table[1][i], addr);
  1722 + tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
  1723 + tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1730 1724 #if (NB_MMU_MODES >= 3)
1731   - tlb_set_dirty1(&env->tlb_table[2][i], addr);
  1725 + tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1732 1726 #if (NB_MMU_MODES == 4)
1733   - tlb_set_dirty1(&env->tlb_table[3][i], addr);
  1727 + tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1734 1728 #endif
1735 1729 #endif
1736 1730 }
... ... @@ -1747,10 +1741,12 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1747 1741 unsigned long pd;
1748 1742 unsigned int index;
1749 1743 target_ulong address;
  1744 + target_ulong code_address;
1750 1745 target_phys_addr_t addend;
1751 1746 int ret;
1752 1747 CPUTLBEntry *te;
1753 1748 int i;
  1749 + target_phys_addr_t iotlb;
1754 1750  
1755 1751 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1756 1752 if (!p) {
... ... @@ -1764,64 +1760,69 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1764 1760 #endif
1765 1761  
1766 1762 ret = 0;
1767   - {
1768   - if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1769   - /* IO memory case */
1770   - address = vaddr | pd;
1771   - addend = paddr;
1772   - } else {
1773   - /* standard memory */
1774   - address = vaddr;
1775   - addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1776   - }
1777   -
1778   - /* Make accesses to pages with watchpoints go via the
1779   - watchpoint trap routines. */
1780   - for (i = 0; i < env->nb_watchpoints; i++) {
1781   - if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1782   - if (address & ~TARGET_PAGE_MASK) {
1783   - env->watchpoint[i].addend = 0;
1784   - address = vaddr | io_mem_watch;
1785   - } else {
1786   - env->watchpoint[i].addend = pd - paddr +
1787   - (unsigned long) phys_ram_base;
1788   - /* TODO: Figure out how to make read watchpoints coexist
1789   - with code. */
1790   - pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1791   - }
1792   - }
  1763 + address = vaddr;
  1764 + if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
  1765 + /* IO memory case (romd handled later) */
  1766 + address |= TLB_MMIO;
  1767 + }
  1768 + addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
  1769 + if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
  1770 + /* Normal RAM. */
  1771 + iotlb = pd & TARGET_PAGE_MASK;
  1772 + if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
  1773 + iotlb |= IO_MEM_NOTDIRTY;
  1774 + else
  1775 + iotlb |= IO_MEM_ROM;
  1776 + } else {
  1777 +        /* IO handlers are currently passed a physical address.
  1778 + It would be nice to pass an offset from the base address
  1779 + of that region. This would avoid having to special case RAM,
  1780 + and avoid full address decoding in every device.
  1781 + We can't use the high bits of pd for this because
  1782 + IO_MEM_ROMD uses these as a ram address. */
  1783 + iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
  1784 + }
  1785 +
  1786 + code_address = address;
  1787 + /* Make accesses to pages with watchpoints go via the
  1788 + watchpoint trap routines. */
  1789 + for (i = 0; i < env->nb_watchpoints; i++) {
  1790 + if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
  1791 + iotlb = io_mem_watch + paddr;
  1792 + /* TODO: The memory case can be optimized by not trapping
  1793 + reads of pages with a write breakpoint. */
  1794 + address |= TLB_MMIO;
1793 1795 }
  1796 + }
1794 1797  
1795   - index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1796   - addend -= vaddr;
1797   - te = &env->tlb_table[mmu_idx][index];
1798   - te->addend = addend;
1799   - if (prot & PAGE_READ) {
1800   - te->addr_read = address;
1801   - } else {
1802   - te->addr_read = -1;
1803   - }
  1798 + index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  1799 + env->iotlb[mmu_idx][index] = iotlb - vaddr;
  1800 + te = &env->tlb_table[mmu_idx][index];
  1801 + te->addend = addend - vaddr;
  1802 + if (prot & PAGE_READ) {
  1803 + te->addr_read = address;
  1804 + } else {
  1805 + te->addr_read = -1;
  1806 + }
1804 1807  
1805   - if (prot & PAGE_EXEC) {
1806   - te->addr_code = address;
1807   - } else {
1808   - te->addr_code = -1;
1809   - }
1810   - if (prot & PAGE_WRITE) {
1811   - if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1812   - (pd & IO_MEM_ROMD)) {
1813   - /* write access calls the I/O callback */
1814   - te->addr_write = vaddr |
1815   - (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1816   - } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1817   - !cpu_physical_memory_is_dirty(pd)) {
1818   - te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1819   - } else {
1820   - te->addr_write = address;
1821   - }
  1808 + if (prot & PAGE_EXEC) {
  1809 + te->addr_code = code_address;
  1810 + } else {
  1811 + te->addr_code = -1;
  1812 + }
  1813 + if (prot & PAGE_WRITE) {
  1814 + if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
  1815 + (pd & IO_MEM_ROMD)) {
  1816 + /* Write access calls the I/O callback. */
  1817 + te->addr_write = address | TLB_MMIO;
  1818 + } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
  1819 + !cpu_physical_memory_is_dirty(pd)) {
  1820 + te->addr_write = address | TLB_NOTDIRTY;
1822 1821 } else {
1823   - te->addr_write = -1;
  1822 + te->addr_write = address;
1824 1823 }
  1824 + } else {
  1825 + te->addr_write = -1;
1825 1826 }
1826 1827 return ret;
1827 1828 }
... ... @@ -2181,11 +2182,10 @@ static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2181 2182 unassigned_mem_writeb,
2182 2183 };
2183 2184  
2184   -static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
  2185 +static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
  2186 + uint32_t val)
2185 2187 {
2186   - unsigned long ram_addr;
2187 2188 int dirty_flags;
2188   - ram_addr = addr - (unsigned long)phys_ram_base;
2189 2189 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2190 2190 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2191 2191 #if !defined(CONFIG_USER_ONLY)
... ... @@ -2193,7 +2193,7 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t
2193 2193 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2194 2194 #endif
2195 2195 }
2196   - stb_p((uint8_t *)(long)addr, val);
  2196 + stb_p(phys_ram_base + ram_addr, val);
2197 2197 #ifdef USE_KQEMU
2198 2198 if (cpu_single_env->kqemu_enabled &&
2199 2199 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
... ... @@ -2204,14 +2204,13 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t
2204 2204 /* we remove the notdirty callback only if the code has been
2205 2205 flushed */
2206 2206 if (dirty_flags == 0xff)
2207   - tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
  2207 + tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2208 2208 }
2209 2209  
2210   -static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
  2210 +static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
  2211 + uint32_t val)
2211 2212 {
2212   - unsigned long ram_addr;
2213 2213 int dirty_flags;
2214   - ram_addr = addr - (unsigned long)phys_ram_base;
2215 2214 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2216 2215 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2217 2216 #if !defined(CONFIG_USER_ONLY)
... ... @@ -2219,7 +2218,7 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t
2219 2218 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2220 2219 #endif
2221 2220 }
2222   - stw_p((uint8_t *)(long)addr, val);
  2221 + stw_p(phys_ram_base + ram_addr, val);
2223 2222 #ifdef USE_KQEMU
2224 2223 if (cpu_single_env->kqemu_enabled &&
2225 2224 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
... ... @@ -2230,14 +2229,13 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t
2230 2229 /* we remove the notdirty callback only if the code has been
2231 2230 flushed */
2232 2231 if (dirty_flags == 0xff)
2233   - tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
  2232 + tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2234 2233 }
2235 2234  
2236   -static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
  2235 +static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
  2236 + uint32_t val)
2237 2237 {
2238   - unsigned long ram_addr;
2239 2238 int dirty_flags;
2240   - ram_addr = addr - (unsigned long)phys_ram_base;
2241 2239 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2242 2240 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2243 2241 #if !defined(CONFIG_USER_ONLY)
... ... @@ -2245,7 +2243,7 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t
2245 2243 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2246 2244 #endif
2247 2245 }
2248   - stl_p((uint8_t *)(long)addr, val);
  2246 + stl_p(phys_ram_base + ram_addr, val);
2249 2247 #ifdef USE_KQEMU
2250 2248 if (cpu_single_env->kqemu_enabled &&
2251 2249 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
... ... @@ -2256,7 +2254,7 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t
2256 2254 /* we remove the notdirty callback only if the code has been
2257 2255 flushed */
2258 2256 if (dirty_flags == 0xff)
2259   - tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
  2257 + tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2260 2258 }
2261 2259  
2262 2260 static CPUReadMemoryFunc *error_mem_read[3] = {
... ... @@ -2271,67 +2269,63 @@ static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2271 2269 notdirty_mem_writel,
2272 2270 };
2273 2271  
  2272 +/* Generate a debug exception if a watchpoint has been hit. */
  2273 +static void check_watchpoint(int offset, int flags)
  2274 +{
  2275 + CPUState *env = cpu_single_env;
  2276 + target_ulong vaddr;
  2277 + int i;
  2278 +
  2279 + vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
  2280 + for (i = 0; i < env->nb_watchpoints; i++) {
  2281 + if (vaddr == env->watchpoint[i].vaddr
  2282 + && (env->watchpoint[i].type & flags)) {
  2283 + env->watchpoint_hit = i + 1;
  2284 + cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
  2285 + break;
  2286 + }
  2287 + }
  2288 +}
  2289 +
2274 2290 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2275 2291 so these check for a hit then pass through to the normal out-of-line
2276 2292 phys routines. */
2277 2293 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2278 2294 {
  2295 + check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2279 2296 return ldub_phys(addr);
2280 2297 }
2281 2298  
2282 2299 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2283 2300 {
  2301 + check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2284 2302 return lduw_phys(addr);
2285 2303 }
2286 2304  
2287 2305 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2288 2306 {
  2307 + check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2289 2308 return ldl_phys(addr);
2290 2309 }
2291 2310  
2292   -/* Generate a debug exception if a watchpoint has been hit.
2293   - Returns the real physical address of the access. addr will be a host
2294   - address in case of a RAM location. */
2295   -static target_ulong check_watchpoint(target_phys_addr_t addr)
2296   -{
2297   - CPUState *env = cpu_single_env;
2298   - target_ulong watch;
2299   - target_ulong retaddr;
2300   - int i;
2301   -
2302   - retaddr = addr;
2303   - for (i = 0; i < env->nb_watchpoints; i++) {
2304   - watch = env->watchpoint[i].vaddr;
2305   - if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2306   - retaddr = addr - env->watchpoint[i].addend;
2307   - if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2308   - cpu_single_env->watchpoint_hit = i + 1;
2309   - cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2310   - break;
2311   - }
2312   - }
2313   - }
2314   - return retaddr;
2315   -}
2316   -
2317 2311 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2318 2312 uint32_t val)
2319 2313 {
2320   - addr = check_watchpoint(addr);
  2314 + check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2321 2315 stb_phys(addr, val);
2322 2316 }
2323 2317  
2324 2318 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2325 2319 uint32_t val)
2326 2320 {
2327   - addr = check_watchpoint(addr);
  2321 + check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2328 2322 stw_phys(addr, val);
2329 2323 }
2330 2324  
2331 2325 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2332 2326 uint32_t val)
2333 2327 {
2334   - addr = check_watchpoint(addr);
  2328 + check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2335 2329 stl_phys(addr, val);
2336 2330 }
2337 2331  
... ... @@ -2501,7 +2495,7 @@ static void io_mem_init(void)
2501 2495 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2502 2496 io_mem_nb = 5;
2503 2497  
2504   - io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
  2498 + io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2505 2499 watch_mem_write, NULL);
2506 2500 /* alloc dirty bits array */
2507 2501 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
... ...
gdbstub.c
... ... @@ -1117,21 +1117,37 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
1117 1117 if (*p == ',')
1118 1118 p++;
1119 1119 len = strtoull(p, (char **)&p, 16);
1120   - if (type == 0 || type == 1) {
  1120 + switch (type) {
  1121 + case 0:
  1122 + case 1:
1121 1123 if (cpu_breakpoint_insert(env, addr) < 0)
1122 1124 goto breakpoint_error;
1123 1125 put_packet(s, "OK");
  1126 + break;
1124 1127 #ifndef CONFIG_USER_ONLY
1125   - } else if (type == 2) {
1126   - if (cpu_watchpoint_insert(env, addr) < 0)
  1128 + case 2:
  1129 + type = PAGE_WRITE;
  1130 + goto insert_watchpoint;
  1131 + case 3:
  1132 + type = PAGE_READ;
  1133 + goto insert_watchpoint;
  1134 + case 4:
  1135 + type = PAGE_READ | PAGE_WRITE;
  1136 + insert_watchpoint:
  1137 + if (cpu_watchpoint_insert(env, addr, type) < 0)
1127 1138 goto breakpoint_error;
1128 1139 put_packet(s, "OK");
  1140 + break;
1129 1141 #endif
1130   - } else {
1131   - breakpoint_error:
1132   - put_packet(s, "E22");
  1142 + default:
  1143 + put_packet(s, "");
  1144 + break;
1133 1145 }
1134 1146 break;
  1147 + breakpoint_error:
  1148 + put_packet(s, "E22");
  1149 + break;
  1150 +
1135 1151 case 'z':
1136 1152 type = strtoul(p, (char **)&p, 16);
1137 1153 if (*p == ',')
... ... @@ -1144,12 +1160,12 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
1144 1160 cpu_breakpoint_remove(env, addr);
1145 1161 put_packet(s, "OK");
1146 1162 #ifndef CONFIG_USER_ONLY
1147   - } else if (type == 2) {
  1163 +        } else if (type >= 2 && type <= 4) {
1148 1164 cpu_watchpoint_remove(env, addr);
1149 1165 put_packet(s, "OK");
1150 1166 #endif
1151 1167 } else {
1152   - goto breakpoint_error;
  1168 + put_packet(s, "");
1153 1169 }
1154 1170 break;
1155 1171 case 'q':
... ...
hw/pflash_cfi01.c
... ... @@ -202,14 +202,8 @@ static void pflash_write (pflash_t *pfl, target_ulong offset, uint32_t value,
202 202 uint8_t *p;
203 203 uint8_t cmd;
204 204  
205   - /* WARNING: when the memory area is in ROMD mode, the offset is a
206   - ram offset, not a physical address */
207 205 cmd = value;
208   -
209   - if (pfl->wcycle == 0)
210   - offset -= (target_ulong)(long)pfl->storage;
211   - else
212   - offset -= pfl->base;
  206 + offset -= pfl->base;
213 207  
214 208 DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d wcycle 0x%x\n",
215 209 __func__, offset, value, width, pfl->wcycle);
... ...
hw/pflash_cfi02.c
... ... @@ -112,13 +112,12 @@ static uint32_t pflash_read (pflash_t *pfl, uint32_t offset, int width)
112 112  
113 113 DPRINTF("%s: offset " TARGET_FMT_lx "\n", __func__, offset);
114 114 ret = -1;
  115 + offset -= pfl->base;
115 116 if (pfl->rom_mode) {
116   - offset -= (uint32_t)(long)pfl->storage;
117 117 /* Lazy reset of to ROMD mode */
118 118 if (pfl->wcycle == 0)
119 119 pflash_register_memory(pfl, 1);
120   - } else
121   - offset -= pfl->base;
  120 + }
122 121 offset &= pfl->chip_len - 1;
123 122 boff = offset & 0xFF;
124 123 if (pfl->width == 2)
... ... @@ -242,12 +241,7 @@ static void pflash_write (pflash_t *pfl, uint32_t offset, uint32_t value,
242 241 }
243 242 DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d %d\n", __func__,
244 243 offset, value, width, pfl->wcycle);
245   - /* WARNING: when the memory area is in ROMD mode, the offset is a
246   - ram offset, not a physical address */
247   - if (pfl->rom_mode)
248   - offset -= (uint32_t)(long)pfl->storage;
249   - else
250   - offset -= pfl->base;
  244 + offset -= pfl->base;
251 245 offset &= pfl->chip_len - 1;
252 246  
253 247 DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d\n", __func__,
... ...
softmmu_template.h
... ... @@ -51,12 +51,13 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
51 51 int mmu_idx,
52 52 void *retaddr);
53 53 static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
54   - target_ulong tlb_addr)
  54 + target_ulong addr)
55 55 {
56 56 DATA_TYPE res;
57 57 int index;
  58 + index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
  59 + physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
58 60  
59   - index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
60 61 #if SHIFT <= 2
61 62 res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
62 63 #else
... ... @@ -81,7 +82,7 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
81 82 DATA_TYPE res;
82 83 int index;
83 84 target_ulong tlb_addr;
84   - target_phys_addr_t physaddr;
  85 + target_phys_addr_t addend;
85 86 void *retaddr;
86 87  
87 88 /* test if there is match for unaligned or IO access */
... ... @@ -90,12 +91,12 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
90 91 redo:
91 92 tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
92 93 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
93   - physaddr = addr + env->tlb_table[mmu_idx][index].addend;
94 94 if (tlb_addr & ~TARGET_PAGE_MASK) {
95 95 /* IO access */
96 96 if ((addr & (DATA_SIZE - 1)) != 0)
97 97 goto do_unaligned_access;
98   - res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
  98 + addend = env->iotlb[mmu_idx][index];
  99 + res = glue(io_read, SUFFIX)(addend, addr);
99 100 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
100 101 /* slow unaligned access (it spans two pages or IO) */
101 102 do_unaligned_access:
... ... @@ -113,7 +114,8 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
113 114 do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
114 115 }
115 116 #endif
116   - res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
  117 + addend = env->tlb_table[mmu_idx][index].addend;
  118 + res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
117 119 }
118 120 } else {
119 121 /* the page is not in the TLB : fill it */
... ... @@ -135,19 +137,19 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
135 137 {
136 138 DATA_TYPE res, res1, res2;
137 139 int index, shift;
138   - target_phys_addr_t physaddr;
  140 + target_phys_addr_t addend;
139 141 target_ulong tlb_addr, addr1, addr2;
140 142  
141 143 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
142 144 redo:
143 145 tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
144 146 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
145   - physaddr = addr + env->tlb_table[mmu_idx][index].addend;
146 147 if (tlb_addr & ~TARGET_PAGE_MASK) {
147 148 /* IO access */
148 149 if ((addr & (DATA_SIZE - 1)) != 0)
149 150 goto do_unaligned_access;
150   - res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
  151 + addend = env->iotlb[mmu_idx][index];
  152 + res = glue(io_read, SUFFIX)(addend, addr);
151 153 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
152 154 do_unaligned_access:
153 155 /* slow unaligned access (it spans two pages) */
... ... @@ -166,7 +168,8 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
166 168 res = (DATA_TYPE)res;
167 169 } else {
168 170 /* unaligned/aligned access in the same page */
169   - res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
  171 + addend = env->tlb_table[mmu_idx][index].addend;
  172 + res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
170 173 }
171 174 } else {
172 175 /* the page is not in the TLB : fill it */
... ... @@ -185,13 +188,14 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
185 188  
186 189 static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
187 190 DATA_TYPE val,
188   - target_ulong tlb_addr,
  191 + target_ulong addr,
189 192 void *retaddr)
190 193 {
191 194 int index;
  195 + index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
  196 + physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
192 197  
193   - index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
194   - env->mem_write_vaddr = tlb_addr;
  198 + env->mem_write_vaddr = addr;
195 199 env->mem_write_pc = (unsigned long)retaddr;
196 200 #if SHIFT <= 2
197 201 io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
... ... @@ -213,7 +217,7 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
213 217 DATA_TYPE val,
214 218 int mmu_idx)
215 219 {
216   - target_phys_addr_t physaddr;
  220 + target_phys_addr_t addend;
217 221 target_ulong tlb_addr;
218 222 void *retaddr;
219 223 int index;
... ... @@ -222,13 +226,13 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
222 226 redo:
223 227 tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
224 228 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
225   - physaddr = addr + env->tlb_table[mmu_idx][index].addend;
226 229 if (tlb_addr & ~TARGET_PAGE_MASK) {
227 230 /* IO access */
228 231 if ((addr & (DATA_SIZE - 1)) != 0)
229 232 goto do_unaligned_access;
230 233 retaddr = GETPC();
231   - glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
  234 + addend = env->iotlb[mmu_idx][index];
  235 + glue(io_write, SUFFIX)(addend, val, addr, retaddr);
232 236 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
233 237 do_unaligned_access:
234 238 retaddr = GETPC();
... ... @@ -245,7 +249,8 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
245 249 do_unaligned_access(addr, 1, mmu_idx, retaddr);
246 250 }
247 251 #endif
248   - glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
  252 + addend = env->tlb_table[mmu_idx][index].addend;
  253 + glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
249 254 }
250 255 } else {
251 256 /* the page is not in the TLB : fill it */
... ... @@ -265,7 +270,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
265 270 int mmu_idx,
266 271 void *retaddr)
267 272 {
268   - target_phys_addr_t physaddr;
  273 + target_phys_addr_t addend;
269 274 target_ulong tlb_addr;
270 275 int index, i;
271 276  
... ... @@ -273,12 +278,12 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
273 278 redo:
274 279 tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
275 280 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
276   - physaddr = addr + env->tlb_table[mmu_idx][index].addend;
277 281 if (tlb_addr & ~TARGET_PAGE_MASK) {
278 282 /* IO access */
279 283 if ((addr & (DATA_SIZE - 1)) != 0)
280 284 goto do_unaligned_access;
281   - glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
  285 + addend = env->iotlb[mmu_idx][index];
  286 + glue(io_write, SUFFIX)(addend, val, addr, retaddr);
282 287 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
283 288 do_unaligned_access:
284 289 /* XXX: not efficient, but simple */
... ... @@ -295,7 +300,8 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
295 300 }
296 301 } else {
297 302 /* aligned/unaligned access in the same page */
298   - glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
  303 + addend = env->tlb_table[mmu_idx][index].addend;
  304 + glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
299 305 }
300 306 } else {
301 307 /* the page is not in the TLB : fill it */
... ...