Commit 3a7d929e62d82f48b4b93f8691007ae8d9daec57
1 parent
04c504cc
merge self modifying code handling in dirty ram page mechanism
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1544 c046a42c-6fe2-441c-8c8c-71466251a162
Showing
1 changed file
with
118 additions
and
120 deletions
exec.c
@@ -262,8 +262,10 @@ static inline PhysPageDesc *phys_page_find(target_phys_addr_t index) | @@ -262,8 +262,10 @@ static inline PhysPageDesc *phys_page_find(target_phys_addr_t index) | ||
262 | } | 262 | } |
263 | 263 | ||
264 | #if !defined(CONFIG_USER_ONLY) | 264 | #if !defined(CONFIG_USER_ONLY) |
265 | -static void tlb_protect_code(CPUState *env, target_ulong addr); | ||
266 | -static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr); | 265 | +static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr, |
266 | + target_ulong vaddr); | ||
267 | +static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, | ||
268 | + target_ulong vaddr); | ||
267 | 269 | ||
268 | static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc) | 270 | static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc) |
269 | { | 271 | { |
@@ -945,7 +947,7 @@ static inline void tb_alloc_page(TranslationBlock *tb, | @@ -945,7 +947,7 @@ static inline void tb_alloc_page(TranslationBlock *tb, | ||
945 | TranslationBlock *last_first_tb; | 947 | TranslationBlock *last_first_tb; |
946 | 948 | ||
947 | tb->page_addr[n] = page_addr; | 949 | tb->page_addr[n] = page_addr; |
948 | - p = page_find(page_addr >> TARGET_PAGE_BITS); | 950 | + p = page_find_alloc(page_addr >> TARGET_PAGE_BITS); |
949 | tb->page_next[n] = p->first_tb; | 951 | tb->page_next[n] = p->first_tb; |
950 | last_first_tb = p->first_tb; | 952 | last_first_tb = p->first_tb; |
951 | p->first_tb = (TranslationBlock *)((long)tb | n); | 953 | p->first_tb = (TranslationBlock *)((long)tb | n); |
@@ -981,7 +983,7 @@ static inline void tb_alloc_page(TranslationBlock *tb, | @@ -981,7 +983,7 @@ static inline void tb_alloc_page(TranslationBlock *tb, | ||
981 | target_ulong virt_addr; | 983 | target_ulong virt_addr; |
982 | 984 | ||
983 | virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS); | 985 | virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS); |
984 | - tlb_protect_code(cpu_single_env, virt_addr); | 986 | + tlb_protect_code(cpu_single_env, page_addr, virt_addr); |
985 | } | 987 | } |
986 | #endif | 988 | #endif |
987 | 989 | ||
@@ -1473,50 +1475,44 @@ static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr) | @@ -1473,50 +1475,44 @@ static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr) | ||
1473 | { | 1475 | { |
1474 | if (addr == (tlb_entry->address & | 1476 | if (addr == (tlb_entry->address & |
1475 | (TARGET_PAGE_MASK | TLB_INVALID_MASK)) && | 1477 | (TARGET_PAGE_MASK | TLB_INVALID_MASK)) && |
1476 | - (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE && | ||
1477 | - (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) { | ||
1478 | - tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE; | 1478 | + (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { |
1479 | + tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY; | ||
1479 | } | 1480 | } |
1480 | } | 1481 | } |
1481 | 1482 | ||
1482 | /* update the TLBs so that writes to code in the virtual page 'addr' | 1483 | /* update the TLBs so that writes to code in the virtual page 'addr' |
1483 | can be detected */ | 1484 | can be detected */ |
1484 | -static void tlb_protect_code(CPUState *env, target_ulong addr) | 1485 | +static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr, |
1486 | + target_ulong vaddr) | ||
1485 | { | 1487 | { |
1486 | int i; | 1488 | int i; |
1487 | 1489 | ||
1488 | - addr &= TARGET_PAGE_MASK; | ||
1489 | - i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); | ||
1490 | - tlb_protect_code1(&env->tlb_write[0][i], addr); | ||
1491 | - tlb_protect_code1(&env->tlb_write[1][i], addr); | 1490 | + vaddr &= TARGET_PAGE_MASK; |
1491 | + i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); | ||
1492 | + tlb_protect_code1(&env->tlb_write[0][i], vaddr); | ||
1493 | + tlb_protect_code1(&env->tlb_write[1][i], vaddr); | ||
1494 | + | ||
1495 | + phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG; | ||
1496 | +#ifdef USE_KQEMU | ||
1497 | + if (env->kqemu_enabled) { | ||
1498 | + kqemu_set_notdirty(env, ram_addr); | ||
1499 | + } | ||
1500 | +#endif | ||
1501 | + | ||
1492 | #if !defined(CONFIG_SOFTMMU) | 1502 | #if !defined(CONFIG_SOFTMMU) |
1493 | /* NOTE: as we generated the code for this page, it is already at | 1503 | /* NOTE: as we generated the code for this page, it is already at |
1494 | least readable */ | 1504 | least readable */ |
1495 | - if (addr < MMAP_AREA_END) | ||
1496 | - mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ); | 1505 | + if (vaddr < MMAP_AREA_END) |
1506 | + mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ); | ||
1497 | #endif | 1507 | #endif |
1498 | } | 1508 | } |
1499 | 1509 | ||
1500 | -static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry, | ||
1501 | - unsigned long phys_addr) | ||
1502 | -{ | ||
1503 | - if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE && | ||
1504 | - ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) { | ||
1505 | - tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY; | ||
1506 | - } | ||
1507 | -} | ||
1508 | - | ||
1509 | /* update the TLB so that writes in physical page 'phys_addr' are no longer | 1510 | /* update the TLB so that writes in physical page 'phys_addr' are no longer |
1510 | - tested self modifying code */ | ||
1511 | -static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr) | 1511 | + tested for self modifying code */ |
1512 | +static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, | ||
1513 | + target_ulong vaddr) | ||
1512 | { | 1514 | { |
1513 | - int i; | ||
1514 | - | ||
1515 | - phys_addr &= TARGET_PAGE_MASK; | ||
1516 | - phys_addr += (long)phys_ram_base; | ||
1517 | - i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); | ||
1518 | - tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr); | ||
1519 | - tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr); | 1515 | + phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG; |
1520 | } | 1516 | } |
1521 | 1517 | ||
1522 | static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, | 1518 | static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, |
@@ -1531,7 +1527,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, | @@ -1531,7 +1527,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, | ||
1531 | } | 1527 | } |
1532 | } | 1528 | } |
1533 | 1529 | ||
1534 | -void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end, | 1530 | +void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, |
1535 | int dirty_flags) | 1531 | int dirty_flags) |
1536 | { | 1532 | { |
1537 | CPUState *env; | 1533 | CPUState *env; |
@@ -1552,6 +1548,12 @@ void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end, | @@ -1552,6 +1548,12 @@ void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end, | ||
1552 | p[i] &= mask; | 1548 | p[i] &= mask; |
1553 | 1549 | ||
1554 | env = cpu_single_env; | 1550 | env = cpu_single_env; |
1551 | +#ifdef USE_KQEMU | ||
1552 | + if (env->kqemu_enabled) { | ||
1553 | + for(i = 0; i < len; i++) | ||
1554 | + kqemu_set_notdirty(env, (unsigned long)i << TARGET_PAGE_BITS); | ||
1555 | + } | ||
1556 | +#endif | ||
1555 | /* we modify the TLB cache so that the dirty bit will be set again | 1557 | /* we modify the TLB cache so that the dirty bit will be set again |
1556 | when accessing the range */ | 1558 | when accessing the range */ |
1557 | start1 = start + (unsigned long)phys_ram_base; | 1559 | start1 = start + (unsigned long)phys_ram_base; |
@@ -1589,6 +1591,29 @@ void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end, | @@ -1589,6 +1591,29 @@ void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end, | ||
1589 | #endif | 1591 | #endif |
1590 | } | 1592 | } |
1591 | 1593 | ||
1594 | +static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) | ||
1595 | +{ | ||
1596 | + ram_addr_t ram_addr; | ||
1597 | + | ||
1598 | + if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { | ||
1599 | + ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) + | ||
1600 | + tlb_entry->addend - (unsigned long)phys_ram_base; | ||
1601 | + if (!cpu_physical_memory_is_dirty(ram_addr)) { | ||
1602 | + tlb_entry->address |= IO_MEM_NOTDIRTY; | ||
1603 | + } | ||
1604 | + } | ||
1605 | +} | ||
1606 | + | ||
1607 | +/* update the TLB according to the current state of the dirty bits */ | ||
1608 | +void cpu_tlb_update_dirty(CPUState *env) | ||
1609 | +{ | ||
1610 | + int i; | ||
1611 | + for(i = 0; i < CPU_TLB_SIZE; i++) | ||
1612 | + tlb_update_dirty(&env->tlb_write[0][i]); | ||
1613 | + for(i = 0; i < CPU_TLB_SIZE; i++) | ||
1614 | + tlb_update_dirty(&env->tlb_write[1][i]); | ||
1615 | +} | ||
1616 | + | ||
1592 | static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, | 1617 | static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, |
1593 | unsigned long start) | 1618 | unsigned long start) |
1594 | { | 1619 | { |
@@ -1626,28 +1651,20 @@ int tlb_set_page(CPUState *env, target_ulong vaddr, | @@ -1626,28 +1651,20 @@ int tlb_set_page(CPUState *env, target_ulong vaddr, | ||
1626 | { | 1651 | { |
1627 | PhysPageDesc *p; | 1652 | PhysPageDesc *p; |
1628 | unsigned long pd; | 1653 | unsigned long pd; |
1629 | - TranslationBlock *first_tb; | ||
1630 | unsigned int index; | 1654 | unsigned int index; |
1631 | target_ulong address; | 1655 | target_ulong address; |
1632 | target_phys_addr_t addend; | 1656 | target_phys_addr_t addend; |
1633 | int ret; | 1657 | int ret; |
1634 | 1658 | ||
1635 | p = phys_page_find(paddr >> TARGET_PAGE_BITS); | 1659 | p = phys_page_find(paddr >> TARGET_PAGE_BITS); |
1636 | - first_tb = NULL; | ||
1637 | if (!p) { | 1660 | if (!p) { |
1638 | pd = IO_MEM_UNASSIGNED; | 1661 | pd = IO_MEM_UNASSIGNED; |
1639 | } else { | 1662 | } else { |
1640 | - PageDesc *p1; | ||
1641 | pd = p->phys_offset; | 1663 | pd = p->phys_offset; |
1642 | - if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) { | ||
1643 | - /* NOTE: we also allocate the page at this stage */ | ||
1644 | - p1 = page_find_alloc(pd >> TARGET_PAGE_BITS); | ||
1645 | - first_tb = p1->first_tb; | ||
1646 | - } | ||
1647 | } | 1664 | } |
1648 | #if defined(DEBUG_TLB) | 1665 | #if defined(DEBUG_TLB) |
1649 | - printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n", | ||
1650 | - vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd); | 1666 | + printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n", |
1667 | + vaddr, paddr, prot, is_user, is_softmmu, pd); | ||
1651 | #endif | 1668 | #endif |
1652 | 1669 | ||
1653 | ret = 0; | 1670 | ret = 0; |
@@ -1679,18 +1696,7 @@ int tlb_set_page(CPUState *env, target_ulong vaddr, | @@ -1679,18 +1696,7 @@ int tlb_set_page(CPUState *env, target_ulong vaddr, | ||
1679 | /* ROM: access is ignored (same as unassigned) */ | 1696 | /* ROM: access is ignored (same as unassigned) */ |
1680 | env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM; | 1697 | env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM; |
1681 | env->tlb_write[is_user][index].addend = addend; | 1698 | env->tlb_write[is_user][index].addend = addend; |
1682 | - } else | ||
1683 | - /* XXX: the PowerPC code seems not ready to handle | ||
1684 | - self modifying code with DCBI */ | ||
1685 | -#if defined(TARGET_HAS_SMC) || 1 | ||
1686 | - if (first_tb) { | ||
1687 | - /* if code is present, we use a specific memory | ||
1688 | - handler. It works only for physical memory access */ | ||
1689 | - env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE; | ||
1690 | - env->tlb_write[is_user][index].addend = addend; | ||
1691 | - } else | ||
1692 | -#endif | ||
1693 | - if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && | 1699 | + } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && |
1694 | !cpu_physical_memory_is_dirty(pd)) { | 1700 | !cpu_physical_memory_is_dirty(pd)) { |
1695 | env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY; | 1701 | env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY; |
1696 | env->tlb_write[is_user][index].addend = addend; | 1702 | env->tlb_write[is_user][index].addend = addend; |
@@ -1986,75 +1992,66 @@ static CPUWriteMemoryFunc *unassigned_mem_write[3] = { | @@ -1986,75 +1992,66 @@ static CPUWriteMemoryFunc *unassigned_mem_write[3] = { | ||
1986 | unassigned_mem_writeb, | 1992 | unassigned_mem_writeb, |
1987 | }; | 1993 | }; |
1988 | 1994 | ||
1989 | -/* self modifying code support in soft mmu mode : writing to a page | ||
1990 | - containing code comes to these functions */ | ||
1991 | - | ||
1992 | -static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) | 1995 | +static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) |
1993 | { | 1996 | { |
1994 | - unsigned long phys_addr; | ||
1995 | - | ||
1996 | - phys_addr = addr - (unsigned long)phys_ram_base; | 1997 | + unsigned long ram_addr; |
1998 | + int dirty_flags; | ||
1999 | + ram_addr = addr - (unsigned long)phys_ram_base; | ||
2000 | + dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | ||
2001 | + if (!(dirty_flags & CODE_DIRTY_FLAG)) { | ||
1997 | #if !defined(CONFIG_USER_ONLY) | 2002 | #if !defined(CONFIG_USER_ONLY) |
1998 | - tb_invalidate_phys_page_fast(phys_addr, 1); | 2003 | + tb_invalidate_phys_page_fast(ram_addr, 1); |
2004 | + dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | ||
1999 | #endif | 2005 | #endif |
2006 | + } | ||
2000 | stb_p((uint8_t *)(long)addr, val); | 2007 | stb_p((uint8_t *)(long)addr, val); |
2001 | - phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff; | 2008 | + /* we set the page as dirty only if the code has been flushed */ |
2009 | + if (dirty_flags & CODE_DIRTY_FLAG) | ||
2010 | + tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); | ||
2002 | } | 2011 | } |
2003 | 2012 | ||
2004 | -static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) | 2013 | +static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) |
2005 | { | 2014 | { |
2006 | - unsigned long phys_addr; | ||
2007 | - | ||
2008 | - phys_addr = addr - (unsigned long)phys_ram_base; | 2015 | + unsigned long ram_addr; |
2016 | + int dirty_flags; | ||
2017 | + ram_addr = addr - (unsigned long)phys_ram_base; | ||
2018 | + dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | ||
2019 | + if (!(dirty_flags & CODE_DIRTY_FLAG)) { | ||
2009 | #if !defined(CONFIG_USER_ONLY) | 2020 | #if !defined(CONFIG_USER_ONLY) |
2010 | - tb_invalidate_phys_page_fast(phys_addr, 2); | 2021 | + tb_invalidate_phys_page_fast(ram_addr, 2); |
2022 | + dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | ||
2011 | #endif | 2023 | #endif |
2024 | + } | ||
2012 | stw_p((uint8_t *)(long)addr, val); | 2025 | stw_p((uint8_t *)(long)addr, val); |
2013 | - phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff; | 2026 | + /* we set the page as dirty only if the code has been flushed */ |
2027 | + if (dirty_flags & CODE_DIRTY_FLAG) | ||
2028 | + tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); | ||
2014 | } | 2029 | } |
2015 | 2030 | ||
2016 | -static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) | 2031 | +static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) |
2017 | { | 2032 | { |
2018 | - unsigned long phys_addr; | ||
2019 | - | ||
2020 | - phys_addr = addr - (unsigned long)phys_ram_base; | 2033 | + unsigned long ram_addr; |
2034 | + int dirty_flags; | ||
2035 | + ram_addr = addr - (unsigned long)phys_ram_base; | ||
2036 | + dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | ||
2037 | + if (!(dirty_flags & CODE_DIRTY_FLAG)) { | ||
2021 | #if !defined(CONFIG_USER_ONLY) | 2038 | #if !defined(CONFIG_USER_ONLY) |
2022 | - tb_invalidate_phys_page_fast(phys_addr, 4); | 2039 | + tb_invalidate_phys_page_fast(ram_addr, 4); |
2040 | + dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | ||
2023 | #endif | 2041 | #endif |
2042 | + } | ||
2024 | stl_p((uint8_t *)(long)addr, val); | 2043 | stl_p((uint8_t *)(long)addr, val); |
2025 | - phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff; | 2044 | + /* we set the page as dirty only if the code has been flushed */ |
2045 | + if (dirty_flags & CODE_DIRTY_FLAG) | ||
2046 | + tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); | ||
2026 | } | 2047 | } |
2027 | 2048 | ||
2028 | -static CPUReadMemoryFunc *code_mem_read[3] = { | 2049 | +static CPUReadMemoryFunc *error_mem_read[3] = { |
2029 | NULL, /* never used */ | 2050 | NULL, /* never used */ |
2030 | NULL, /* never used */ | 2051 | NULL, /* never used */ |
2031 | NULL, /* never used */ | 2052 | NULL, /* never used */ |
2032 | }; | 2053 | }; |
2033 | 2054 | ||
2034 | -static CPUWriteMemoryFunc *code_mem_write[3] = { | ||
2035 | - code_mem_writeb, | ||
2036 | - code_mem_writew, | ||
2037 | - code_mem_writel, | ||
2038 | -}; | ||
2039 | - | ||
2040 | -static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) | ||
2041 | -{ | ||
2042 | - stb_p((uint8_t *)(long)addr, val); | ||
2043 | - tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); | ||
2044 | -} | ||
2045 | - | ||
2046 | -static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) | ||
2047 | -{ | ||
2048 | - stw_p((uint8_t *)(long)addr, val); | ||
2049 | - tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); | ||
2050 | -} | ||
2051 | - | ||
2052 | -static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) | ||
2053 | -{ | ||
2054 | - stl_p((uint8_t *)(long)addr, val); | ||
2055 | - tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); | ||
2056 | -} | ||
2057 | - | ||
2058 | static CPUWriteMemoryFunc *notdirty_mem_write[3] = { | 2055 | static CPUWriteMemoryFunc *notdirty_mem_write[3] = { |
2059 | notdirty_mem_writeb, | 2056 | notdirty_mem_writeb, |
2060 | notdirty_mem_writew, | 2057 | notdirty_mem_writew, |
@@ -2063,14 +2060,14 @@ static CPUWriteMemoryFunc *notdirty_mem_write[3] = { | @@ -2063,14 +2060,14 @@ static CPUWriteMemoryFunc *notdirty_mem_write[3] = { | ||
2063 | 2060 | ||
2064 | static void io_mem_init(void) | 2061 | static void io_mem_init(void) |
2065 | { | 2062 | { |
2066 | - cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL); | 2063 | + cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL); |
2067 | cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL); | 2064 | cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL); |
2068 | - cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL); | ||
2069 | - cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL); | 2065 | + cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL); |
2070 | io_mem_nb = 5; | 2066 | io_mem_nb = 5; |
2071 | 2067 | ||
2072 | /* alloc dirty bits array */ | 2068 | /* alloc dirty bits array */ |
2073 | phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS); | 2069 | phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS); |
2070 | + memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS); | ||
2074 | } | 2071 | } |
2075 | 2072 | ||
2076 | /* mem_read and mem_write are arrays of functions containing the | 2073 | /* mem_read and mem_write are arrays of functions containing the |
@@ -2182,7 +2179,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, | @@ -2182,7 +2179,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, | ||
2182 | } | 2179 | } |
2183 | 2180 | ||
2184 | if (is_write) { | 2181 | if (is_write) { |
2185 | - if ((pd & ~TARGET_PAGE_MASK) != 0) { | 2182 | + if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
2186 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | 2183 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
2187 | if (l >= 4 && ((addr & 3) == 0)) { | 2184 | if (l >= 4 && ((addr & 3) == 0)) { |
2188 | /* 32 bit read access */ | 2185 | /* 32 bit read access */ |
@@ -2206,14 +2203,15 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, | @@ -2206,14 +2203,15 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, | ||
2206 | /* RAM case */ | 2203 | /* RAM case */ |
2207 | ptr = phys_ram_base + addr1; | 2204 | ptr = phys_ram_base + addr1; |
2208 | memcpy(ptr, buf, l); | 2205 | memcpy(ptr, buf, l); |
2209 | - /* invalidate code */ | ||
2210 | - tb_invalidate_phys_page_range(addr1, addr1 + l, 0); | ||
2211 | - /* set dirty bit */ | ||
2212 | - phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff; | 2206 | + if (!cpu_physical_memory_is_dirty(addr1)) { |
2207 | + /* invalidate code */ | ||
2208 | + tb_invalidate_phys_page_range(addr1, addr1 + l, 0); | ||
2209 | + /* set dirty bit */ | ||
2210 | + phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff; | ||
2211 | + } | ||
2213 | } | 2212 | } |
2214 | } else { | 2213 | } else { |
2215 | - if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && | ||
2216 | - (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) { | 2214 | + if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) { |
2217 | /* I/O case */ | 2215 | /* I/O case */ |
2218 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | 2216 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
2219 | if (l >= 4 && ((addr & 3) == 0)) { | 2217 | if (l >= 4 && ((addr & 3) == 0)) { |
@@ -2261,8 +2259,7 @@ uint32_t ldl_phys(target_phys_addr_t addr) | @@ -2261,8 +2259,7 @@ uint32_t ldl_phys(target_phys_addr_t addr) | ||
2261 | pd = p->phys_offset; | 2259 | pd = p->phys_offset; |
2262 | } | 2260 | } |
2263 | 2261 | ||
2264 | - if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && | ||
2265 | - (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) { | 2262 | + if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) { |
2266 | /* I/O case */ | 2263 | /* I/O case */ |
2267 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | 2264 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
2268 | val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); | 2265 | val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); |
@@ -2292,7 +2289,7 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) | @@ -2292,7 +2289,7 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) | ||
2292 | pd = p->phys_offset; | 2289 | pd = p->phys_offset; |
2293 | } | 2290 | } |
2294 | 2291 | ||
2295 | - if ((pd & ~TARGET_PAGE_MASK) != 0) { | 2292 | + if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
2296 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | 2293 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
2297 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); | 2294 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); |
2298 | } else { | 2295 | } else { |
@@ -2303,7 +2300,6 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) | @@ -2303,7 +2300,6 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) | ||
2303 | } | 2300 | } |
2304 | 2301 | ||
2305 | /* warning: addr must be aligned */ | 2302 | /* warning: addr must be aligned */ |
2306 | -/* XXX: optimize code invalidation test */ | ||
2307 | void stl_phys(target_phys_addr_t addr, uint32_t val) | 2303 | void stl_phys(target_phys_addr_t addr, uint32_t val) |
2308 | { | 2304 | { |
2309 | int io_index; | 2305 | int io_index; |
@@ -2318,7 +2314,7 @@ void stl_phys(target_phys_addr_t addr, uint32_t val) | @@ -2318,7 +2314,7 @@ void stl_phys(target_phys_addr_t addr, uint32_t val) | ||
2318 | pd = p->phys_offset; | 2314 | pd = p->phys_offset; |
2319 | } | 2315 | } |
2320 | 2316 | ||
2321 | - if ((pd & ~TARGET_PAGE_MASK) != 0) { | 2317 | + if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { |
2322 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | 2318 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); |
2323 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); | 2319 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); |
2324 | } else { | 2320 | } else { |
@@ -2327,10 +2323,12 @@ void stl_phys(target_phys_addr_t addr, uint32_t val) | @@ -2327,10 +2323,12 @@ void stl_phys(target_phys_addr_t addr, uint32_t val) | ||
2327 | /* RAM case */ | 2323 | /* RAM case */ |
2328 | ptr = phys_ram_base + addr1; | 2324 | ptr = phys_ram_base + addr1; |
2329 | stl_p(ptr, val); | 2325 | stl_p(ptr, val); |
2330 | - /* invalidate code */ | ||
2331 | - tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); | ||
2332 | - /* set dirty bit */ | ||
2333 | - phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff; | 2326 | + if (!cpu_physical_memory_is_dirty(addr1)) { |
2327 | + /* invalidate code */ | ||
2328 | + tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); | ||
2329 | + /* set dirty bit */ | ||
2330 | + phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff; | ||
2331 | + } | ||
2334 | } | 2332 | } |
2335 | } | 2333 | } |
2336 | 2334 |