Commit cfde4bd93100c58c0bfaed76deefb144caac488f
Committed by Anthony Liguori
1 parent: 77d4db01
exec.c: remove unnecessary #if NB_MMU_MODES
Remove unnecessary #if NB_MMU_MODES by using a loop.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Acked-by: Edgar E. Iglesias <edgar.iglesias@gmail.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
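The same transformation is applied in every function touched below: the hand-unrolled per-mode statements, with the higher modes wrapped in #if (NB_MMU_MODES >= n) guards, become a single loop over NB_MMU_MODES. The standalone sketch below illustrates the resulting shape of tlb_flush(); TLBEntry, tlb_table, flush_all_modes and the constant values are simplified stand-ins for illustration, not the actual QEMU CPUTLBEntry/CPUState definitions.

    #include <stdio.h>

    #define NB_MMU_MODES 3   /* target-dependent in QEMU; fixed here for the demo */
    #define CPU_TLB_SIZE 8   /* much larger in QEMU; kept small for readability */

    /* Simplified stand-in for QEMU's CPUTLBEntry. */
    typedef struct {
        long addr_read, addr_write, addr_code;
    } TLBEntry;

    static TLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];

    /* Mirrors the new tlb_flush() body: one loop over MMU modes replaces the
     * unrolled statements previously guarded by #if (NB_MMU_MODES >= n). */
    static void flush_all_modes(void)
    {
        int i;
        for (i = 0; i < CPU_TLB_SIZE; i++) {
            int mmu_idx;
            for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
                tlb_table[mmu_idx][i].addr_read = -1;
                tlb_table[mmu_idx][i].addr_write = -1;
                tlb_table[mmu_idx][i].addr_code = -1;
            }
        }
    }

    int main(void)
    {
        flush_all_modes();
        printf("invalidated %d modes x %d entries\n", NB_MMU_MODES, CPU_TLB_SIZE);
        return 0;
    }

Since NB_MMU_MODES is a compile-time constant, the compiler can still unroll the loop if it considers that profitable, so the loop form should cost nothing compared with the old hand-unrolled version.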
Showing 1 changed file with 23 additions and 76 deletions.
exec.c
@@ -1762,28 +1762,12 @@ void tlb_flush(CPUState *env, int flush_global)
     env->current_tb = NULL;

     for(i = 0; i < CPU_TLB_SIZE; i++) {
-        env->tlb_table[0][i].addr_read = -1;
-        env->tlb_table[0][i].addr_write = -1;
-        env->tlb_table[0][i].addr_code = -1;
-        env->tlb_table[1][i].addr_read = -1;
-        env->tlb_table[1][i].addr_write = -1;
-        env->tlb_table[1][i].addr_code = -1;
-#if (NB_MMU_MODES >= 3)
-        env->tlb_table[2][i].addr_read = -1;
-        env->tlb_table[2][i].addr_write = -1;
-        env->tlb_table[2][i].addr_code = -1;
-#endif
-#if (NB_MMU_MODES >= 4)
-        env->tlb_table[3][i].addr_read = -1;
-        env->tlb_table[3][i].addr_write = -1;
-        env->tlb_table[3][i].addr_code = -1;
-#endif
-#if (NB_MMU_MODES >= 5)
-        env->tlb_table[4][i].addr_read = -1;
-        env->tlb_table[4][i].addr_write = -1;
-        env->tlb_table[4][i].addr_code = -1;
-#endif
-
+        int mmu_idx;
+        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+            env->tlb_table[mmu_idx][i].addr_read = -1;
+            env->tlb_table[mmu_idx][i].addr_write = -1;
+            env->tlb_table[mmu_idx][i].addr_code = -1;
+        }
     }

     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
@@ -1813,6 +1797,7 @@ static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 void tlb_flush_page(CPUState *env, target_ulong addr)
 {
     int i;
+    int mmu_idx;

 #if defined(DEBUG_TLB)
     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
@@ -1823,17 +1808,8 @@ void tlb_flush_page(CPUState *env, target_ulong addr)

     addr &= TARGET_PAGE_MASK;
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_flush_entry(&env->tlb_table[0][i], addr);
-    tlb_flush_entry(&env->tlb_table[1][i], addr);
-#if (NB_MMU_MODES >= 3)
-    tlb_flush_entry(&env->tlb_table[2][i], addr);
-#endif
-#if (NB_MMU_MODES >= 4)
-    tlb_flush_entry(&env->tlb_table[3][i], addr);
-#endif
-#if (NB_MMU_MODES >= 5)
-    tlb_flush_entry(&env->tlb_table[4][i], addr);
-#endif
+    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
+        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

     tlb_flush_jmp_cache(env, addr);

@@ -1917,22 +1893,12 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
     }

     for(env = first_cpu; env != NULL; env = env->next_cpu) {
-        for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
-        for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
-#if (NB_MMU_MODES >= 3)
-        for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
-#endif
-#if (NB_MMU_MODES >= 4)
-        for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
-#endif
-#if (NB_MMU_MODES >= 5)
-        for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
-#endif
+        int mmu_idx;
+        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+            for(i = 0; i < CPU_TLB_SIZE; i++)
+                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
+                                      start1, length);
+        }
     }
 }

@@ -1979,22 +1945,11 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
 void cpu_tlb_update_dirty(CPUState *env)
 {
     int i;
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[0][i]);
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[1][i]);
-#if (NB_MMU_MODES >= 3)
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[2][i]);
-#endif
-#if (NB_MMU_MODES >= 4)
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[3][i]);
-#endif
-#if (NB_MMU_MODES >= 5)
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[4][i]);
-#endif
+    int mmu_idx;
+    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+        for(i = 0; i < CPU_TLB_SIZE; i++)
+            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
+    }
 }

 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
@@ -2008,20 +1963,12 @@ static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
 {
     int i;
+    int mmu_idx;

     vaddr &= TARGET_PAGE_MASK;
     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
-    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
-#if (NB_MMU_MODES >= 3)
-    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
-#endif
-#if (NB_MMU_MODES >= 4)
-    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
-#endif
-#if (NB_MMU_MODES >= 5)
-    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
-#endif
+    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
+        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
 }

 /* add a new TLB entry. At most one entry for a given virtual address
...