Commit 6fa4cea9e8e904f7aac0c3d4f73a883c9e1e53bd
1 parent
876d4b07
Infrastructure to support more than 2 MMU modes.
Add example for Alpha and PowerPC hypervisor mode. git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@2596 c046a42c-6fe2-441c-8c8c-71466251a162
Showing
2 changed files
with
48 additions
and
1 deletion
cpu-defs.h
@@ -108,6 +108,15 @@ typedef struct CPUTLBEntry {
108 | target_phys_addr_t addend; | 108 | target_phys_addr_t addend; |
109 | } CPUTLBEntry; | 109 | } CPUTLBEntry; |
110 | 110 | ||
111 | +/* Alpha has 4 different running levels */ | ||
112 | +#if defined(TARGET_ALPHA) | ||
113 | +#define NB_MMU_MODES 4 | ||
114 | +#elif defined(TARGET_PPC64H) /* PowerPC 64 with hypervisor mode support */ | ||
115 | +#define NB_MMU_MODES 3 | ||
116 | +#else | ||
117 | +#define NB_MMU_MODES 2 | ||
118 | +#endif | ||
119 | + | ||
111 | #define CPU_COMMON \ | 120 | #define CPU_COMMON \ |
112 | struct TranslationBlock *current_tb; /* currently executing TB */ \ | 121 | struct TranslationBlock *current_tb; /* currently executing TB */ \ |
113 | /* soft mmu support */ \ | 122 | /* soft mmu support */ \ |
@@ -119,7 +128,7 @@ typedef struct CPUTLBEntry {
119 | target_ulong mem_write_vaddr; /* target virtual addr at which the \ | 128 | target_ulong mem_write_vaddr; /* target virtual addr at which the \ |
120 | memory was written */ \ | 129 | memory was written */ \ |
121 | /* 0 = kernel, 1 = user */ \ | 130 | /* 0 = kernel, 1 = user */ \ |
122 | - CPUTLBEntry tlb_table[2][CPU_TLB_SIZE]; \ | 131 | + CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ |
123 | struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \ | 132 | struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \ |
124 | \ | 133 | \ |
125 | /* from this point: preserved by CPU reset */ \ | 134 | /* from this point: preserved by CPU reset */ \ |
exec.c
@@ -1300,6 +1300,16 @@ void tlb_flush(CPUState *env, int flush_global)
1300 | env->tlb_table[1][i].addr_read = -1; | 1300 | env->tlb_table[1][i].addr_read = -1; |
1301 | env->tlb_table[1][i].addr_write = -1; | 1301 | env->tlb_table[1][i].addr_write = -1; |
1302 | env->tlb_table[1][i].addr_code = -1; | 1302 | env->tlb_table[1][i].addr_code = -1; |
1303 | +#if (NB_MMU_MODES >= 3) | ||
1304 | + env->tlb_table[2][i].addr_read = -1; | ||
1305 | + env->tlb_table[2][i].addr_write = -1; | ||
1306 | + env->tlb_table[2][i].addr_code = -1; | ||
1307 | +#if (NB_MMU_MODES == 4) | ||
1308 | + env->tlb_table[3][i].addr_read = -1; | ||
1309 | + env->tlb_table[3][i].addr_write = -1; | ||
1310 | + env->tlb_table[3][i].addr_code = -1; | ||
1311 | +#endif | ||
1312 | +#endif | ||
1303 | } | 1313 | } |
1304 | 1314 | ||
1305 | memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); | 1315 | memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); |
@@ -1345,6 +1355,12 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
1345 | i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); | 1355 | i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
1346 | tlb_flush_entry(&env->tlb_table[0][i], addr); | 1356 | tlb_flush_entry(&env->tlb_table[0][i], addr); |
1347 | tlb_flush_entry(&env->tlb_table[1][i], addr); | 1357 | tlb_flush_entry(&env->tlb_table[1][i], addr); |
1358 | +#if (NB_MMU_MODES >= 3) | ||
1359 | + tlb_flush_entry(&env->tlb_table[2][i], addr); | ||
1360 | +#if (NB_MMU_MODES == 4) | ||
1361 | + tlb_flush_entry(&env->tlb_table[3][i], addr); | ||
1362 | +#endif | ||
1363 | +#endif | ||
1348 | 1364 | ||
1349 | /* Discard jump cache entries for any tb which might potentially | 1365 | /* Discard jump cache entries for any tb which might potentially |
1350 | overlap the flushed page. */ | 1366 | overlap the flushed page. */ |
@@ -1434,6 +1450,14 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1434 | tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length); | 1450 | tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length); |
1435 | for(i = 0; i < CPU_TLB_SIZE; i++) | 1451 | for(i = 0; i < CPU_TLB_SIZE; i++) |
1436 | tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length); | 1452 | tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length); |
1453 | +#if (NB_MMU_MODES >= 3) | ||
1454 | + for(i = 0; i < CPU_TLB_SIZE; i++) | ||
1455 | + tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length); | ||
1456 | +#if (NB_MMU_MODES == 4) | ||
1457 | + for(i = 0; i < CPU_TLB_SIZE; i++) | ||
1458 | + tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length); | ||
1459 | +#endif | ||
1460 | +#endif | ||
1437 | } | 1461 | } |
1438 | 1462 | ||
1439 | #if !defined(CONFIG_SOFTMMU) | 1463 | #if !defined(CONFIG_SOFTMMU) |
@@ -1486,6 +1510,14 @@ void cpu_tlb_update_dirty(CPUState *env)
1486 | tlb_update_dirty(&env->tlb_table[0][i]); | 1510 | tlb_update_dirty(&env->tlb_table[0][i]); |
1487 | for(i = 0; i < CPU_TLB_SIZE; i++) | 1511 | for(i = 0; i < CPU_TLB_SIZE; i++) |
1488 | tlb_update_dirty(&env->tlb_table[1][i]); | 1512 | tlb_update_dirty(&env->tlb_table[1][i]); |
1513 | +#if (NB_MMU_MODES >= 3) | ||
1514 | + for(i = 0; i < CPU_TLB_SIZE; i++) | ||
1515 | + tlb_update_dirty(&env->tlb_table[2][i]); | ||
1516 | +#if (NB_MMU_MODES == 4) | ||
1517 | + for(i = 0; i < CPU_TLB_SIZE; i++) | ||
1518 | + tlb_update_dirty(&env->tlb_table[3][i]); | ||
1519 | +#endif | ||
1520 | +#endif | ||
1489 | } | 1521 | } |
1490 | 1522 | ||
1491 | static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, | 1523 | static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, |
@@ -1511,6 +1543,12 @@ static inline void tlb_set_dirty(CPUState *env,
1511 | i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); | 1543 | i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
1512 | tlb_set_dirty1(&env->tlb_table[0][i], addr); | 1544 | tlb_set_dirty1(&env->tlb_table[0][i], addr); |
1513 | tlb_set_dirty1(&env->tlb_table[1][i], addr); | 1545 | tlb_set_dirty1(&env->tlb_table[1][i], addr); |
1546 | +#if (NB_MMU_MODES >= 3) | ||
1547 | + tlb_set_dirty1(&env->tlb_table[2][i], addr); | ||
1548 | +#if (NB_MMU_MODES == 4) | ||
1549 | + tlb_set_dirty1(&env->tlb_table[3][i], addr); | ||
1550 | +#endif | ||
1551 | +#endif | ||
1514 | } | 1552 | } |
1515 | 1553 | ||
1516 | /* add a new TLB entry. At most one entry for a given virtual address | 1554 | /* add a new TLB entry. At most one entry for a given virtual address |