Commit 90f18422d9d54237cbec19461321a9de1011ad8c
1 parent 95293972
64 bit virtual addressing fix
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1525 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 96 additions and 26 deletions
exec.c
@@ -83,6 +83,8 @@ typedef struct PhysPageDesc {
     uint32_t phys_offset;
 } PhysPageDesc;
 
+/* Note: the VirtPage handling is absolete and will be suppressed
+   ASAP */
 typedef struct VirtPageDesc {
     /* physical address of code page. It is valid only if 'valid_tag'
        matches 'virt_valid_tag' */
@@ -113,7 +115,13 @@ static PageDesc *l1_map[L1_SIZE];
 PhysPageDesc **l1_phys_map;
 
 #if !defined(CONFIG_USER_ONLY)
+#if TARGET_LONG_BITS > 32
+#define VIRT_L_BITS 9
+#define VIRT_L_SIZE (1 << VIRT_L_BITS)
+static void *l1_virt_map[VIRT_L_SIZE];
+#else
 static VirtPageDesc *l1_virt_map[L1_SIZE];
+#endif
 static unsigned int virt_valid_tag;
 #endif
 
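For readers following the new layout: with VIRT_L_BITS set to 9, the lookup below walks six levels of 512 entries each, so 6 * 9 = 54 bits of page index are covered; together with the page offset that is more than a full 64-bit guest address, which is why the old `index &= (L1_SIZE - 1)` truncation (and the "XXX: should not truncate" comment) can go away. A minimal standalone sketch of the index split, assuming the usual 12-bit TARGET_PAGE_BITS (the example address and program are not part of the commit):

    #include <stdio.h>
    #include <stdint.h>

    #define VIRT_L_BITS      9                      /* as defined in this commit */
    #define VIRT_L_SIZE      (1 << VIRT_L_BITS)     /* 512 entries per level */
    #define TARGET_PAGE_BITS 12                     /* assumption: 4 KiB target pages */

    int main(void)
    {
        uint64_t vaddr = 0x123456789abcdef0ULL;     /* arbitrary 64-bit guest address */
        uint64_t index = vaddr >> TARGET_PAGE_BITS; /* page index, up to 52 bits here */
        int level;

        /* same decomposition as virt_page_find_alloc(): the top level uses
           bits [5*9 .. 6*9) of the page index, the leaf level uses bits [0 .. 9) */
        for (level = 5; level >= 0; level--) {
            unsigned slot = (index >> (level * VIRT_L_BITS)) & (VIRT_L_SIZE - 1);
            printf("level %d -> slot %u\n", level, slot);
        }
        return 0;
    }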
@@ -234,51 +242,113 @@ static inline PhysPageDesc *phys_page_find(unsigned int index)
 static void tlb_protect_code(CPUState *env, target_ulong addr);
 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
 
-static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
+static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
 {
-    VirtPageDesc **lp, *p;
-
-    /* XXX: should not truncate for 64 bit addresses */
 #if TARGET_LONG_BITS > 32
-    index &= (L1_SIZE - 1);
-#endif
+    void **p, **lp;
+
+    p = l1_virt_map;
+    lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
+    p = *lp;
+    if (!p) {
+        if (!alloc)
+            return NULL;
+        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
+        *lp = p;
+    }
+    lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
+    p = *lp;
+    if (!p) {
+        if (!alloc)
+            return NULL;
+        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
+        *lp = p;
+    }
+    lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
+    p = *lp;
+    if (!p) {
+        if (!alloc)
+            return NULL;
+        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
+        *lp = p;
+    }
+    lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
+    p = *lp;
+    if (!p) {
+        if (!alloc)
+            return NULL;
+        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
+        *lp = p;
+    }
+    lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
+    p = *lp;
+    if (!p) {
+        if (!alloc)
+            return NULL;
+        p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
+        *lp = p;
+    }
+    return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
+#else
+    VirtPageDesc *p, **lp;
+
     lp = &l1_virt_map[index >> L2_BITS];
     p = *lp;
     if (!p) {
         /* allocate if not found */
-        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
-        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
+        if (!alloc)
+            return NULL;
+        p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
         *lp = p;
     }
     return p + (index & (L2_SIZE - 1));
+#endif
 }
 
-static inline VirtPageDesc *virt_page_find(unsigned int index)
+static inline VirtPageDesc *virt_page_find(target_ulong index)
 {
-    VirtPageDesc *p;
+    return virt_page_find_alloc(index, 0);
+}
 
-    p = l1_virt_map[index >> L2_BITS];
-    if (!p)
-        return 0;
-    return p + (index & (L2_SIZE - 1));
+#if TARGET_LONG_BITS > 32
+static void virt_page_flush_internal(void **p, int level)
+{
+    int i;
+    if (level == 0) {
+        VirtPageDesc *q = (VirtPageDesc *)p;
+        for(i = 0; i < VIRT_L_SIZE; i++)
+            q[i].valid_tag = 0;
+    } else {
+        level--;
+        for(i = 0; i < VIRT_L_SIZE; i++) {
+            if (p[i])
+                virt_page_flush_internal(p[i], level);
+        }
+    }
 }
+#endif
 
 static void virt_page_flush(void)
 {
-    int i, j;
-    VirtPageDesc *p;
-
     virt_valid_tag++;
 
     if (virt_valid_tag == 0) {
         virt_valid_tag = 1;
-        for(i = 0; i < L1_SIZE; i++) {
-            p = l1_virt_map[i];
-            if (p) {
-                for(j = 0; j < L2_SIZE; j++)
-                    p[j].valid_tag = 0;
+#if TARGET_LONG_BITS > 32
+        virt_page_flush_internal(l1_virt_map, 5);
+#else
+        {
+            int i, j;
+            VirtPageDesc *p;
+            for(i = 0; i < L1_SIZE; i++) {
+                p = l1_virt_map[i];
+                if (p) {
+                    for(j = 0; j < L2_SIZE; j++)
+                        p[j].valid_tag = 0;
+                }
             }
         }
+#endif
     }
 }
 #else
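The unrolled walk in virt_page_find_alloc() above can also be read as a loop over the six levels: the upper five levels hold tables of pointers, the leaf level holds the VirtPageDesc entries themselves, and a missing table is either allocated (alloc != 0) or the walk bails out with NULL. A condensed sketch of the same logic, written as a hypothetical helper that reuses the identifiers from the diff (the commit itself keeps the walk unrolled):

    /* Sketch only: relies on l1_virt_map, VirtPageDesc, VIRT_L_BITS/SIZE and
       qemu_mallocz() as defined in the hunks above; qemu_mallocz() is assumed
       to return zero-filled memory, like calloc(). */
    static VirtPageDesc *virt_page_walk(target_ulong index, int alloc)
    {
        void **p = l1_virt_map;
        int level;

        for (level = 5; level >= 1; level--) {
            void **lp = p + ((index >> (level * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
            if (!*lp) {
                if (!alloc)
                    return NULL;
                /* interior tables hold pointers, the last table holds descriptors */
                *lp = qemu_mallocz((level == 1 ? sizeof(VirtPageDesc)
                                               : sizeof(void *)) * VIRT_L_SIZE);
            }
            p = *lp;
        }
        return (VirtPageDesc *)p + (index & (VIRT_L_SIZE - 1));
    }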
@@ -945,7 +1015,7 @@ void tb_link(TranslationBlock *tb)
 
     /* save the code memory mappings (needed to invalidate the code) */
     addr = tb->pc & TARGET_PAGE_MASK;
-    vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
+    vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
 #ifdef DEBUG_TLB_CHECK
     if (vp->valid_tag == virt_valid_tag &&
         vp->phys_addr != tb->page_addr[0]) {
@@ -963,7 +1033,7 @@ void tb_link(TranslationBlock *tb)
 
     if (tb->page_addr[1] != -1) {
         addr += TARGET_PAGE_SIZE;
-        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
+        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
 #ifdef DEBUG_TLB_CHECK
         if (vp->valid_tag == virt_valid_tag &&
             vp->phys_addr != tb->page_addr[1]) {
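Both tb_link() call sites now pass the new alloc = 1 argument because the translation-block linking path must create the descriptor on demand, while lookup-only paths go through the non-allocating virt_page_find() wrapper added above. A hedged usage sketch with the diff's identifiers (not itself part of the commit):

    /* creating path: intermediate tables and the descriptor are allocated */
    VirtPageDesc *vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

    /* probing path: returns NULL instead of allocating anything */
    VirtPageDesc *probe = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (probe && probe->valid_tag == virt_valid_tag) {
        /* mapping is still current for this flush generation */
    }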
@@ -1572,7 +1642,7 @@ int tlb_set_page(CPUState *env, target_ulong vaddr,
         addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
     }
 
-    index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
+    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     addend -= vaddr;
     if (prot & PAGE_READ) {
         env->tlb_read[is_user][index].address = address;
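The hunk above also replaces the hardcoded shift of 12 with TARGET_PAGE_BITS when picking the software-TLB slot, so the index stays consistent with the target's page size rather than assuming 4 KiB pages. A quick standalone illustration; the constant values are assumptions for the example only:

    #include <stdio.h>
    #include <stdint.h>

    #define TARGET_PAGE_BITS 12     /* assumption: 4 KiB pages for this example */
    #define CPU_TLB_SIZE     256    /* assumption: illustrative TLB size */

    int main(void)
    {
        uint64_t vaddr = 0xffffffff81234567ULL;   /* some 64-bit guest address */
        /* slot selection as in the fixed tlb_set_page() line */
        unsigned index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        printf("TLB slot: %u\n", index);
        return 0;
    }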
@@ -1635,7 +1705,7 @@ int tlb_set_page(CPUState *env, target_ulong vaddr,
            original mapping */
         VirtPageDesc *vp;
 
-        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
+        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
         vp->phys_addr = pd;
         vp->prot = prot;
         vp->valid_tag = virt_valid_tag;
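The valid_tag written here interacts with virt_page_flush() above: a flush normally just increments virt_valid_tag, instantly invalidating every descriptor, and only on wrap-around to 0 does the code walk the table to clear the tags. A simplified, standalone sketch of that generation-counter idea (my own example, not the commit's code):

    #include <stdio.h>

    /* Entries are "valid" only while their tag matches the global tag,
       so a flush is usually a single increment. */
    static unsigned int virt_valid_tag = 1;

    struct entry { unsigned int valid_tag; };

    static void flush(struct entry *tab, int n)
    {
        virt_valid_tag++;
        if (virt_valid_tag == 0) {       /* wrapped: now really clear the tags */
            int i;
            virt_valid_tag = 1;
            for (i = 0; i < n; i++)
                tab[i].valid_tag = 0;
        }
    }

    int main(void)
    {
        struct entry tab[4] = {{0}};
        tab[0].valid_tag = virt_valid_tag;   /* mark entry 0 valid */
        flush(tab, 4);                       /* cheap flush: just bumps the tag */
        printf("entry 0 valid after flush: %d\n",
               tab[0].valid_tag == virt_valid_tag);
        return 0;
    }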