Commit 59817ccb2c220732d3dc282b47a74faa1c4d06ce
1 parent bf3e8bf1
use qemu memory allocation - added dirty bit support when using host MMU
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@619 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 98 additions and 35 deletions
exec.c
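
Two changes run through this file: every bare malloc/free becomes qemu_malloc/qemu_free, and RAM pages tracked by the new dirty map are write-protected until their first store. The allocator wrapper is defined elsewhere in the tree and is not shown in this diff; as a rough sketch of the shape such a wrapper takes (the body below is illustrative, not the actual implementation), it centralizes allocation so the emulator can control where its own metadata lands, for instance to keep it clear of guest mappings in non-SOFTMMU mode:

    #include <stdlib.h>

    /* Illustrative stand-in only: the real qemu_malloc/qemu_free are not
       part of this diff.  Centralizing the calls lets the allocator be
       swapped for one that avoids the guest's address range. */
    void *qemu_malloc(size_t size)
    {
        return malloc(size);
    }

    void qemu_free(void *ptr)
    {
        free(ptr);
    }

Callers still check for NULL where it matters (see build_page_bitmap below), so the sketch preserves malloc's failure contract.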
@@ -143,7 +143,7 @@ static inline PageDesc *page_find_alloc(unsigned int index)
     p = *lp;
     if (!p) {
         /* allocate if not found */
-        p = malloc(sizeof(PageDesc) * L2_SIZE);
+        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
         memset(p, 0, sizeof(PageDesc) * L2_SIZE);
         *lp = p;
     }
@@ -173,7 +173,7 @@ static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
     p = *lp;
     if (!p) {
         /* allocate if not found */
-        p = malloc(sizeof(VirtPageDesc) * L2_SIZE);
+        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
         memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
         *lp = p;
     }
@@ -226,7 +226,7 @@ void cpu_exec_init(void)
 static inline void invalidate_page_bitmap(PageDesc *p)
 {
     if (p->code_bitmap) {
-        free(p->code_bitmap);
+        qemu_free(p->code_bitmap);
         p->code_bitmap = NULL;
     }
     p->code_write_count = 0;
@@ -406,7 +406,7 @@ static inline void tb_invalidate(TranslationBlock *tb)
     TranslationBlock *tb1, *tb2, **ptb;
 
     tb_invalidated_flag = 1;
-    
+
     /* remove the TB from the hash list */
     h = tb_hash_func(tb->pc);
     ptb = &tb_hash[h];
@@ -501,7 +501,7 @@ static void build_page_bitmap(PageDesc *p)
     int n, tb_start, tb_end;
     TranslationBlock *tb;
 
-    p->code_bitmap = malloc(TARGET_PAGE_SIZE / 8);
+    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
     if (!p->code_bitmap)
         return;
     memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
@@ -585,7 +585,13 @@ static inline void tb_invalidate_phys_page_fast(target_ulong start, int len, target_ulong vaddr)
 {
     PageDesc *p;
     int offset, b;
-    
+#if 0
+    if (cpu_single_env->cr[0] & CR0_PE_MASK) {
+        printf("modifying code at 0x%x size=%d EIP=%x\n",
+               (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
+               cpu_single_env->eip);
+    }
+#endif
     p = page_find(start >> TARGET_PAGE_BITS);
     if (!p)
         return;
@@ -775,7 +781,12 @@ void tb_link(TranslationBlock *tb)
     }
 #endif
     vp->phys_addr = tb->page_addr[0];
-    vp->valid_tag = virt_valid_tag;
+    if (vp->valid_tag != virt_valid_tag) {
+        vp->valid_tag = virt_valid_tag;
+#if !defined(CONFIG_SOFTMMU)
+        vp->prot = 0;
+#endif
+    }
 
     if (tb->page_addr[1] != -1) {
         addr += TARGET_PAGE_SIZE;
@@ -788,7 +799,12 @@ void tb_link(TranslationBlock *tb)
         }
 #endif
         vp->phys_addr = tb->page_addr[1];
-        vp->valid_tag = virt_valid_tag;
+        if (vp->valid_tag != virt_valid_tag) {
+            vp->valid_tag = virt_valid_tag;
+#if !defined(CONFIG_SOFTMMU)
+            vp->prot = 0;
+#endif
+        }
     }
 }
 #endif
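
Both copies of the tb_link change are the same fix: vp->prot is now reset only when the descriptor is stale, that is, when its tag does not match the current generation; a descriptor already claimed in the current generation keeps the protection state it has accumulated. The table is invalidated wholesale by bumping a generation counter rather than by walking it. A sketch of the idiom as used here (virt_page_flush below is a simplified guess at the real flush path, which would also have to handle counter wrap-around):

    static unsigned int virt_valid_tag = 1;

    /* invalidate every VirtPageDesc at once: any entry whose tag no
       longer matches the global counter is treated as empty */
    static void virt_page_flush(void)
    {
        virt_valid_tag++;
    }

    /* lookup side: an entry is live only for the current generation */
    static inline int virt_page_is_valid(const VirtPageDesc *vp)
    {
        return vp->valid_tag == virt_valid_tag;
    }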
@@ -971,7 +987,7 @@ void cpu_interrupt(CPUState *env, int mask)
 {
     TranslationBlock *tb;
     static int interrupt_lock;
-    
+
     env->interrupt_request |= mask;
     /* if the cpu is currently executing code, we must unlink it and
        all the potentially executing TB */
@@ -1172,7 +1188,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
 void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
 {
     CPUState *env;
-    target_ulong length;
+    target_ulong length, start1;
     int i;
 
     start &= TARGET_PAGE_MASK;
@@ -1186,11 +1202,39 @@ void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
     env = cpu_single_env;
     /* we modify the TLB cache so that the dirty bit will be set again
        when accessing the range */
-    start += (unsigned long)phys_ram_base;
+    start1 = start + (unsigned long)phys_ram_base;
     for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_reset_dirty_range(&env->tlb_write[0][i], start, length);
+        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
     for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_reset_dirty_range(&env->tlb_write[1][i], start, length);
+        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
+
+#if !defined(CONFIG_SOFTMMU)
+    /* XXX: this is expensive */
+    {
+        VirtPageDesc *p;
+        int j;
+        target_ulong addr;
+
+        for(i = 0; i < L1_SIZE; i++) {
+            p = l1_virt_map[i];
+            if (p) {
+                addr = i << (TARGET_PAGE_BITS + L2_BITS);
+                for(j = 0; j < L2_SIZE; j++) {
+                    if (p->valid_tag == virt_valid_tag &&
+                        p->phys_addr >= start && p->phys_addr < end &&
+                        (p->prot & PROT_WRITE)) {
+                        if (addr < MMAP_AREA_END) {
+                            mprotect((void *)addr, TARGET_PAGE_SIZE,
+                                     p->prot & ~PROT_WRITE);
+                        }
+                    }
+                    addr += TARGET_PAGE_SIZE;
+                    p++;
+                }
+            }
+        }
+    }
+#endif
 }
 
 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
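
The TLB half of the reset is unchanged in spirit; the start1 temporary just stops the function from clobbering start, which the new code still needs for the phys_addr range check. The new non-SOFTMMU half then walks the virtual page table and removes PROT_WRITE from every writable mapping in the range, so the next guest store faults and re-marks its page. The dirty state itself is one byte per target page in phys_ram_dirty; the cpu_physical_memory_is_dirty predicate used later in this diff is not shown, but given how the array is indexed in page_unprotect and io_mem_init it is presumably equivalent to:

    /* presumed shape of the dirty-map accessors, matching the indexing
       used elsewhere in this diff (one byte per target page) */
    static inline int cpu_physical_memory_is_dirty(target_ulong addr)
    {
        return phys_ram_dirty[addr >> TARGET_PAGE_BITS];
    }

    static inline void cpu_physical_memory_set_dirty(target_ulong addr)
    {
        phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 1;
    }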
@@ -1220,8 +1264,10 @@ static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
         tlb_set_dirty1(&env->tlb_write[1][i], addr);
 }
 
-/* add a new TLB entry. At most one entry for a given virtual
-   address is permitted. */
+/* add a new TLB entry. At most one entry for a given virtual address
+   is permitted. Return 0 if OK or 2 if the page could not be mapped
+   (can only happen in non SOFTMMU mode for I/O pages or pages
+   conflicting with the host address space). */
 int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                  int is_user, int is_softmmu)
 {
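
The comment rewrite documents the new contract: in non-SOFTMMU mode a page can now be refused (return 2) either because it is an I/O page or because its virtual address collides with the host mapping area (the vaddr >= MMAP_AREA_END check added below). A caller would fall back to the soft MMU in that case; roughly like this (the caller below is a hypothetical sketch, not the actual fault-handling code):

    /* hypothetical: retry through the soft MMU when direct host-MMU
       mapping is refused with ret == 2 */
    int map_guest_page(CPUState *env, uint32_t vaddr, uint32_t paddr,
                       int prot, int is_user)
    {
        int ret;

        ret = tlb_set_page(env, vaddr, paddr, prot, is_user, 0);
        if (ret == 2)
            ret = tlb_set_page(env, vaddr, paddr, prot, is_user, 1);
        return ret;
    }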
@@ -1301,25 +1347,33 @@ int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 ret = 2;
         } else {
             void *map_addr;
-            if (prot & PROT_WRITE) {
-                if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || first_tb) {
-                    /* ROM: we do as if code was inside */
-                    /* if code is present, we only map as read only and save the
-                       original mapping */
-                    VirtPageDesc *vp;
-
-                    vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
-                    vp->phys_addr = pd;
-                    vp->prot = prot;
-                    vp->valid_tag = virt_valid_tag;
-                    prot &= ~PAGE_WRITE;
+
+            if (vaddr >= MMAP_AREA_END) {
+                ret = 2;
+            } else {
+                if (prot & PROT_WRITE) {
+                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
+                        first_tb ||
+                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
+                         !cpu_physical_memory_is_dirty(pd))) {
+                        /* ROM: we do as if code was inside */
+                        /* if code is present, we only map as read only and save the
+                           original mapping */
+                        VirtPageDesc *vp;
+
+                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
+                        vp->phys_addr = pd;
+                        vp->prot = prot;
+                        vp->valid_tag = virt_valid_tag;
+                        prot &= ~PAGE_WRITE;
+                    }
+                }
+                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
+                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
+                if (map_addr == MAP_FAILED) {
+                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
+                              paddr, vaddr);
                 }
-            }
-            map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
-                            MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
-            if (map_addr == MAP_FAILED) {
-                cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
-                          paddr, vaddr);
             }
         }
     }
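
This hunk carries the dirty-bit logic proper. Previously only ROM pages and pages holding translated code were mapped read-only; now a clean RAM page (dirty byte unset) is treated the same way, so its first write traps and gets recorded. The MMAP_AREA_END guard additionally refuses guest virtual addresses that would land on top of the host's own mappings. The payoff is that a consumer can scan for modified pages and then re-arm the trap in one call; a hypothetical refresh loop over the two primitives from this diff:

    /* hypothetical consumer, e.g. a display refresh: redraw only pages
       written since the last scan, then re-protect the whole range */
    void refresh_range(target_ulong start, target_ulong end)
    {
        target_ulong addr;

        for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_is_dirty(addr)) {
                /* ... redraw the region backed by this page ... */
            }
        }
        /* reset the dirty state; cpu_physical_memory_reset_dirty also
           write-protects the range again, so the next store to any of
           these pages faults and re-marks its page */
        cpu_physical_memory_reset_dirty(start, end);
    }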
@@ -1338,6 +1392,10 @@ int page_unprotect(unsigned long addr)
     printf("page_unprotect: addr=0x%08x\n", addr);
 #endif
     addr &= TARGET_PAGE_MASK;
+
+    /* if it is not mapped, no need to worry here */
+    if (addr >= MMAP_AREA_END)
+        return 0;
     vp = virt_page_find(addr >> TARGET_PAGE_BITS);
     if (!vp)
         return 0;
@@ -1351,8 +1409,13 @@ int page_unprotect(unsigned long addr)
     printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
            addr, vp->phys_addr, vp->prot);
 #endif
+    /* set the dirty bit */
+    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
+    /* flush the code inside */
     tb_invalidate_phys_page(vp->phys_addr);
-    mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot);
+    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
+        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
+                  (unsigned long)addr, vp->prot);
     return 1;
 #else
     return 0;
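
page_unprotect now also records the write (the dirty byte) before flushing any translations on the page, and a failed mprotect is promoted from silently ignored to fatal. The function returns 1 when the fault was one of ours and access has been restored, 0 otherwise; its caller is the host segmentation-fault handler elsewhere in the tree. A simplified, hypothetical sketch of that flow:

    #include <signal.h>
    #include <stdlib.h>

    /* hypothetical host SIGSEGV handler: if the faulting store hit one
       of our write-protected guest pages, page_unprotect sets the dirty
       byte, invalidates translated code on the page and restores
       PROT_WRITE; returning then restarts the faulting instruction */
    static void host_segv_handler(int sig, siginfo_t *info, void *puc)
    {
        if (page_unprotect((unsigned long)info->si_addr))
            return;    /* handled: the guest store is retried */
        abort();       /* genuine host-side fault */
    }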
@@ -1642,7 +1705,7 @@ static void io_mem_init(void)
     io_mem_nb = 5;
 
     /* alloc dirty bits array */
-    phys_ram_dirty = malloc(phys_ram_size >> TARGET_PAGE_BITS);
+    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
 }
 
 /* mem_read and mem_write are arrays of functions containing the
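
The dirty map costs one byte per target page: with 4 KB pages (TARGET_PAGE_BITS = 12) and, say, 32 MB of guest RAM, phys_ram_size >> TARGET_PAGE_BITS comes to 8192 bytes. Note that, unlike the smaller allocations above, this one is not followed by a memset in the diff, so the array's initial contents depend on whether qemu_malloc zeroes memory.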