Commit 5579c7f37ebba03f6732b4353cc7dc9fa497b7c2

Authored by pbrook
1 parent 31fc12df

Remove phys_ram_base uses from code.

Signed-off-by: Paul Brook <paul@codesourcery.com>


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@7085 c046a42c-6fe2-441c-8c8c-71466251a162
cpu-all.h
@@ -908,6 +908,9 @@ ram_addr_t qemu_ram_alloc(ram_addr_t);
908 void qemu_ram_free(ram_addr_t addr); 908 void qemu_ram_free(ram_addr_t addr);
909 /* This should only be used for ram local to a device. */ 909 /* This should only be used for ram local to a device. */
910 void *qemu_get_ram_ptr(ram_addr_t addr); 910 void *qemu_get_ram_ptr(ram_addr_t addr);
  911 +/* This should not be used by devices. */
  912 +ram_addr_t qemu_ram_addr_from_host(void *ptr);
  913 +
911 int cpu_register_io_memory(int io_index, 914 int cpu_register_io_memory(int io_index,
912 CPUReadMemoryFunc **mem_read, 915 CPUReadMemoryFunc **mem_read,
913 CPUWriteMemoryFunc **mem_write, 916 CPUWriteMemoryFunc **mem_write,
exec-all.h
@@ -316,6 +316,7 @@ static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
316 static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr) 316 static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
317 { 317 {
318 int mmu_idx, page_index, pd; 318 int mmu_idx, page_index, pd;
  319 + void *p;
319 320
320 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 321 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
321 mmu_idx = cpu_mmu_index(env1); 322 mmu_idx = cpu_mmu_index(env1);
@@ -331,7 +332,9 @@ static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
331 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr); 332 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
332 #endif 333 #endif
333 } 334 }
334 - return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base; 335 + p = (void *)(unsigned long)addr
  336 + + env1->tlb_table[mmu_idx][page_index].addend;
  337 + return qemu_ram_addr_from_host(p);
335 } 338 }
336 339
337 /* Deterministic execution requires that IO only be performed on the last 340 /* Deterministic execution requires that IO only be performed on the last
@@ -1835,6 +1835,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1835 } 1835 }
1836 } 1836 }
1837 1837
  1838 +/* Note: start and end must be within the same ram block. */
1838 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, 1839 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1839 int dirty_flags) 1840 int dirty_flags)
1840 { 1841 {
@@ -1869,7 +1870,14 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1869 1870
1870 /* we modify the TLB cache so that the dirty bit will be set again 1871 /* we modify the TLB cache so that the dirty bit will be set again
1871 when accessing the range */ 1872 when accessing the range */
1872 - start1 = start + (unsigned long)phys_ram_base; 1873 + start1 = (unsigned long)qemu_get_ram_ptr(start);
  1874 + /* Check that we don't span multiple blocks - this breaks the
  1875 + address comparisons below. */
  1876 + if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
  1877 + != (end - 1) - start) {
  1878 + abort();
  1879 + }
  1880 +
1873 for(env = first_cpu; env != NULL; env = env->next_cpu) { 1881 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1874 for(i = 0; i < CPU_TLB_SIZE; i++) 1882 for(i = 0; i < CPU_TLB_SIZE; i++)
1875 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length); 1883 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
@@ -1910,10 +1918,12 @@ void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_a
1910 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) 1918 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1911 { 1919 {
1912 ram_addr_t ram_addr; 1920 ram_addr_t ram_addr;
  1921 + void *p;
1913 1922
1914 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { 1923 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1915 - ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +  
1916 - tlb_entry->addend - (unsigned long)phys_ram_base; 1924 + p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
  1925 + + tlb_entry->addend);
  1926 + ram_addr = qemu_ram_addr_from_host(p);
1917 if (!cpu_physical_memory_is_dirty(ram_addr)) { 1927 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1918 tlb_entry->addr_write |= TLB_NOTDIRTY; 1928 tlb_entry->addr_write |= TLB_NOTDIRTY;
1919 } 1929 }
@@ -2005,7 +2015,7 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2005 /* IO memory case (romd handled later) */ 2015 /* IO memory case (romd handled later) */
2006 address |= TLB_MMIO; 2016 address |= TLB_MMIO;
2007 } 2017 }
2008 - addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK); 2018 + addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2009 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) { 2019 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2010 /* Normal RAM. */ 2020 /* Normal RAM. */
2011 iotlb = pd & TARGET_PAGE_MASK; 2021 iotlb = pd & TARGET_PAGE_MASK;
@@ -2428,13 +2438,25 @@ void qemu_ram_free(ram_addr_t addr)
2428 } 2438 }
2429 2439
2430 /* Return a host pointer to ram allocated with qemu_ram_alloc. 2440 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2431 - This may only be used if you actually allocated the ram, and  
2432 - aready know how but the ram block is. */ 2441 + With the exception of the softmmu code in this file, this should
  2442 + only be used for local memory (e.g. video ram) that the device owns,
  2443 + and knows it isn't going to access beyond the end of the block.
  2444 +
  2445 + It should not be used for general purpose DMA.
  2446 + Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
  2447 + */
2433 void *qemu_get_ram_ptr(ram_addr_t addr) 2448 void *qemu_get_ram_ptr(ram_addr_t addr)
2434 { 2449 {
2435 return phys_ram_base + addr; 2450 return phys_ram_base + addr;
2436 } 2451 }
2437 2452
  2453 +/* Some of the softmmu routines need to translate from a host pointer
  2454 + (typically a TLB entry) back to a ram offset. */
  2455 +ram_addr_t qemu_ram_addr_from_host(void *ptr)
  2456 +{
  2457 + return (uint8_t *)ptr - phys_ram_base;
  2458 +}
  2459 +
2438 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) 2460 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2439 { 2461 {
2440 #ifdef DEBUG_UNASSIGNED 2462 #ifdef DEBUG_UNASSIGNED
@@ -2521,7 +2543,7 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2521 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 2543 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2522 #endif 2544 #endif
2523 } 2545 }
2524 - stb_p(phys_ram_base + ram_addr, val); 2546 + stb_p(qemu_get_ram_ptr(ram_addr), val);
2525 #ifdef USE_KQEMU 2547 #ifdef USE_KQEMU
2526 if (cpu_single_env->kqemu_enabled && 2548 if (cpu_single_env->kqemu_enabled &&
2527 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) 2549 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -2546,7 +2568,7 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2546 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 2568 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2547 #endif 2569 #endif
2548 } 2570 }
2549 - stw_p(phys_ram_base + ram_addr, val); 2571 + stw_p(qemu_get_ram_ptr(ram_addr), val);
2550 #ifdef USE_KQEMU 2572 #ifdef USE_KQEMU
2551 if (cpu_single_env->kqemu_enabled && 2573 if (cpu_single_env->kqemu_enabled &&
2552 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) 2574 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -2571,7 +2593,7 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2571 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 2593 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2572 #endif 2594 #endif
2573 } 2595 }
2574 - stl_p(phys_ram_base + ram_addr, val); 2596 + stl_p(qemu_get_ram_ptr(ram_addr), val);
2575 #ifdef USE_KQEMU 2597 #ifdef USE_KQEMU
2576 if (cpu_single_env->kqemu_enabled && 2598 if (cpu_single_env->kqemu_enabled &&
2577 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) 2599 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -3030,7 +3052,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3030 unsigned long addr1; 3052 unsigned long addr1;
3031 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3053 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3032 /* RAM case */ 3054 /* RAM case */
3033 - ptr = phys_ram_base + addr1; 3055 + ptr = qemu_get_ram_ptr(addr1);
3034 memcpy(ptr, buf, l); 3056 memcpy(ptr, buf, l);
3035 if (!cpu_physical_memory_is_dirty(addr1)) { 3057 if (!cpu_physical_memory_is_dirty(addr1)) {
3036 /* invalidate code */ 3058 /* invalidate code */
@@ -3066,7 +3088,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3066 } 3088 }
3067 } else { 3089 } else {
3068 /* RAM case */ 3090 /* RAM case */
3069 - ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 3091 + ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3070 (addr & ~TARGET_PAGE_MASK); 3092 (addr & ~TARGET_PAGE_MASK);
3071 memcpy(buf, ptr, l); 3093 memcpy(buf, ptr, l);
3072 } 3094 }
@@ -3107,7 +3129,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3107 unsigned long addr1; 3129 unsigned long addr1;
3108 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3130 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3109 /* ROM/RAM case */ 3131 /* ROM/RAM case */
3110 - ptr = phys_ram_base + addr1; 3132 + ptr = qemu_get_ram_ptr(addr1);
3111 memcpy(ptr, buf, l); 3133 memcpy(ptr, buf, l);
3112 } 3134 }
3113 len -= l; 3135 len -= l;
@@ -3207,7 +3229,7 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
3207 ptr = bounce.buffer; 3229 ptr = bounce.buffer;
3208 } else { 3230 } else {
3209 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3231 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3210 - ptr = phys_ram_base + addr1; 3232 + ptr = qemu_get_ram_ptr(addr1);
3211 } 3233 }
3212 if (!done) { 3234 if (!done) {
3213 ret = ptr; 3235 ret = ptr;
@@ -3232,7 +3254,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3232 { 3254 {
3233 if (buffer != bounce.buffer) { 3255 if (buffer != bounce.buffer) {
3234 if (is_write) { 3256 if (is_write) {
3235 - unsigned long addr1 = (uint8_t *)buffer - phys_ram_base; 3257 + ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3236 while (access_len) { 3258 while (access_len) {
3237 unsigned l; 3259 unsigned l;
3238 l = TARGET_PAGE_SIZE; 3260 l = TARGET_PAGE_SIZE;
@@ -3284,7 +3306,7 @@ uint32_t ldl_phys(target_phys_addr_t addr)
3284 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); 3306 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3285 } else { 3307 } else {
3286 /* RAM case */ 3308 /* RAM case */
3287 - ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 3309 + ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3288 (addr & ~TARGET_PAGE_MASK); 3310 (addr & ~TARGET_PAGE_MASK);
3289 val = ldl_p(ptr); 3311 val = ldl_p(ptr);
3290 } 3312 }
@@ -3322,7 +3344,7 @@ uint64_t ldq_phys(target_phys_addr_t addr)
3322 #endif 3344 #endif
3323 } else { 3345 } else {
3324 /* RAM case */ 3346 /* RAM case */
3325 - ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 3347 + ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3326 (addr & ~TARGET_PAGE_MASK); 3348 (addr & ~TARGET_PAGE_MASK);
3327 val = ldq_p(ptr); 3349 val = ldq_p(ptr);
3328 } 3350 }
@@ -3369,7 +3391,7 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3369 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 3391 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3370 } else { 3392 } else {
3371 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3393 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3372 - ptr = phys_ram_base + addr1; 3394 + ptr = qemu_get_ram_ptr(addr1);
3373 stl_p(ptr, val); 3395 stl_p(ptr, val);
3374 3396
3375 if (unlikely(in_migration)) { 3397 if (unlikely(in_migration)) {
@@ -3410,7 +3432,7 @@ void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3410 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32); 3432 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3411 #endif 3433 #endif
3412 } else { 3434 } else {
3413 - ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 3435 + ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3414 (addr & ~TARGET_PAGE_MASK); 3436 (addr & ~TARGET_PAGE_MASK);
3415 stq_p(ptr, val); 3437 stq_p(ptr, val);
3416 } 3438 }
@@ -3440,7 +3462,7 @@ void stl_phys(target_phys_addr_t addr, uint32_t val)
3440 unsigned long addr1; 3462 unsigned long addr1;
3441 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3463 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3442 /* RAM case */ 3464 /* RAM case */
3443 - ptr = phys_ram_base + addr1; 3465 + ptr = qemu_get_ram_ptr(addr1);
3444 stl_p(ptr, val); 3466 stl_p(ptr, val);
3445 if (!cpu_physical_memory_is_dirty(addr1)) { 3467 if (!cpu_physical_memory_is_dirty(addr1)) {
3446 /* invalidate code */ 3468 /* invalidate code */
kvm-all.c
@@ -101,7 +101,7 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
101 mem.slot = slot->slot; 101 mem.slot = slot->slot;
102 mem.guest_phys_addr = slot->start_addr; 102 mem.guest_phys_addr = slot->start_addr;
103 mem.memory_size = slot->memory_size; 103 mem.memory_size = slot->memory_size;
104 - mem.userspace_addr = (unsigned long)phys_ram_base + slot->phys_offset; 104 + mem.userspace_addr = (unsigned long)qemu_get_ram_ptr(slot->phys_offset);
105 mem.flags = slot->flags; 105 mem.flags = slot->flags;
106 106
107 return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); 107 return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
@@ -329,7 +329,7 @@ int kvm_init(int smp_cpus)
329 329
330 /* initially, KVM allocated its own memory and we had to jump through 330 /* initially, KVM allocated its own memory and we had to jump through
331 * hooks to make phys_ram_base point to this. Modern versions of KVM 331 * hooks to make phys_ram_base point to this. Modern versions of KVM
332 - * just use a user allocated buffer so we can use phys_ram_base 332 + * just use a user allocated buffer so we can use regular pages
333 * unmodified. Make sure we have a sufficiently modern version of KVM. 333 * unmodified. Make sure we have a sufficiently modern version of KVM.
334 */ 334 */
335 ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY); 335 ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
@@ -3097,7 +3097,7 @@ static int ram_load_v1(QEMUFile *f, void *opaque)
3097 if (qemu_get_be32(f) != phys_ram_size) 3097 if (qemu_get_be32(f) != phys_ram_size)
3098 return -EINVAL; 3098 return -EINVAL;
3099 for(i = 0; i < phys_ram_size; i+= TARGET_PAGE_SIZE) { 3099 for(i = 0; i < phys_ram_size; i+= TARGET_PAGE_SIZE) {
3100 - ret = ram_get_page(f, phys_ram_base + i, TARGET_PAGE_SIZE); 3100 + ret = ram_get_page(f, qemu_get_ram_ptr(i), TARGET_PAGE_SIZE);
3101 if (ret) 3101 if (ret)
3102 return ret; 3102 return ret;
3103 } 3103 }
@@ -3184,20 +3184,20 @@ static int ram_save_block(QEMUFile *f)
3184 3184
3185 while (addr < phys_ram_size) { 3185 while (addr < phys_ram_size) {
3186 if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) { 3186 if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
3187 - uint8_t ch; 3187 + uint8_t *p;
3188 3188
3189 cpu_physical_memory_reset_dirty(current_addr, 3189 cpu_physical_memory_reset_dirty(current_addr,
3190 current_addr + TARGET_PAGE_SIZE, 3190 current_addr + TARGET_PAGE_SIZE,
3191 MIGRATION_DIRTY_FLAG); 3191 MIGRATION_DIRTY_FLAG);
3192 3192
3193 - ch = *(phys_ram_base + current_addr); 3193 + p = qemu_get_ram_ptr(current_addr);
3194 3194
3195 - if (is_dup_page(phys_ram_base + current_addr, ch)) { 3195 + if (is_dup_page(p, *p)) {
3196 qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_COMPRESS); 3196 qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_COMPRESS);
3197 - qemu_put_byte(f, ch); 3197 + qemu_put_byte(f, *p);
3198 } else { 3198 } else {
3199 qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_PAGE); 3199 qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_PAGE);
3200 - qemu_put_buffer(f, phys_ram_base + current_addr, TARGET_PAGE_SIZE); 3200 + qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
3201 } 3201 }
3202 3202
3203 found = 1; 3203 found = 1;
@@ -3278,7 +3278,8 @@ static int ram_load_dead(QEMUFile *f, void *opaque)
3278 goto error; 3278 goto error;
3279 } 3279 }
3280 if (buf[0] == 0) { 3280 if (buf[0] == 0) {
3281 - if (ram_decompress_buf(s, phys_ram_base + i, BDRV_HASH_BLOCK_SIZE) < 0) { 3281 + if (ram_decompress_buf(s, qemu_get_ram_ptr(i),
  3282 + BDRV_HASH_BLOCK_SIZE) < 0) {
3282 fprintf(stderr, "Error while reading ram block address=0x%08" PRIx64, (uint64_t)i); 3283 fprintf(stderr, "Error while reading ram block address=0x%08" PRIx64, (uint64_t)i);
3283 goto error; 3284 goto error;
3284 } 3285 }
@@ -3328,9 +3329,9 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
3328 3329
3329 if (flags & RAM_SAVE_FLAG_COMPRESS) { 3330 if (flags & RAM_SAVE_FLAG_COMPRESS) {
3330 uint8_t ch = qemu_get_byte(f); 3331 uint8_t ch = qemu_get_byte(f);
3331 - memset(phys_ram_base + addr, ch, TARGET_PAGE_SIZE); 3332 + memset(qemu_get_ram_ptr(addr), ch, TARGET_PAGE_SIZE);
3332 } else if (flags & RAM_SAVE_FLAG_PAGE) 3333 } else if (flags & RAM_SAVE_FLAG_PAGE)
3333 - qemu_get_buffer(f, phys_ram_base + addr, TARGET_PAGE_SIZE); 3334 + qemu_get_buffer(f, qemu_get_ram_ptr(addr), TARGET_PAGE_SIZE);
3334 } while (!(flags & RAM_SAVE_FLAG_EOS)); 3335 } while (!(flags & RAM_SAVE_FLAG_EOS));
3335 3336
3336 return 0; 3337 return 0;