Commit 6d16c2f88f2a866bec27c4d170ddd97ee8e41a0e
1 parent d268de04
Add target memory mapping API (Avi Kivity)
Devices accessing large amounts of memory (as with DMA) will wish to obtain a pointer to guest memory rather than access it indirectly via cpu_physical_memory_rw(). Add a new API to convert target addresses to host pointers.

In case the target address does not correspond to RAM, a bounce buffer is allocated. To prevent the guest from causing the host to allocate unbounded amounts of bounce buffer, this memory is limited (currently to one page).

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6394 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 2 changed files with 108 additions and 0 deletions
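For orientation, a minimal usage sketch of the new API, not part of the commit itself: a device model performing a DMA-style write into guest memory. my_device_dma_write() and fill_from_device() are hypothetical placeholder names; only cpu_physical_memory_map() and cpu_physical_memory_unmap() come from the patch below.

/* Illustrative caller (not part of this commit): DMA-style write into
 * guest memory via the new mapping API instead of cpu_physical_memory_rw().
 * my_device_dma_write() and fill_from_device() are placeholder names. */
static void my_device_dma_write(target_phys_addr_t guest_addr,
                                target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *host;

        /* Request the whole remaining range; *plen returns how much was
         * actually mapped, which may be less (see exec.c below). */
        host = cpu_physical_memory_map(guest_addr, &plen, 1);
        if (!host) {
            /* Nothing mappable right now, e.g. the single bounce buffer
             * is already in use; a real device would retry later. */
            break;
        }
        fill_from_device(host, plen);            /* produce the payload */
        /* access_len == plen: everything mapped was written, so unmap
         * marks it dirty (or copies the bounce buffer back). */
        cpu_physical_memory_unmap(host, plen, 1, plen);
        guest_addr += plen;
        size -= plen;
    }
}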
cpu-all.h
@@ -923,6 +923,12 @@ static inline void cpu_physical_memory_write(target_phys_addr_t addr,
 {
     cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
 }
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+                              target_phys_addr_t *plen,
+                              int is_write);
+void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
+                               int is_write, target_phys_addr_t access_len);
+
 uint32_t ldub_phys(target_phys_addr_t addr);
 uint32_t lduw_phys(target_phys_addr_t addr);
 uint32_t ldl_phys(target_phys_addr_t addr);
exec.c
@@ -3045,6 +3045,108 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
     }
 }
 
+typedef struct {
+    void *buffer;
+    target_phys_addr_t addr;
+    target_phys_addr_t len;
+} BounceBuffer;
+
+static BounceBuffer bounce;
+
+/* Map a physical memory region into a host virtual address.
+ * May map a subset of the requested range, given by and returned in *plen.
+ * May return NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ */
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+                              target_phys_addr_t *plen,
+                              int is_write)
+{
+    target_phys_addr_t len = *plen;
+    target_phys_addr_t done = 0;
+    int l;
+    uint8_t *ret = NULL;
+    uint8_t *ptr;
+    target_phys_addr_t page;
+    unsigned long pd;
+    PhysPageDesc *p;
+    unsigned long addr1;
+
+    while (len > 0) {
+        page = addr & TARGET_PAGE_MASK;
+        l = (page + TARGET_PAGE_SIZE) - addr;
+        if (l > len)
+            l = len;
+        p = phys_page_find(page >> TARGET_PAGE_BITS);
+        if (!p) {
+            pd = IO_MEM_UNASSIGNED;
+        } else {
+            pd = p->phys_offset;
+        }
+
+        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+            if (done || bounce.buffer) {
+                break;
+            }
+            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
+            bounce.addr = addr;
+            bounce.len = l;
+            if (!is_write) {
+                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
+            }
+            ptr = bounce.buffer;
+        } else {
+            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+            ptr = phys_ram_base + addr1;
+        }
+        if (!done) {
+            ret = ptr;
+        } else if (ret + done != ptr) {
+            break;
+        }
+
+        len -= l;
+        addr += l;
+        done += l;
+    }
+    *plen = done;
+    return ret;
+}
+
+/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
+ * Will also mark the memory as dirty if is_write == 1.  access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ */
+void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
+                               int is_write, target_phys_addr_t access_len)
+{
+    if (buffer != bounce.buffer) {
+        if (is_write) {
+            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
+            while (access_len) {
+                unsigned l;
+                l = TARGET_PAGE_SIZE;
+                if (l > access_len)
+                    l = access_len;
+                if (!cpu_physical_memory_is_dirty(addr1)) {
+                    /* invalidate code */
+                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
+                    /* set dirty bit */
+                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
+                        (0xff & ~CODE_DIRTY_FLAG);
+                }
+                addr1 += l;
+                access_len -= l;
+            }
+        }
+        return;
+    }
+    if (is_write) {
+        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
+    }
+    qemu_free(bounce.buffer);
+    bounce.buffer = NULL;
+}
+
 
 /* warning: addr must be aligned */
 uint32_t ldl_phys(target_phys_addr_t addr)
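A second hypothetical sketch, also not part of the commit, illustrating the access_len parameter documented above: a device maps a guest receive buffer for writing, the backend delivers fewer bytes than were mapped, and unmap is told how much was actually written so that dirty marking (or the bounce-buffer write-back) covers only that amount. receive_from_backend() is an illustrative placeholder.

/* Illustrative caller (not part of this commit): the backend may deliver
 * fewer bytes than were mapped, so the actual count is passed as
 * access_len.  receive_from_backend() is a placeholder name. */
static void my_device_receive(target_phys_addr_t guest_addr,
                              target_phys_addr_t buf_size)
{
    target_phys_addr_t plen = buf_size;
    size_t received;
    void *host;

    host = cpu_physical_memory_map(guest_addr, &plen, 1);
    if (!host) {
        return;                 /* bounce buffer busy; retry later */
    }
    received = receive_from_backend(host, plen);   /* may be < plen */
    /* Only 'received' bytes were written, so only that much is marked
     * dirty (or written back through the bounce buffer). */
    cpu_physical_memory_unmap(host, plen, 1, received);
}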