Commit 61382a500a9e54ef96ca28e0f221151f569cbb6e
1 parent 3a51dee6

full softmmu support

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@410 c046a42c-6fe2-441c-8c8c-71466251a162

Showing 10 changed files with 594 additions and 339 deletions
cpu-all.h
| ... | ... | @@ -20,18 +20,19 @@ |
| 20 | 20 | #ifndef CPU_ALL_H |
| 21 | 21 | #define CPU_ALL_H |
| 22 | 22 | |
| 23 | -/* all CPU memory access use these macros */ | |
| 24 | -static inline int ldub(void *ptr) | |
| 23 | +/* CPU memory access without any memory or io remapping */ | |
| 24 | + | |
| 25 | +static inline int ldub_raw(void *ptr) | |
| 25 | 26 | { |
| 26 | 27 | return *(uint8_t *)ptr; |
| 27 | 28 | } |
| 28 | 29 | |
| 29 | -static inline int ldsb(void *ptr) | |
| 30 | +static inline int ldsb_raw(void *ptr) | |
| 30 | 31 | { |
| 31 | 32 | return *(int8_t *)ptr; |
| 32 | 33 | } |
| 33 | 34 | |
| 34 | -static inline void stb(void *ptr, int v) | |
| 35 | +static inline void stb_raw(void *ptr, int v) | |
| 35 | 36 | { |
| 36 | 37 | *(uint8_t *)ptr = v; |
| 37 | 38 | } |
| ... | ... | @@ -42,7 +43,7 @@ static inline void stb(void *ptr, int v) |
| 42 | 43 | #if defined(WORDS_BIGENDIAN) || defined(__arm__) |
| 43 | 44 | |
| 44 | 45 | /* conservative code for little endian unaligned accesses */ |
| 45 | -static inline int lduw(void *ptr) | |
| 46 | +static inline int lduw_raw(void *ptr) | |
| 46 | 47 | { |
| 47 | 48 | #ifdef __powerpc__ |
| 48 | 49 | int val; |
| ... | ... | @@ -54,7 +55,7 @@ static inline int lduw(void *ptr) |
| 54 | 55 | #endif |
| 55 | 56 | } |
| 56 | 57 | |
| 57 | -static inline int ldsw(void *ptr) | |
| 58 | +static inline int ldsw_raw(void *ptr) | |
| 58 | 59 | { |
| 59 | 60 | #ifdef __powerpc__ |
| 60 | 61 | int val; |
| ... | ... | @@ -66,7 +67,7 @@ static inline int ldsw(void *ptr) |
| 66 | 67 | #endif |
| 67 | 68 | } |
| 68 | 69 | |
| 69 | -static inline int ldl(void *ptr) | |
| 70 | +static inline int ldl_raw(void *ptr) | |
| 70 | 71 | { |
| 71 | 72 | #ifdef __powerpc__ |
| 72 | 73 | int val; |
| ... | ... | @@ -78,16 +79,16 @@ static inline int ldl(void *ptr) |
| 78 | 79 | #endif |
| 79 | 80 | } |
| 80 | 81 | |
| 81 | -static inline uint64_t ldq(void *ptr) | |
| 82 | +static inline uint64_t ldq_raw(void *ptr) | |
| 82 | 83 | { |
| 83 | 84 | uint8_t *p = ptr; |
| 84 | 85 | uint32_t v1, v2; |
| 85 | - v1 = ldl(p); | |
| 86 | - v2 = ldl(p + 4); | |
| 86 | + v1 = ldl_raw(p); | |
| 87 | + v2 = ldl_raw(p + 4); | |
| 87 | 88 | return v1 | ((uint64_t)v2 << 32); |
| 88 | 89 | } |
| 89 | 90 | |
| 90 | -static inline void stw(void *ptr, int v) | |
| 91 | +static inline void stw_raw(void *ptr, int v) | |
| 91 | 92 | { |
| 92 | 93 | #ifdef __powerpc__ |
| 93 | 94 | __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr)); |
| ... | ... | @@ -98,7 +99,7 @@ static inline void stw(void *ptr, int v) |
| 98 | 99 | #endif |
| 99 | 100 | } |
| 100 | 101 | |
| 101 | -static inline void stl(void *ptr, int v) | |
| 102 | +static inline void stl_raw(void *ptr, int v) | |
| 102 | 103 | { |
| 103 | 104 | #ifdef __powerpc__ |
| 104 | 105 | __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr)); |
| ... | ... | @@ -111,104 +112,104 @@ static inline void stl(void *ptr, int v) |
| 111 | 112 | #endif |
| 112 | 113 | } |
| 113 | 114 | |
| 114 | -static inline void stq(void *ptr, uint64_t v) | |
| 115 | +static inline void stq_raw(void *ptr, uint64_t v) | |
| 115 | 116 | { |
| 116 | 117 | uint8_t *p = ptr; |
| 117 | - stl(p, (uint32_t)v); | |
| 118 | - stl(p + 4, v >> 32); | |
| 118 | + stl_raw(p, (uint32_t)v); | |
| 119 | + stl_raw(p + 4, v >> 32); | |
| 119 | 120 | } |
| 120 | 121 | |
| 121 | 122 | /* float access */ |
| 122 | 123 | |
| 123 | -static inline float ldfl(void *ptr) | |
| 124 | +static inline float ldfl_raw(void *ptr) | |
| 124 | 125 | { |
| 125 | 126 | union { |
| 126 | 127 | float f; |
| 127 | 128 | uint32_t i; |
| 128 | 129 | } u; |
| 129 | - u.i = ldl(ptr); | |
| 130 | + u.i = ldl_raw(ptr); | |
| 130 | 131 | return u.f; |
| 131 | 132 | } |
| 132 | 133 | |
| 133 | -static inline void stfl(void *ptr, float v) | |
| 134 | +static inline void stfl_raw(void *ptr, float v) | |
| 134 | 135 | { |
| 135 | 136 | union { |
| 136 | 137 | float f; |
| 137 | 138 | uint32_t i; |
| 138 | 139 | } u; |
| 139 | 140 | u.f = v; |
| 140 | - stl(ptr, u.i); | |
| 141 | + stl_raw(ptr, u.i); | |
| 141 | 142 | } |
| 142 | 143 | |
| 143 | 144 | |
| 144 | 145 | #if defined(__arm__) && !defined(WORDS_BIGENDIAN) |
| 145 | 146 | |
| 146 | 147 | /* NOTE: arm is horrible as double 32 bit words are stored in big endian ! */ |
| 147 | -static inline double ldfq(void *ptr) | |
| 148 | +static inline double ldfq_raw(void *ptr) | |
| 148 | 149 | { |
| 149 | 150 | union { |
| 150 | 151 | double d; |
| 151 | 152 | uint32_t tab[2]; |
| 152 | 153 | } u; |
| 153 | - u.tab[1] = ldl(ptr); | |
| 154 | - u.tab[0] = ldl(ptr + 4); | |
| 154 | + u.tab[1] = ldl_raw(ptr); | |
| 155 | + u.tab[0] = ldl_raw(ptr + 4); | |
| 155 | 156 | return u.d; |
| 156 | 157 | } |
| 157 | 158 | |
| 158 | -static inline void stfq(void *ptr, double v) | |
| 159 | +static inline void stfq_raw(void *ptr, double v) | |
| 159 | 160 | { |
| 160 | 161 | union { |
| 161 | 162 | double d; |
| 162 | 163 | uint32_t tab[2]; |
| 163 | 164 | } u; |
| 164 | 165 | u.d = v; |
| 165 | - stl(ptr, u.tab[1]); | |
| 166 | - stl(ptr + 4, u.tab[0]); | |
| 166 | + stl_raw(ptr, u.tab[1]); | |
| 167 | + stl_raw(ptr + 4, u.tab[0]); | |
| 167 | 168 | } |
| 168 | 169 | |
| 169 | 170 | #else |
| 170 | -static inline double ldfq(void *ptr) | |
| 171 | +static inline double ldfq_raw(void *ptr) | |
| 171 | 172 | { |
| 172 | 173 | union { |
| 173 | 174 | double d; |
| 174 | 175 | uint64_t i; |
| 175 | 176 | } u; |
| 176 | - u.i = ldq(ptr); | |
| 177 | + u.i = ldq_raw(ptr); | |
| 177 | 178 | return u.d; |
| 178 | 179 | } |
| 179 | 180 | |
| 180 | -static inline void stfq(void *ptr, double v) | |
| 181 | +static inline void stfq_raw(void *ptr, double v) | |
| 181 | 182 | { |
| 182 | 183 | union { |
| 183 | 184 | double d; |
| 184 | 185 | uint64_t i; |
| 185 | 186 | } u; |
| 186 | 187 | u.d = v; |
| 187 | - stq(ptr, u.i); | |
| 188 | + stq_raw(ptr, u.i); | |
| 188 | 189 | } |
| 189 | 190 | #endif |
| 190 | 191 | |
| 191 | 192 | #elif defined(TARGET_WORDS_BIGENDIAN) && !defined(WORDS_BIGENDIAN) |
| 192 | 193 | |
| 193 | -static inline int lduw(void *ptr) | |
| 194 | +static inline int lduw_raw(void *ptr) | |
| 194 | 195 | { |
| 195 | 196 | uint8_t *b = (uint8_t *) ptr; |
| 196 | 197 | return (b[0]<<8|b[1]); |
| 197 | 198 | } |
| 198 | 199 | |
| 199 | -static inline int ldsw(void *ptr) | |
| 200 | +static inline int ldsw_raw(void *ptr) | |
| 200 | 201 | { |
| 201 | 202 | int8_t *b = (int8_t *) ptr; |
| 202 | 203 | return (b[0]<<8|b[1]); |
| 203 | 204 | } |
| 204 | 205 | |
| 205 | -static inline int ldl(void *ptr) | |
| 206 | +static inline int ldl_raw(void *ptr) | |
| 206 | 207 | { |
| 207 | 208 | uint8_t *b = (uint8_t *) ptr; |
| 208 | 209 | return (b[0]<<24|b[1]<<16|b[2]<<8|b[3]); |
| 209 | 210 | } |
| 210 | 211 | |
| 211 | -static inline uint64_t ldq(void *ptr) | |
| 212 | +static inline uint64_t ldq_raw(void *ptr) | |
| 212 | 213 | { |
| 213 | 214 | uint32_t a,b; |
| 214 | 215 | a = ldl (ptr); |
| ... | ... | @@ -216,14 +217,14 @@ static inline uint64_t ldq(void *ptr) |
| 216 | 217 | return (((uint64_t)a<<32)|b); |
| 217 | 218 | } |
| 218 | 219 | |
| 219 | -static inline void stw(void *ptr, int v) | |
| 220 | +static inline void stw_raw(void *ptr, int v) | |
| 220 | 221 | { |
| 221 | 222 | uint8_t *d = (uint8_t *) ptr; |
| 222 | 223 | d[0] = v >> 8; |
| 223 | 224 | d[1] = v; |
| 224 | 225 | } |
| 225 | 226 | |
| 226 | -static inline void stl(void *ptr, int v) | |
| 227 | +static inline void stl_raw(void *ptr, int v) | |
| 227 | 228 | { |
| 228 | 229 | uint8_t *d = (uint8_t *) ptr; |
| 229 | 230 | d[0] = v >> 24; |
| ... | ... | @@ -232,7 +233,7 @@ static inline void stl(void *ptr, int v) |
| 232 | 233 | d[3] = v; |
| 233 | 234 | } |
| 234 | 235 | |
| 235 | -static inline void stq(void *ptr, uint64_t v) | |
| 236 | +static inline void stq_raw(void *ptr, uint64_t v) | |
| 236 | 237 | { |
| 237 | 238 | stl (ptr, v); |
| 238 | 239 | stl (ptr+4, v >> 32); |
| ... | ... | @@ -240,64 +241,102 @@ static inline void stq(void *ptr, uint64_t v) |
| 240 | 241 | |
| 241 | 242 | #else |
| 242 | 243 | |
| 243 | -static inline int lduw(void *ptr) | |
| 244 | +static inline int lduw_raw(void *ptr) | |
| 244 | 245 | { |
| 245 | 246 | return *(uint16_t *)ptr; |
| 246 | 247 | } |
| 247 | 248 | |
| 248 | -static inline int ldsw(void *ptr) | |
| 249 | +static inline int ldsw_raw(void *ptr) | |
| 249 | 250 | { |
| 250 | 251 | return *(int16_t *)ptr; |
| 251 | 252 | } |
| 252 | 253 | |
| 253 | -static inline int ldl(void *ptr) | |
| 254 | +static inline int ldl_raw(void *ptr) | |
| 254 | 255 | { |
| 255 | 256 | return *(uint32_t *)ptr; |
| 256 | 257 | } |
| 257 | 258 | |
| 258 | -static inline uint64_t ldq(void *ptr) | |
| 259 | +static inline uint64_t ldq_raw(void *ptr) | |
| 259 | 260 | { |
| 260 | 261 | return *(uint64_t *)ptr; |
| 261 | 262 | } |
| 262 | 263 | |
| 263 | -static inline void stw(void *ptr, int v) | |
| 264 | +static inline void stw_raw(void *ptr, int v) | |
| 264 | 265 | { |
| 265 | 266 | *(uint16_t *)ptr = v; |
| 266 | 267 | } |
| 267 | 268 | |
| 268 | -static inline void stl(void *ptr, int v) | |
| 269 | +static inline void stl_raw(void *ptr, int v) | |
| 269 | 270 | { |
| 270 | 271 | *(uint32_t *)ptr = v; |
| 271 | 272 | } |
| 272 | 273 | |
| 273 | -static inline void stq(void *ptr, uint64_t v) | |
| 274 | +static inline void stq_raw(void *ptr, uint64_t v) | |
| 274 | 275 | { |
| 275 | 276 | *(uint64_t *)ptr = v; |
| 276 | 277 | } |
| 277 | 278 | |
| 278 | 279 | /* float access */ |
| 279 | 280 | |
| 280 | -static inline float ldfl(void *ptr) | |
| 281 | +static inline float ldfl_raw(void *ptr) | |
| 281 | 282 | { |
| 282 | 283 | return *(float *)ptr; |
| 283 | 284 | } |
| 284 | 285 | |
| 285 | -static inline double ldfq(void *ptr) | |
| 286 | +static inline double ldfq_raw(void *ptr) | |
| 286 | 287 | { |
| 287 | 288 | return *(double *)ptr; |
| 288 | 289 | } |
| 289 | 290 | |
| 290 | -static inline void stfl(void *ptr, float v) | |
| 291 | +static inline void stfl_raw(void *ptr, float v) | |
| 291 | 292 | { |
| 292 | 293 | *(float *)ptr = v; |
| 293 | 294 | } |
| 294 | 295 | |
| 295 | -static inline void stfq(void *ptr, double v) | |
| 296 | +static inline void stfq_raw(void *ptr, double v) | |
| 296 | 297 | { |
| 297 | 298 | *(double *)ptr = v; |
| 298 | 299 | } |
| 299 | 300 | #endif |
| 300 | 301 | |
| 302 | +/* MMU memory access macros */ | |
| 303 | + | |
| 304 | +#if defined(CONFIG_USER_ONLY) | |
| 305 | + | |
| 306 | +/* if user mode, no other memory access functions */ | |
| 307 | +#define ldub(p) ldub_raw(p) | |
| 308 | +#define ldsb(p) ldsb_raw(p) | |
| 309 | +#define lduw(p) lduw_raw(p) | |
| 310 | +#define ldsw(p) ldsw_raw(p) | |
| 311 | +#define ldl(p) ldl_raw(p) | |
| 312 | +#define ldq(p) ldq_raw(p) | |
| 313 | +#define ldfl(p) ldfl_raw(p) | |
| 314 | +#define ldfq(p) ldfq_raw(p) | |
| 315 | +#define stb(p, v) stb_raw(p, v) | |
| 316 | +#define stw(p, v) stw_raw(p, v) | |
| 317 | +#define stl(p, v) stl_raw(p, v) | |
| 318 | +#define stq(p, v) stq_raw(p, v) | |
| 319 | +#define stfl(p, v) stfl_raw(p, v) | |
| 320 | +#define stfq(p, v) stfq_raw(p, v) | |
| 321 | + | |
| 322 | +#define ldub_code(p) ldub_raw(p) | |
| 323 | +#define ldsb_code(p) ldsb_raw(p) | |
| 324 | +#define lduw_code(p) lduw_raw(p) | |
| 325 | +#define ldsw_code(p) ldsw_raw(p) | |
| 326 | +#define ldl_code(p) ldl_raw(p) | |
| 327 | + | |
| 328 | +#define ldub_kernel(p) ldub_raw(p) | |
| 329 | +#define ldsb_kernel(p) ldsb_raw(p) | |
| 330 | +#define lduw_kernel(p) lduw_raw(p) | |
| 331 | +#define ldsw_kernel(p) ldsw_raw(p) | |
| 332 | +#define ldl_kernel(p) ldl_raw(p) | |
| 333 | +#define stb_kernel(p, v) stb_raw(p, v) | |
| 334 | +#define stw_kernel(p, v) stw_raw(p, v) | |
| 335 | +#define stl_kernel(p, v) stl_raw(p, v) | |
| 336 | +#define stq_kernel(p, v) stq_raw(p, v) | |
| 337 | + | |
| 338 | +#endif /* defined(CONFIG_USER_ONLY) */ | |
| 339 | + | |
| 301 | 340 | /* page related stuff */ |
| 302 | 341 | |
| 303 | 342 | #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) |
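Note: this hunk only renames the existing accessors to `*_raw` and, under `CONFIG_USER_ONLY`, maps the plain `ldl()`/`stl()` names straight back onto them. The sketch below is not part of the commit; it restates the byte-wise accessors from the `TARGET_WORDS_BIGENDIAN && !defined(WORDS_BIGENDIAN)` branch in standalone form, to illustrate why they work regardless of host endianness or alignment.

```c
/* Illustrative sketch (not from the commit): byte-wise big-endian
 * accessors, as used when the target is big-endian but the host is not. */
#include <stdint.h>
#include <stdio.h>

static inline int ldl_be_raw(void *ptr)
{
    uint8_t *b = (uint8_t *)ptr;
    /* assemble a big-endian 32-bit value byte by byte, independent of
       host byte order and alignment */
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
}

static inline void stl_be_raw(void *ptr, int v)
{
    uint8_t *d = (uint8_t *)ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
}

int main(void)
{
    uint8_t buf[4];
    stl_be_raw(buf, 0x12345678);
    printf("bytes: %02x %02x %02x %02x, ldl_be_raw = 0x%08x\n",
           buf[0], buf[1], buf[2], buf[3], ldl_be_raw(buf));
    return 0;
}
```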
exec.c
| ... | ... | @@ -444,16 +444,20 @@ static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index) |
| 444 | 444 | prot = 0; |
| 445 | 445 | for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) |
| 446 | 446 | prot |= page_get_flags(addr); |
| 447 | +#if !defined(CONFIG_SOFTMMU) | |
| 447 | 448 | mprotect((void *)host_start, host_page_size, |
| 448 | 449 | (prot & PAGE_BITS) & ~PAGE_WRITE); |
| 450 | +#endif | |
| 451 | +#if !defined(CONFIG_USER_ONLY) | |
| 452 | + /* suppress soft TLB */ | |
| 453 | + /* XXX: must flush on all processor with same address space */ | |
| 454 | + tlb_flush_page_write(cpu_single_env, host_start); | |
| 455 | +#endif | |
| 449 | 456 | #ifdef DEBUG_TB_INVALIDATE |
| 450 | 457 | printf("protecting code page: 0x%08lx\n", |
| 451 | 458 | host_start); |
| 452 | 459 | #endif |
| 453 | 460 | p->flags &= ~PAGE_WRITE; |
| 454 | -#ifdef DEBUG_TB_CHECK | |
| 455 | - tb_page_check(); | |
| 456 | -#endif | |
| 457 | 461 | } |
| 458 | 462 | } |
| 459 | 463 | |
| ... | ... | @@ -483,6 +487,9 @@ void tb_link(TranslationBlock *tb) |
| 483 | 487 | if (page_index2 != page_index1) { |
| 484 | 488 | tb_alloc_page(tb, page_index2); |
| 485 | 489 | } |
| 490 | +#ifdef DEBUG_TB_CHECK | |
| 491 | + tb_page_check(); | |
| 492 | +#endif | |
| 486 | 493 | tb->jmp_first = (TranslationBlock *)((long)tb | 2); |
| 487 | 494 | tb->jmp_next[0] = NULL; |
| 488 | 495 | tb->jmp_next[1] = NULL; |
| ... | ... | @@ -517,20 +524,23 @@ int page_unprotect(unsigned long address) |
| 517 | 524 | /* if the page was really writable, then we change its |
| 518 | 525 | protection back to writable */ |
| 519 | 526 | if (prot & PAGE_WRITE_ORG) { |
| 520 | - mprotect((void *)host_start, host_page_size, | |
| 521 | - (prot & PAGE_BITS) | PAGE_WRITE); | |
| 522 | 527 | pindex = (address - host_start) >> TARGET_PAGE_BITS; |
| 523 | - p1[pindex].flags |= PAGE_WRITE; | |
| 524 | - /* and since the content will be modified, we must invalidate | |
| 525 | - the corresponding translated code. */ | |
| 526 | - tb_invalidate_page(address); | |
| 528 | + if (!(p1[pindex].flags & PAGE_WRITE)) { | |
| 529 | +#if !defined(CONFIG_SOFTMMU) | |
| 530 | + mprotect((void *)host_start, host_page_size, | |
| 531 | + (prot & PAGE_BITS) | PAGE_WRITE); | |
| 532 | +#endif | |
| 533 | + p1[pindex].flags |= PAGE_WRITE; | |
| 534 | + /* and since the content will be modified, we must invalidate | |
| 535 | + the corresponding translated code. */ | |
| 536 | + tb_invalidate_page(address); | |
| 527 | 537 | #ifdef DEBUG_TB_CHECK |
| 528 | - tb_invalidate_check(address); | |
| 538 | + tb_invalidate_check(address); | |
| 529 | 539 | #endif |
| 530 | - return 1; | |
| 531 | - } else { | |
| 532 | - return 0; | |
| 540 | + return 1; | |
| 541 | + } | |
| 533 | 542 | } |
| 543 | + return 0; | |
| 534 | 544 | } |
| 535 | 545 | |
| 536 | 546 | /* call this function when system calls directly modify a memory area */ |
| ... | ... | @@ -734,13 +744,17 @@ void cpu_abort(CPUState *env, const char *fmt, ...) |
| 734 | 744 | /* unmap all maped pages and flush all associated code */ |
| 735 | 745 | void page_unmap(void) |
| 736 | 746 | { |
| 737 | - PageDesc *p, *pmap; | |
| 738 | - unsigned long addr; | |
| 739 | - int i, j, ret, j1; | |
| 747 | + PageDesc *pmap; | |
| 748 | + int i; | |
| 740 | 749 | |
| 741 | 750 | for(i = 0; i < L1_SIZE; i++) { |
| 742 | 751 | pmap = l1_map[i]; |
| 743 | 752 | if (pmap) { |
| 753 | +#if !defined(CONFIG_SOFTMMU) | |
| 754 | + PageDesc *p; | |
| 755 | + unsigned long addr; | |
| 756 | + int j, ret, j1; | |
| 757 | + | |
| 744 | 758 | p = pmap; |
| 745 | 759 | for(j = 0;j < L2_SIZE;) { |
| 746 | 760 | if (p->flags & PAGE_VALID) { |
| ... | ... | @@ -763,6 +777,7 @@ void page_unmap(void) |
| 763 | 777 | j++; |
| 764 | 778 | } |
| 765 | 779 | } |
| 780 | +#endif | |
| 766 | 781 | free(pmap); |
| 767 | 782 | l1_map[i] = NULL; |
| 768 | 783 | } |
| ... | ... | @@ -773,7 +788,7 @@ void page_unmap(void) |
| 773 | 788 | |
| 774 | 789 | void tlb_flush(CPUState *env) |
| 775 | 790 | { |
| 776 | -#if defined(TARGET_I386) | |
| 791 | +#if !defined(CONFIG_USER_ONLY) | |
| 777 | 792 | int i; |
| 778 | 793 | for(i = 0; i < CPU_TLB_SIZE; i++) { |
| 779 | 794 | env->tlb_read[0][i].address = -1; |
| ... | ... | @@ -784,16 +799,38 @@ void tlb_flush(CPUState *env) |
| 784 | 799 | #endif |
| 785 | 800 | } |
| 786 | 801 | |
| 802 | +static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr) | |
| 803 | +{ | |
| 804 | + if (addr == (tlb_entry->address & | |
| 805 | + (TARGET_PAGE_MASK | TLB_INVALID_MASK))) | |
| 806 | + tlb_entry->address = -1; | |
| 807 | +} | |
| 808 | + | |
| 787 | 809 | void tlb_flush_page(CPUState *env, uint32_t addr) |
| 788 | 810 | { |
| 789 | -#if defined(TARGET_I386) | |
| 811 | +#if !defined(CONFIG_USER_ONLY) | |
| 812 | + int i; | |
| 813 | + | |
| 814 | + addr &= TARGET_PAGE_MASK; | |
| 815 | + i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); | |
| 816 | + tlb_flush_entry(&env->tlb_read[0][i], addr); | |
| 817 | + tlb_flush_entry(&env->tlb_write[0][i], addr); | |
| 818 | + tlb_flush_entry(&env->tlb_read[1][i], addr); | |
| 819 | + tlb_flush_entry(&env->tlb_write[1][i], addr); | |
| 820 | +#endif | |
| 821 | +} | |
| 822 | + | |
| 823 | +/* make all write to page 'addr' trigger a TLB exception to detect | |
| 824 | + self modifying code */ | |
| 825 | +void tlb_flush_page_write(CPUState *env, uint32_t addr) | |
| 826 | +{ | |
| 827 | +#if !defined(CONFIG_USER_ONLY) | |
| 790 | 828 | int i; |
| 791 | 829 | |
| 830 | + addr &= TARGET_PAGE_MASK; | |
| 792 | 831 | i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
| 793 | - env->tlb_read[0][i].address = -1; | |
| 794 | - env->tlb_write[0][i].address = -1; | |
| 795 | - env->tlb_read[1][i].address = -1; | |
| 796 | - env->tlb_write[1][i].address = -1; | |
| 832 | + tlb_flush_entry(&env->tlb_write[0][i], addr); | |
| 833 | + tlb_flush_entry(&env->tlb_write[1][i], addr); | |
| 797 | 834 | #endif |
| 798 | 835 | } |
| 799 | 836 | |
| ... | ... | @@ -900,3 +937,25 @@ int cpu_register_io_memory(int io_index, |
| 900 | 937 | } |
| 901 | 938 | return io_index << IO_MEM_SHIFT; |
| 902 | 939 | } |
| 940 | + | |
| 941 | +#if !defined(CONFIG_USER_ONLY) | |
| 942 | + | |
| 943 | +#define MMUSUFFIX _cmmu | |
| 944 | +#define GETPC() NULL | |
| 945 | +#define env cpu_single_env | |
| 946 | + | |
| 947 | +#define SHIFT 0 | |
| 948 | +#include "softmmu_template.h" | |
| 949 | + | |
| 950 | +#define SHIFT 1 | |
| 951 | +#include "softmmu_template.h" | |
| 952 | + | |
| 953 | +#define SHIFT 2 | |
| 954 | +#include "softmmu_template.h" | |
| 955 | + | |
| 956 | +#define SHIFT 3 | |
| 957 | +#include "softmmu_template.h" | |
| 958 | + | |
| 959 | +#undef env | |
| 960 | + | |
| 961 | +#endif | |
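The exec.c hunks replace the blanket TLB entry clearing with a tag-checked `tlb_flush_entry()`, and add `tlb_flush_page_write()` so a page holding translated code only traps on writes. The sketch below isolates that tag comparison; the constants and the `CPUTLBEntry` layout are assumptions for illustration, not the definitions used at this revision.

```c
/* Sketch of the soft-TLB invalidation logic added above (illustrative
 * layout and constants, not QEMU's actual definitions). */
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~((1u << TARGET_PAGE_BITS) - 1))
#define TLB_INVALID_MASK (1 << 3)      /* assumed flag bit */
#define CPU_TLB_SIZE     256

typedef struct CPUTLBEntry {
    uint32_t address;   /* page tag plus flag bits, -1 if empty */
    uint32_t addend;    /* host address = guest address + addend */
} CPUTLBEntry;

static void tlb_flush_entry(CPUTLBEntry *e, uint32_t addr)
{
    /* only drop the entry if it really maps this page */
    if (addr == (e->address & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        e->address = (uint32_t)-1;
}

int main(void)
{
    CPUTLBEntry tlb[CPU_TLB_SIZE] = { { 0, 0 } };
    uint32_t addr = 0x00402345 & TARGET_PAGE_MASK;
    int i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    tlb[i].address = addr;              /* pretend the page is cached */
    tlb_flush_entry(&tlb[i], addr);
    printf("entry %d after flush: 0x%08x\n", i, tlb[i].address);
    return 0;
}
```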
hw/vga_template.h
| ... | ... | @@ -354,7 +354,7 @@ static void glue(vga_draw_line15_, DEPTH)(VGAState *s1, uint8_t *d, |
| 354 | 354 | |
| 355 | 355 | w = width; |
| 356 | 356 | do { |
| 357 | - v = lduw((void *)s); | |
| 357 | + v = lduw_raw((void *)s); | |
| 358 | 358 | r = (v >> 7) & 0xf8; |
| 359 | 359 | g = (v >> 2) & 0xf8; |
| 360 | 360 | b = (v << 3) & 0xf8; |
| ... | ... | @@ -379,7 +379,7 @@ static void glue(vga_draw_line16_, DEPTH)(VGAState *s1, uint8_t *d, |
| 379 | 379 | |
| 380 | 380 | w = width; |
| 381 | 381 | do { |
| 382 | - v = lduw((void *)s); | |
| 382 | + v = lduw_raw((void *)s); | |
| 383 | 383 | r = (v >> 8) & 0xf8; |
| 384 | 384 | g = (v >> 3) & 0xfc; |
| 385 | 385 | b = (v << 3) & 0xf8; |
softmmu_header.h
| ... | ... | @@ -19,26 +19,48 @@ |
| 19 | 19 | */ |
| 20 | 20 | #if DATA_SIZE == 8 |
| 21 | 21 | #define SUFFIX q |
| 22 | +#define USUFFIX q | |
| 22 | 23 | #define DATA_TYPE uint64_t |
| 23 | 24 | #elif DATA_SIZE == 4 |
| 24 | 25 | #define SUFFIX l |
| 26 | +#define USUFFIX l | |
| 25 | 27 | #define DATA_TYPE uint32_t |
| 26 | 28 | #elif DATA_SIZE == 2 |
| 27 | 29 | #define SUFFIX w |
| 30 | +#define USUFFIX uw | |
| 28 | 31 | #define DATA_TYPE uint16_t |
| 29 | 32 | #define DATA_STYPE int16_t |
| 30 | 33 | #elif DATA_SIZE == 1 |
| 31 | 34 | #define SUFFIX b |
| 35 | +#define USUFFIX ub | |
| 32 | 36 | #define DATA_TYPE uint8_t |
| 33 | 37 | #define DATA_STYPE int8_t |
| 34 | 38 | #else |
| 35 | 39 | #error unsupported data size |
| 36 | 40 | #endif |
| 37 | 41 | |
| 38 | -#if MEMUSER == 0 | |
| 39 | -#define MEMSUFFIX _kernel | |
| 42 | +#if ACCESS_TYPE == 0 | |
| 43 | + | |
| 44 | +#define CPU_MEM_INDEX 0 | |
| 45 | +#define MMUSUFFIX _mmu | |
| 46 | + | |
| 47 | +#elif ACCESS_TYPE == 1 | |
| 48 | + | |
| 49 | +#define CPU_MEM_INDEX 1 | |
| 50 | +#define MMUSUFFIX _mmu | |
| 51 | + | |
| 52 | +#elif ACCESS_TYPE == 2 | |
| 53 | + | |
| 54 | +#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3) | |
| 55 | +#define MMUSUFFIX _mmu | |
| 56 | + | |
| 57 | +#elif ACCESS_TYPE == 3 | |
| 58 | + | |
| 59 | +#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3) | |
| 60 | +#define MMUSUFFIX _cmmu | |
| 61 | + | |
| 40 | 62 | #else |
| 41 | -#define MEMSUFFIX _user | |
| 63 | +#error invalid ACCESS_TYPE | |
| 42 | 64 | #endif |
| 43 | 65 | |
| 44 | 66 | #if DATA_SIZE == 8 |
| ... | ... | @@ -48,24 +70,26 @@ |
| 48 | 70 | #endif |
| 49 | 71 | |
| 50 | 72 | |
| 51 | -#if MEMUSER == 0 | |
| 52 | -DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr); | |
| 53 | -void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE v); | |
| 54 | -#endif | |
| 73 | +DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr, | |
| 74 | + int is_user); | |
| 75 | +void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr, DATA_TYPE v, int is_user); | |
| 55 | 76 | |
| 56 | -static inline int glue(glue(ldu, SUFFIX), MEMSUFFIX)(void *ptr) | |
| 77 | +static inline int glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr) | |
| 57 | 78 | { |
| 58 | 79 | int index; |
| 59 | 80 | RES_TYPE res; |
| 60 | 81 | unsigned long addr, physaddr; |
| 82 | + int is_user; | |
| 83 | + | |
| 61 | 84 | addr = (unsigned long)ptr; |
| 62 | 85 | index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
| 63 | - if (__builtin_expect(env->tlb_read[MEMUSER][index].address != | |
| 86 | + is_user = CPU_MEM_INDEX; | |
| 87 | + if (__builtin_expect(env->tlb_read[is_user][index].address != | |
| 64 | 88 | (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { |
| 65 | - res = glue(glue(__ld, SUFFIX), _mmu)(addr); | |
| 89 | + res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user); | |
| 66 | 90 | } else { |
| 67 | - physaddr = addr + env->tlb_read[MEMUSER][index].addend; | |
| 68 | - res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr); | |
| 91 | + physaddr = addr + env->tlb_read[is_user][index].addend; | |
| 92 | + res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr); | |
| 69 | 93 | } |
| 70 | 94 | return res; |
| 71 | 95 | } |
| ... | ... | @@ -75,13 +99,16 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(void *ptr) |
| 75 | 99 | { |
| 76 | 100 | int res, index; |
| 77 | 101 | unsigned long addr, physaddr; |
| 102 | + int is_user; | |
| 103 | + | |
| 78 | 104 | addr = (unsigned long)ptr; |
| 79 | 105 | index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
| 80 | - if (__builtin_expect(env->tlb_read[MEMUSER][index].address != | |
| 106 | + is_user = CPU_MEM_INDEX; | |
| 107 | + if (__builtin_expect(env->tlb_read[is_user][index].address != | |
| 81 | 108 | (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { |
| 82 | - res = (DATA_STYPE)glue(glue(__ld, SUFFIX), _mmu)(addr); | |
| 109 | + res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user); | |
| 83 | 110 | } else { |
| 84 | - physaddr = addr + env->tlb_read[MEMUSER][index].addend; | |
| 111 | + physaddr = addr + env->tlb_read[is_user][index].addend; | |
| 85 | 112 | res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr); |
| 86 | 113 | } |
| 87 | 114 | return res; |
| ... | ... | @@ -92,13 +119,16 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE v) |
| 92 | 119 | { |
| 93 | 120 | int index; |
| 94 | 121 | unsigned long addr, physaddr; |
| 122 | + int is_user; | |
| 123 | + | |
| 95 | 124 | addr = (unsigned long)ptr; |
| 96 | 125 | index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
| 97 | - if (__builtin_expect(env->tlb_write[MEMUSER][index].address != | |
| 126 | + is_user = CPU_MEM_INDEX; | |
| 127 | + if (__builtin_expect(env->tlb_write[is_user][index].address != | |
| 98 | 128 | (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { |
| 99 | - glue(glue(__st, SUFFIX), _mmu)(addr, v); | |
| 129 | + glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user); | |
| 100 | 130 | } else { |
| 101 | - physaddr = addr + env->tlb_write[MEMUSER][index].addend; | |
| 131 | + physaddr = addr + env->tlb_write[is_user][index].addend; | |
| 102 | 132 | glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v); |
| 103 | 133 | } |
| 104 | 134 | } |
| ... | ... | @@ -107,5 +137,7 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE v) |
| 107 | 137 | #undef DATA_TYPE |
| 108 | 138 | #undef DATA_STYPE |
| 109 | 139 | #undef SUFFIX |
| 140 | +#undef USUFFIX | |
| 110 | 141 | #undef DATA_SIZE |
| 111 | -#undef MEMSUFFIX | |
| 142 | +#undef CPU_MEM_INDEX | |
| 143 | +#undef MMUSUFFIX | |
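softmmu_header.h stamps out one accessor per data size and per `MEMSUFFIX` by token pasting, so a single template yields `ldub_kernel`, `lduw_user`, `ldl_data`, and so on. The snippet below shows the expansion mechanism in isolation; the `glue`/`xglue` definitions follow the usual QEMU idiom and are reproduced from memory, not from this commit.

```c
/* How the glue() token pasting used above builds the accessor names. */
#include <stdio.h>

#define xglue(x, y) x ## y
#define glue(x, y)  xglue(x, y)

#define USUFFIX   uw
#define MEMSUFFIX _kernel

/* glue(glue(ld, USUFFIX), MEMSUFFIX) expands to lduw_kernel */
static int glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr)
{
    return *(unsigned short *)ptr;   /* stand-in body for the sketch */
}

int main(void)
{
    unsigned short v = 0xbeef;
    printf("lduw_kernel(&v) = 0x%x\n", lduw_kernel(&v));
    return 0;
}
```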
softmmu_template.h
| ... | ... | @@ -21,23 +21,31 @@ |
| 21 | 21 | |
| 22 | 22 | #if DATA_SIZE == 8 |
| 23 | 23 | #define SUFFIX q |
| 24 | +#define USUFFIX q | |
| 24 | 25 | #define DATA_TYPE uint64_t |
| 25 | 26 | #elif DATA_SIZE == 4 |
| 26 | 27 | #define SUFFIX l |
| 28 | +#define USUFFIX l | |
| 27 | 29 | #define DATA_TYPE uint32_t |
| 28 | 30 | #elif DATA_SIZE == 2 |
| 29 | 31 | #define SUFFIX w |
| 32 | +#define USUFFIX uw | |
| 30 | 33 | #define DATA_TYPE uint16_t |
| 31 | 34 | #elif DATA_SIZE == 1 |
| 32 | 35 | #define SUFFIX b |
| 36 | +#define USUFFIX ub | |
| 33 | 37 | #define DATA_TYPE uint8_t |
| 34 | 38 | #else |
| 35 | 39 | #error unsupported data size |
| 36 | 40 | #endif |
| 37 | 41 | |
| 38 | -static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr); | |
| 39 | -static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val, | |
| 40 | - void *retaddr); | |
| 42 | +static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr, | |
| 43 | + int is_user, | |
| 44 | + void *retaddr); | |
| 45 | +static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr, | |
| 46 | + DATA_TYPE val, | |
| 47 | + int is_user, | |
| 48 | + void *retaddr); | |
| 41 | 49 | |
| 42 | 50 | static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr, |
| 43 | 51 | unsigned long tlb_addr) |
| ... | ... | @@ -81,16 +89,16 @@ static inline void glue(io_write, SUFFIX)(unsigned long physaddr, |
| 81 | 89 | } |
| 82 | 90 | |
| 83 | 91 | /* handle all cases except unaligned access which span two pages */ |
| 84 | -DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr) | |
| 92 | +DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr, | |
| 93 | + int is_user) | |
| 85 | 94 | { |
| 86 | 95 | DATA_TYPE res; |
| 87 | - int is_user, index; | |
| 96 | + int index; | |
| 88 | 97 | unsigned long physaddr, tlb_addr; |
| 89 | 98 | void *retaddr; |
| 90 | 99 | |
| 91 | 100 | /* test if there is match for unaligned or IO access */ |
| 92 | 101 | /* XXX: could done more in memory macro in a non portable way */ |
| 93 | - is_user = ((env->hflags & HF_CPL_MASK) == 3); | |
| 94 | 102 | index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
| 95 | 103 | redo: |
| 96 | 104 | tlb_addr = env->tlb_read[is_user][index].address; |
| ... | ... | @@ -104,29 +112,31 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr) |
| 104 | 112 | } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { |
| 105 | 113 | /* slow unaligned access (it spans two pages or IO) */ |
| 106 | 114 | do_unaligned_access: |
| 107 | - retaddr = __builtin_return_address(0); | |
| 108 | - res = glue(slow_ld, SUFFIX)(addr, retaddr); | |
| 115 | + retaddr = GETPC(); | |
| 116 | + res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr, | |
| 117 | + is_user, retaddr); | |
| 109 | 118 | } else { |
| 110 | 119 | /* unaligned access in the same page */ |
| 111 | - res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr); | |
| 120 | + res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr); | |
| 112 | 121 | } |
| 113 | 122 | } else { |
| 114 | 123 | /* the page is not in the TLB : fill it */ |
| 115 | - retaddr = __builtin_return_address(0); | |
| 116 | - tlb_fill(addr, 0, retaddr); | |
| 124 | + retaddr = GETPC(); | |
| 125 | + tlb_fill(addr, 0, is_user, retaddr); | |
| 117 | 126 | goto redo; |
| 118 | 127 | } |
| 119 | 128 | return res; |
| 120 | 129 | } |
| 121 | 130 | |
| 122 | 131 | /* handle all unaligned cases */ |
| 123 | -static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr) | |
| 132 | +static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr, | |
| 133 | + int is_user, | |
| 134 | + void *retaddr) | |
| 124 | 135 | { |
| 125 | 136 | DATA_TYPE res, res1, res2; |
| 126 | - int is_user, index, shift; | |
| 137 | + int index, shift; | |
| 127 | 138 | unsigned long physaddr, tlb_addr, addr1, addr2; |
| 128 | 139 | |
| 129 | - is_user = ((env->hflags & HF_CPL_MASK) == 3); | |
| 130 | 140 | index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
| 131 | 141 | redo: |
| 132 | 142 | tlb_addr = env->tlb_read[is_user][index].address; |
| ... | ... | @@ -142,8 +152,10 @@ static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr) |
| 142 | 152 | /* slow unaligned access (it spans two pages) */ |
| 143 | 153 | addr1 = addr & ~(DATA_SIZE - 1); |
| 144 | 154 | addr2 = addr1 + DATA_SIZE; |
| 145 | - res1 = glue(slow_ld, SUFFIX)(addr1, retaddr); | |
| 146 | - res2 = glue(slow_ld, SUFFIX)(addr2, retaddr); | |
| 155 | + res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1, | |
| 156 | + is_user, retaddr); | |
| 157 | + res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2, | |
| 158 | + is_user, retaddr); | |
| 147 | 159 | shift = (addr & (DATA_SIZE - 1)) * 8; |
| 148 | 160 | #ifdef TARGET_WORDS_BIGENDIAN |
| 149 | 161 | res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift)); |
| ... | ... | @@ -152,24 +164,25 @@ static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr) |
| 152 | 164 | #endif |
| 153 | 165 | } else { |
| 154 | 166 | /* unaligned/aligned access in the same page */ |
| 155 | - res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr); | |
| 167 | + res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr); | |
| 156 | 168 | } |
| 157 | 169 | } else { |
| 158 | 170 | /* the page is not in the TLB : fill it */ |
| 159 | - tlb_fill(addr, 0, retaddr); | |
| 171 | + tlb_fill(addr, 0, is_user, retaddr); | |
| 160 | 172 | goto redo; |
| 161 | 173 | } |
| 162 | 174 | return res; |
| 163 | 175 | } |
| 164 | 176 | |
| 165 | 177 | |
| 166 | -void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE val) | |
| 178 | +void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr, | |
| 179 | + DATA_TYPE val, | |
| 180 | + int is_user) | |
| 167 | 181 | { |
| 168 | 182 | unsigned long physaddr, tlb_addr; |
| 169 | 183 | void *retaddr; |
| 170 | - int is_user, index; | |
| 184 | + int index; | |
| 171 | 185 | |
| 172 | - is_user = ((env->hflags & HF_CPL_MASK) == 3); | |
| 173 | 186 | index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
| 174 | 187 | redo: |
| 175 | 188 | tlb_addr = env->tlb_write[is_user][index].address; |
| ... | ... | @@ -182,28 +195,30 @@ void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE val |
| 182 | 195 | glue(io_write, SUFFIX)(physaddr, val, tlb_addr); |
| 183 | 196 | } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { |
| 184 | 197 | do_unaligned_access: |
| 185 | - retaddr = __builtin_return_address(0); | |
| 186 | - glue(slow_st, SUFFIX)(addr, val, retaddr); | |
| 198 | + retaddr = GETPC(); | |
| 199 | + glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val, | |
| 200 | + is_user, retaddr); | |
| 187 | 201 | } else { |
| 188 | 202 | /* aligned/unaligned access in the same page */ |
| 189 | 203 | glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val); |
| 190 | 204 | } |
| 191 | 205 | } else { |
| 192 | 206 | /* the page is not in the TLB : fill it */ |
| 193 | - retaddr = __builtin_return_address(0); | |
| 194 | - tlb_fill(addr, 1, retaddr); | |
| 207 | + retaddr = GETPC(); | |
| 208 | + tlb_fill(addr, 1, is_user, retaddr); | |
| 195 | 209 | goto redo; |
| 196 | 210 | } |
| 197 | 211 | } |
| 198 | 212 | |
| 199 | 213 | /* handles all unaligned cases */ |
| 200 | -static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val, | |
| 201 | - void *retaddr) | |
| 214 | +static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr, | |
| 215 | + DATA_TYPE val, | |
| 216 | + int is_user, | |
| 217 | + void *retaddr) | |
| 202 | 218 | { |
| 203 | 219 | unsigned long physaddr, tlb_addr; |
| 204 | - int is_user, index, i; | |
| 220 | + int index, i; | |
| 205 | 221 | |
| 206 | - is_user = ((env->hflags & HF_CPL_MASK) == 3); | |
| 207 | 222 | index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
| 208 | 223 | redo: |
| 209 | 224 | tlb_addr = env->tlb_write[is_user][index].address; |
| ... | ... | @@ -219,9 +234,11 @@ static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val, |
| 219 | 234 | /* XXX: not efficient, but simple */ |
| 220 | 235 | for(i = 0;i < DATA_SIZE; i++) { |
| 221 | 236 | #ifdef TARGET_WORDS_BIGENDIAN |
| 222 | - slow_stb(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)), retaddr); | |
| 237 | + glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)), | |
| 238 | + is_user, retaddr); | |
| 223 | 239 | #else |
| 224 | - slow_stb(addr + i, val >> (i * 8), retaddr); | |
| 240 | + glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8), | |
| 241 | + is_user, retaddr); | |
| 225 | 242 | #endif |
| 226 | 243 | } |
| 227 | 244 | } else { |
| ... | ... | @@ -230,7 +247,7 @@ static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val, |
| 230 | 247 | } |
| 231 | 248 | } else { |
| 232 | 249 | /* the page is not in the TLB : fill it */ |
| 233 | - tlb_fill(addr, 1, retaddr); | |
| 250 | + tlb_fill(addr, 1, is_user, retaddr); | |
| 234 | 251 | goto redo; |
| 235 | 252 | } |
| 236 | 253 | } |
| ... | ... | @@ -238,4 +255,5 @@ static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val, |
| 238 | 255 | #undef SHIFT |
| 239 | 256 | #undef DATA_TYPE |
| 240 | 257 | #undef SUFFIX |
| 258 | +#undef USUFFIX | |
| 241 | 259 | #undef DATA_SIZE |
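When an access straddles two pages, the new `slow_ld*` path loads the two aligned halves separately and recombines them with shifts. The standalone sketch below reproduces the little-endian combination from the `#else` branch using a plain buffer instead of the soft TLB, purely as an illustration.

```c
/* Worked sketch of the cross-page recombination in the slow load path
 * above (little-endian case); reads from a plain buffer for simplicity. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DATA_SIZE 4

static uint32_t load_aligned(const uint8_t *buf, unsigned long addr)
{
    uint32_t v;
    memcpy(&v, buf + addr, sizeof(v));  /* stands in for the aligned fast path */
    return v;
}

static uint32_t slow_ld(const uint8_t *buf, unsigned long addr)
{
    unsigned long addr1 = addr & ~(DATA_SIZE - 1);
    unsigned long addr2 = addr1 + DATA_SIZE;
    uint32_t res1 = load_aligned(buf, addr1);
    uint32_t res2 = load_aligned(buf, addr2);
    int shift = (addr & (DATA_SIZE - 1)) * 8;
    /* little-endian combination, as in the template's #else branch */
    return (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
}

int main(void)
{
    uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
    /* unaligned 32-bit load at offset 2: expect 0x66554433 on a LE host */
    printf("slow_ld(buf, 2) = 0x%08x\n", slow_ld(buf, 2));
    return 0;
}
```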
target-i386/exec.h
| ... | ... | @@ -137,8 +137,10 @@ void helper_invlpg(unsigned int addr); |
| 137 | 137 | void cpu_x86_update_cr0(CPUX86State *env); |
| 138 | 138 | void cpu_x86_update_cr3(CPUX86State *env); |
| 139 | 139 | void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr); |
| 140 | -int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write); | |
| 141 | -void tlb_fill(unsigned long addr, int is_write, void *retaddr); | |
| 140 | +int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, | |
| 141 | + int is_write, int is_user, int is_softmmu); | |
| 142 | +void tlb_fill(unsigned long addr, int is_write, int is_user, | |
| 143 | + void *retaddr); | |
| 142 | 144 | void __hidden cpu_lock(void); |
| 143 | 145 | void __hidden cpu_unlock(void); |
| 144 | 146 | void do_interrupt(int intno, int is_int, int error_code, |
| ... | ... | @@ -366,26 +368,30 @@ static inline void load_eflags(int eflags, int update_mask) |
| 366 | 368 | (eflags & update_mask); |
| 367 | 369 | } |
| 368 | 370 | |
| 369 | -/* memory access macros */ | |
| 371 | +/* XXX: move that to a generic header */ | |
| 372 | +#if !defined(CONFIG_USER_ONLY) | |
| 370 | 373 | |
| 371 | -#define ldul ldl | |
| 372 | -#define lduq ldq | |
| 373 | 374 | #define ldul_user ldl_user |
| 374 | 375 | #define ldul_kernel ldl_kernel |
| 375 | 376 | |
| 376 | -#define ldub_raw ldub | |
| 377 | -#define ldsb_raw ldsb | |
| 378 | -#define lduw_raw lduw | |
| 379 | -#define ldsw_raw ldsw | |
| 380 | -#define ldl_raw ldl | |
| 381 | -#define ldq_raw ldq | |
| 377 | +#define ACCESS_TYPE 0 | |
| 378 | +#define MEMSUFFIX _kernel | |
| 379 | +#define DATA_SIZE 1 | |
| 380 | +#include "softmmu_header.h" | |
| 381 | + | |
| 382 | +#define DATA_SIZE 2 | |
| 383 | +#include "softmmu_header.h" | |
| 382 | 384 | |
| 383 | -#define stb_raw stb | |
| 384 | -#define stw_raw stw | |
| 385 | -#define stl_raw stl | |
| 386 | -#define stq_raw stq | |
| 385 | +#define DATA_SIZE 4 | |
| 386 | +#include "softmmu_header.h" | |
| 387 | + | |
| 388 | +#define DATA_SIZE 8 | |
| 389 | +#include "softmmu_header.h" | |
| 390 | +#undef ACCESS_TYPE | |
| 391 | +#undef MEMSUFFIX | |
| 387 | 392 | |
| 388 | -#define MEMUSER 0 | |
| 393 | +#define ACCESS_TYPE 1 | |
| 394 | +#define MEMSUFFIX _user | |
| 389 | 395 | #define DATA_SIZE 1 |
| 390 | 396 | #include "softmmu_header.h" |
| 391 | 397 | |
| ... | ... | @@ -397,9 +403,12 @@ static inline void load_eflags(int eflags, int update_mask) |
| 397 | 403 | |
| 398 | 404 | #define DATA_SIZE 8 |
| 399 | 405 | #include "softmmu_header.h" |
| 406 | +#undef ACCESS_TYPE | |
| 407 | +#undef MEMSUFFIX | |
| 400 | 408 | |
| 401 | -#undef MEMUSER | |
| 402 | -#define MEMUSER 1 | |
| 409 | +/* these access are slower, they must be as rare as possible */ | |
| 410 | +#define ACCESS_TYPE 2 | |
| 411 | +#define MEMSUFFIX _data | |
| 403 | 412 | #define DATA_SIZE 1 |
| 404 | 413 | #include "softmmu_header.h" |
| 405 | 414 | |
| ... | ... | @@ -411,6 +420,59 @@ static inline void load_eflags(int eflags, int update_mask) |
| 411 | 420 | |
| 412 | 421 | #define DATA_SIZE 8 |
| 413 | 422 | #include "softmmu_header.h" |
| 423 | +#undef ACCESS_TYPE | |
| 424 | +#undef MEMSUFFIX | |
| 425 | + | |
| 426 | +#define ldub(p) ldub_data(p) | |
| 427 | +#define ldsb(p) ldsb_data(p) | |
| 428 | +#define lduw(p) lduw_data(p) | |
| 429 | +#define ldsw(p) ldsw_data(p) | |
| 430 | +#define ldl(p) ldl_data(p) | |
| 431 | +#define ldq(p) ldq_data(p) | |
| 432 | + | |
| 433 | +#define stb(p, v) stb_data(p, v) | |
| 434 | +#define stw(p, v) stw_data(p, v) | |
| 435 | +#define stl(p, v) stl_data(p, v) | |
| 436 | +#define stq(p, v) stq_data(p, v) | |
| 437 | + | |
| 438 | +static inline double ldfq(void *ptr) | |
| 439 | +{ | |
| 440 | + union { | |
| 441 | + double d; | |
| 442 | + uint64_t i; | |
| 443 | + } u; | |
| 444 | + u.i = ldq(ptr); | |
| 445 | + return u.d; | |
| 446 | +} | |
| 447 | + | |
| 448 | +static inline void stfq(void *ptr, double v) | |
| 449 | +{ | |
| 450 | + union { | |
| 451 | + double d; | |
| 452 | + uint64_t i; | |
| 453 | + } u; | |
| 454 | + u.d = v; | |
| 455 | + stq(ptr, u.i); | |
| 456 | +} | |
| 414 | 457 | |
| 415 | -#undef MEMUSER | |
| 458 | +static inline float ldfl(void *ptr) | |
| 459 | +{ | |
| 460 | + union { | |
| 461 | + float f; | |
| 462 | + uint32_t i; | |
| 463 | + } u; | |
| 464 | + u.i = ldl(ptr); | |
| 465 | + return u.f; | |
| 466 | +} | |
| 467 | + | |
| 468 | +static inline void stfl(void *ptr, float v) | |
| 469 | +{ | |
| 470 | + union { | |
| 471 | + float f; | |
| 472 | + uint32_t i; | |
| 473 | + } u; | |
| 474 | + u.f = v; | |
| 475 | + stl(ptr, u.i); | |
| 476 | +} | |
| 416 | 477 | |
| 478 | +#endif /* !defined(CONFIG_USER_ONLY) */ | |
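target-i386/exec.h now includes softmmu_header.h three times, generating `_kernel`, `_user` and `_data` accessor families; `ACCESS_TYPE` controls how the soft-TLB index is chosen. The sketch below isolates that selection; `HF_CPL_MASK` and the `hflags` layout are assumptions for illustration only.

```c
/* Sketch of the CPU_MEM_INDEX selection used by ACCESS_TYPE 2 and 3 above. */
#include <stdio.h>

#define HF_CPL_MASK 0x3   /* assumed: low bits of hflags hold the CPL */

struct cpu { unsigned hflags; };

/* ACCESS_TYPE 0/1 hard-code the index (kernel/user accessors);
   ACCESS_TYPE 2/3 derive it from the current privilege level. */
static int cpu_mem_index(const struct cpu *env)
{
    return (env->hflags & HF_CPL_MASK) == 3;
}

int main(void)
{
    struct cpu kernel = { .hflags = 0 }, user = { .hflags = 3 };
    printf("kernel CPL0 -> index %d, user CPL3 -> index %d\n",
           cpu_mem_index(&kernel), cpu_mem_index(&user));
    return 0;
}
```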
target-i386/helper.c
| ... | ... | @@ -153,11 +153,11 @@ static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, |
| 153 | 153 | if (index + (4 << shift) - 1 > env->tr.limit) |
| 154 | 154 | raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc); |
| 155 | 155 | if (shift == 0) { |
| 156 | - *esp_ptr = lduw(env->tr.base + index); | |
| 157 | - *ss_ptr = lduw(env->tr.base + index + 2); | |
| 156 | + *esp_ptr = lduw_kernel(env->tr.base + index); | |
| 157 | + *ss_ptr = lduw_kernel(env->tr.base + index + 2); | |
| 158 | 158 | } else { |
| 159 | - *esp_ptr = ldl(env->tr.base + index); | |
| 160 | - *ss_ptr = lduw(env->tr.base + index + 4); | |
| 159 | + *esp_ptr = ldl_kernel(env->tr.base + index); | |
| 160 | + *ss_ptr = lduw_kernel(env->tr.base + index + 4); | |
| 161 | 161 | } |
| 162 | 162 | } |
| 163 | 163 | |
| ... | ... | @@ -177,8 +177,8 @@ static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, |
| 177 | 177 | if ((index + 7) > dt->limit) |
| 178 | 178 | return -1; |
| 179 | 179 | ptr = dt->base + index; |
| 180 | - *e1_ptr = ldl(ptr); | |
| 181 | - *e2_ptr = ldl(ptr + 4); | |
| 180 | + *e1_ptr = ldl_kernel(ptr); | |
| 181 | + *e2_ptr = ldl_kernel(ptr + 4); | |
| 182 | 182 | return 0; |
| 183 | 183 | } |
| 184 | 184 | |
| ... | ... | @@ -226,8 +226,8 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, |
| 226 | 226 | if (intno * 8 + 7 > dt->limit) |
| 227 | 227 | raise_exception_err(EXCP0D_GPF, intno * 8 + 2); |
| 228 | 228 | ptr = dt->base + intno * 8; |
| 229 | - e1 = ldl(ptr); | |
| 230 | - e2 = ldl(ptr + 4); | |
| 229 | + e1 = ldl_kernel(ptr); | |
| 230 | + e2 = ldl_kernel(ptr + 4); | |
| 231 | 231 | /* check gate type */ |
| 232 | 232 | type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; |
| 233 | 233 | switch(type) { |
| ... | ... | @@ -344,47 +344,47 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, |
| 344 | 344 | int old_eflags; |
| 345 | 345 | if (env->eflags & VM_MASK) { |
| 346 | 346 | ssp -= 4; |
| 347 | - stl(ssp, env->segs[R_GS].selector); | |
| 347 | + stl_kernel(ssp, env->segs[R_GS].selector); | |
| 348 | 348 | ssp -= 4; |
| 349 | - stl(ssp, env->segs[R_FS].selector); | |
| 349 | + stl_kernel(ssp, env->segs[R_FS].selector); | |
| 350 | 350 | ssp -= 4; |
| 351 | - stl(ssp, env->segs[R_DS].selector); | |
| 351 | + stl_kernel(ssp, env->segs[R_DS].selector); | |
| 352 | 352 | ssp -= 4; |
| 353 | - stl(ssp, env->segs[R_ES].selector); | |
| 353 | + stl_kernel(ssp, env->segs[R_ES].selector); | |
| 354 | 354 | } |
| 355 | 355 | if (new_stack) { |
| 356 | 356 | ssp -= 4; |
| 357 | - stl(ssp, old_ss); | |
| 357 | + stl_kernel(ssp, old_ss); | |
| 358 | 358 | ssp -= 4; |
| 359 | - stl(ssp, old_esp); | |
| 359 | + stl_kernel(ssp, old_esp); | |
| 360 | 360 | } |
| 361 | 361 | ssp -= 4; |
| 362 | 362 | old_eflags = compute_eflags(); |
| 363 | - stl(ssp, old_eflags); | |
| 363 | + stl_kernel(ssp, old_eflags); | |
| 364 | 364 | ssp -= 4; |
| 365 | - stl(ssp, old_cs); | |
| 365 | + stl_kernel(ssp, old_cs); | |
| 366 | 366 | ssp -= 4; |
| 367 | - stl(ssp, old_eip); | |
| 367 | + stl_kernel(ssp, old_eip); | |
| 368 | 368 | if (has_error_code) { |
| 369 | 369 | ssp -= 4; |
| 370 | - stl(ssp, error_code); | |
| 370 | + stl_kernel(ssp, error_code); | |
| 371 | 371 | } |
| 372 | 372 | } else { |
| 373 | 373 | if (new_stack) { |
| 374 | 374 | ssp -= 2; |
| 375 | - stw(ssp, old_ss); | |
| 375 | + stw_kernel(ssp, old_ss); | |
| 376 | 376 | ssp -= 2; |
| 377 | - stw(ssp, old_esp); | |
| 377 | + stw_kernel(ssp, old_esp); | |
| 378 | 378 | } |
| 379 | 379 | ssp -= 2; |
| 380 | - stw(ssp, compute_eflags()); | |
| 380 | + stw_kernel(ssp, compute_eflags()); | |
| 381 | 381 | ssp -= 2; |
| 382 | - stw(ssp, old_cs); | |
| 382 | + stw_kernel(ssp, old_cs); | |
| 383 | 383 | ssp -= 2; |
| 384 | - stw(ssp, old_eip); | |
| 384 | + stw_kernel(ssp, old_eip); | |
| 385 | 385 | if (has_error_code) { |
| 386 | 386 | ssp -= 2; |
| 387 | - stw(ssp, error_code); | |
| 387 | + stw_kernel(ssp, error_code); | |
| 388 | 388 | } |
| 389 | 389 | } |
| 390 | 390 | |
| ... | ... | @@ -410,8 +410,8 @@ static void do_interrupt_real(int intno, int is_int, int error_code, |
| 410 | 410 | if (intno * 4 + 3 > dt->limit) |
| 411 | 411 | raise_exception_err(EXCP0D_GPF, intno * 8 + 2); |
| 412 | 412 | ptr = dt->base + intno * 4; |
| 413 | - offset = lduw(ptr); | |
| 414 | - selector = lduw(ptr + 2); | |
| 413 | + offset = lduw_kernel(ptr); | |
| 414 | + selector = lduw_kernel(ptr + 2); | |
| 415 | 415 | esp = ESP; |
| 416 | 416 | ssp = env->segs[R_SS].base; |
| 417 | 417 | if (is_int) |
| ... | ... | @@ -420,11 +420,11 @@ static void do_interrupt_real(int intno, int is_int, int error_code, |
| 420 | 420 | old_eip = env->eip; |
| 421 | 421 | old_cs = env->segs[R_CS].selector; |
| 422 | 422 | esp -= 2; |
| 423 | - stw(ssp + (esp & 0xffff), compute_eflags()); | |
| 423 | + stw_kernel(ssp + (esp & 0xffff), compute_eflags()); | |
| 424 | 424 | esp -= 2; |
| 425 | - stw(ssp + (esp & 0xffff), old_cs); | |
| 425 | + stw_kernel(ssp + (esp & 0xffff), old_cs); | |
| 426 | 426 | esp -= 2; |
| 427 | - stw(ssp + (esp & 0xffff), old_eip); | |
| 427 | + stw_kernel(ssp + (esp & 0xffff), old_eip); | |
| 428 | 428 | |
| 429 | 429 | /* update processor state */ |
| 430 | 430 | ESP = (ESP & ~0xffff) | (esp & 0xffff); |
| ... | ... | @@ -445,7 +445,7 @@ void do_interrupt_user(int intno, int is_int, int error_code, |
| 445 | 445 | |
| 446 | 446 | dt = &env->idt; |
| 447 | 447 | ptr = dt->base + (intno * 8); |
| 448 | - e2 = ldl(ptr + 4); | |
| 448 | + e2 = ldl_kernel(ptr + 4); | |
| 449 | 449 | |
| 450 | 450 | dpl = (e2 >> DESC_DPL_SHIFT) & 3; |
| 451 | 451 | cpl = env->hflags & HF_CPL_MASK; |
| ... | ... | @@ -651,8 +651,8 @@ void helper_lldt_T0(void) |
| 651 | 651 | if ((index + 7) > dt->limit) |
| 652 | 652 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 653 | 653 | ptr = dt->base + index; |
| 654 | - e1 = ldl(ptr); | |
| 655 | - e2 = ldl(ptr + 4); | |
| 654 | + e1 = ldl_kernel(ptr); | |
| 655 | + e2 = ldl_kernel(ptr + 4); | |
| 656 | 656 | if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) |
| 657 | 657 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 658 | 658 | if (!(e2 & DESC_P_MASK)) |
| ... | ... | @@ -684,8 +684,8 @@ void helper_ltr_T0(void) |
| 684 | 684 | if ((index + 7) > dt->limit) |
| 685 | 685 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 686 | 686 | ptr = dt->base + index; |
| 687 | - e1 = ldl(ptr); | |
| 688 | - e2 = ldl(ptr + 4); | |
| 687 | + e1 = ldl_kernel(ptr); | |
| 688 | + e2 = ldl_kernel(ptr + 4); | |
| 689 | 689 | type = (e2 >> DESC_TYPE_SHIFT) & 0xf; |
| 690 | 690 | if ((e2 & DESC_S_MASK) || |
| 691 | 691 | (type != 2 && type != 9)) |
| ... | ... | @@ -694,7 +694,7 @@ void helper_ltr_T0(void) |
| 694 | 694 | raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); |
| 695 | 695 | load_seg_cache_raw_dt(&env->tr, e1, e2); |
| 696 | 696 | e2 |= 0x00000200; /* set the busy bit */ |
| 697 | - stl(ptr + 4, e2); | |
| 697 | + stl_kernel(ptr + 4, e2); | |
| 698 | 698 | } |
| 699 | 699 | env->tr.selector = selector; |
| 700 | 700 | } |
| ... | ... | @@ -813,14 +813,14 @@ void helper_lcall_real_T0_T1(int shift, int next_eip) |
| 813 | 813 | ssp = env->segs[R_SS].base; |
| 814 | 814 | if (shift) { |
| 815 | 815 | esp -= 4; |
| 816 | - stl(ssp + (esp & esp_mask), env->segs[R_CS].selector); | |
| 816 | + stl_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector); | |
| 817 | 817 | esp -= 4; |
| 818 | - stl(ssp + (esp & esp_mask), next_eip); | |
| 818 | + stl_kernel(ssp + (esp & esp_mask), next_eip); | |
| 819 | 819 | } else { |
| 820 | 820 | esp -= 2; |
| 821 | - stw(ssp + (esp & esp_mask), env->segs[R_CS].selector); | |
| 821 | + stw_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector); | |
| 822 | 822 | esp -= 2; |
| 823 | - stw(ssp + (esp & esp_mask), next_eip); | |
| 823 | + stw_kernel(ssp + (esp & esp_mask), next_eip); | |
| 824 | 824 | } |
| 825 | 825 | |
| 826 | 826 | if (!(env->segs[R_SS].flags & DESC_B_MASK)) |
| ... | ... | @@ -873,14 +873,14 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) |
| 873 | 873 | ssp = env->segs[R_SS].base + sp; |
| 874 | 874 | if (shift) { |
| 875 | 875 | ssp -= 4; |
| 876 | - stl(ssp, env->segs[R_CS].selector); | |
| 876 | + stl_kernel(ssp, env->segs[R_CS].selector); | |
| 877 | 877 | ssp -= 4; |
| 878 | - stl(ssp, next_eip); | |
| 878 | + stl_kernel(ssp, next_eip); | |
| 879 | 879 | } else { |
| 880 | 880 | ssp -= 2; |
| 881 | - stw(ssp, env->segs[R_CS].selector); | |
| 881 | + stw_kernel(ssp, env->segs[R_CS].selector); | |
| 882 | 882 | ssp -= 2; |
| 883 | - stw(ssp, next_eip); | |
| 883 | + stw_kernel(ssp, next_eip); | |
| 884 | 884 | } |
| 885 | 885 | sp -= (4 << shift); |
| 886 | 886 | |
| ... | ... | @@ -975,23 +975,23 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) |
| 975 | 975 | ssp = env->segs[R_SS].base + sp; |
| 976 | 976 | if (shift) { |
| 977 | 977 | ssp -= 4; |
| 978 | - stl(ssp, old_ss); | |
| 978 | + stl_kernel(ssp, old_ss); | |
| 979 | 979 | ssp -= 4; |
| 980 | - stl(ssp, old_esp); | |
| 980 | + stl_kernel(ssp, old_esp); | |
| 981 | 981 | ssp -= 4 * param_count; |
| 982 | 982 | for(i = 0; i < param_count; i++) { |
| 983 | - val = ldl(old_ssp + i * 4); | |
| 984 | - stl(ssp + i * 4, val); | |
| 983 | + val = ldl_kernel(old_ssp + i * 4); | |
| 984 | + stl_kernel(ssp + i * 4, val); | |
| 985 | 985 | } |
| 986 | 986 | } else { |
| 987 | 987 | ssp -= 2; |
| 988 | - stw(ssp, old_ss); | |
| 988 | + stw_kernel(ssp, old_ss); | |
| 989 | 989 | ssp -= 2; |
| 990 | - stw(ssp, old_esp); | |
| 990 | + stw_kernel(ssp, old_esp); | |
| 991 | 991 | ssp -= 2 * param_count; |
| 992 | 992 | for(i = 0; i < param_count; i++) { |
| 993 | - val = lduw(old_ssp + i * 2); | |
| 994 | - stw(ssp + i * 2, val); | |
| 993 | + val = lduw_kernel(old_ssp + i * 2); | |
| 994 | + stw_kernel(ssp + i * 2, val); | |
| 995 | 995 | } |
| 996 | 996 | } |
| 997 | 997 | } else { |
| ... | ... | @@ -1004,14 +1004,14 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) |
| 1004 | 1004 | |
| 1005 | 1005 | if (shift) { |
| 1006 | 1006 | ssp -= 4; |
| 1007 | - stl(ssp, env->segs[R_CS].selector); | |
| 1007 | + stl_kernel(ssp, env->segs[R_CS].selector); | |
| 1008 | 1008 | ssp -= 4; |
| 1009 | - stl(ssp, next_eip); | |
| 1009 | + stl_kernel(ssp, next_eip); | |
| 1010 | 1010 | } else { |
| 1011 | 1011 | ssp -= 2; |
| 1012 | - stw(ssp, env->segs[R_CS].selector); | |
| 1012 | + stw_kernel(ssp, env->segs[R_CS].selector); | |
| 1013 | 1013 | ssp -= 2; |
| 1014 | - stw(ssp, next_eip); | |
| 1014 | + stw_kernel(ssp, next_eip); | |
| 1015 | 1015 | } |
| 1016 | 1016 | |
| 1017 | 1017 | sp -= push_size; |
| ... | ... | @@ -1042,14 +1042,14 @@ void helper_iret_real(int shift) |
| 1042 | 1042 | ssp = env->segs[R_SS].base + sp; |
| 1043 | 1043 | if (shift == 1) { |
| 1044 | 1044 | /* 32 bits */ |
| 1045 | - new_eflags = ldl(ssp + 8); | |
| 1046 | - new_cs = ldl(ssp + 4) & 0xffff; | |
| 1047 | - new_eip = ldl(ssp) & 0xffff; | |
| 1045 | + new_eflags = ldl_kernel(ssp + 8); | |
| 1046 | + new_cs = ldl_kernel(ssp + 4) & 0xffff; | |
| 1047 | + new_eip = ldl_kernel(ssp) & 0xffff; | |
| 1048 | 1048 | } else { |
| 1049 | 1049 | /* 16 bits */ |
| 1050 | - new_eflags = lduw(ssp + 4); | |
| 1051 | - new_cs = lduw(ssp + 2); | |
| 1052 | - new_eip = lduw(ssp); | |
| 1050 | + new_eflags = lduw_kernel(ssp + 4); | |
| 1051 | + new_cs = lduw_kernel(ssp + 2); | |
| 1052 | + new_eip = lduw_kernel(ssp); | |
| 1053 | 1053 | } |
| 1054 | 1054 | new_esp = sp + (6 << shift); |
| 1055 | 1055 | ESP = (ESP & 0xffff0000) | |
| ... | ... | @@ -1078,17 +1078,17 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) |
| 1078 | 1078 | if (shift == 1) { |
| 1079 | 1079 | /* 32 bits */ |
| 1080 | 1080 | if (is_iret) |
| 1081 | - new_eflags = ldl(ssp + 8); | |
| 1082 | - new_cs = ldl(ssp + 4) & 0xffff; | |
| 1083 | - new_eip = ldl(ssp); | |
| 1081 | + new_eflags = ldl_kernel(ssp + 8); | |
| 1082 | + new_cs = ldl_kernel(ssp + 4) & 0xffff; | |
| 1083 | + new_eip = ldl_kernel(ssp); | |
| 1084 | 1084 | if (is_iret && (new_eflags & VM_MASK)) |
| 1085 | 1085 | goto return_to_vm86; |
| 1086 | 1086 | } else { |
| 1087 | 1087 | /* 16 bits */ |
| 1088 | 1088 | if (is_iret) |
| 1089 | - new_eflags = lduw(ssp + 4); | |
| 1090 | - new_cs = lduw(ssp + 2); | |
| 1091 | - new_eip = lduw(ssp); | |
| 1089 | + new_eflags = lduw_kernel(ssp + 4); | |
| 1090 | + new_cs = lduw_kernel(ssp + 2); | |
| 1091 | + new_eip = lduw_kernel(ssp); | |
| 1092 | 1092 | } |
| 1093 | 1093 | if ((new_cs & 0xfffc) == 0) |
| 1094 | 1094 | raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); |
| ... | ... | @@ -1124,12 +1124,12 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) |
| 1124 | 1124 | ssp += (4 << shift) + ((2 * is_iret) << shift) + addend; |
| 1125 | 1125 | if (shift == 1) { |
| 1126 | 1126 | /* 32 bits */ |
| 1127 | - new_esp = ldl(ssp); | |
| 1128 | - new_ss = ldl(ssp + 4) & 0xffff; | |
| 1127 | + new_esp = ldl_kernel(ssp); | |
| 1128 | + new_ss = ldl_kernel(ssp + 4) & 0xffff; | |
| 1129 | 1129 | } else { |
| 1130 | 1130 | /* 16 bits */ |
| 1131 | - new_esp = lduw(ssp); | |
| 1132 | - new_ss = lduw(ssp + 2); | |
| 1131 | + new_esp = lduw_kernel(ssp); | |
| 1132 | + new_ss = lduw_kernel(ssp + 2); | |
| 1133 | 1133 | } |
| 1134 | 1134 | |
| 1135 | 1135 | if ((new_ss & 3) != rpl) |
| ... | ... | @@ -1175,12 +1175,12 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) |
| 1175 | 1175 | return; |
| 1176 | 1176 | |
| 1177 | 1177 | return_to_vm86: |
| 1178 | - new_esp = ldl(ssp + 12); | |
| 1179 | - new_ss = ldl(ssp + 16); | |
| 1180 | - new_es = ldl(ssp + 20); | |
| 1181 | - new_ds = ldl(ssp + 24); | |
| 1182 | - new_fs = ldl(ssp + 28); | |
| 1183 | - new_gs = ldl(ssp + 32); | |
| 1178 | + new_esp = ldl_kernel(ssp + 12); | |
| 1179 | + new_ss = ldl_kernel(ssp + 16); | |
| 1180 | + new_es = ldl_kernel(ssp + 20); | |
| 1181 | + new_ds = ldl_kernel(ssp + 24); | |
| 1182 | + new_fs = ldl_kernel(ssp + 28); | |
| 1183 | + new_gs = ldl_kernel(ssp + 32); | |
| 1184 | 1184 | |
| 1185 | 1185 | /* modify processor state */ |
| 1186 | 1186 | load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK); |
| ... | ... | @@ -1770,6 +1770,11 @@ void helper_frstor(uint8_t *ptr, int data32) |
| 1770 | 1770 | } |
| 1771 | 1771 | } |
| 1772 | 1772 | |
| 1773 | +#if !defined(CONFIG_USER_ONLY) | |
| 1774 | + | |
| 1775 | +#define MMUSUFFIX _mmu | |
| 1776 | +#define GETPC() (__builtin_return_address(0)) | |
| 1777 | + | |
| 1773 | 1778 | #define SHIFT 0 |
| 1774 | 1779 | #include "softmmu_template.h" |
| 1775 | 1780 | |
| ... | ... | @@ -1782,22 +1787,41 @@ void helper_frstor(uint8_t *ptr, int data32) |
| 1782 | 1787 | #define SHIFT 3 |
| 1783 | 1788 | #include "softmmu_template.h" |
| 1784 | 1789 | |
| 1785 | -/* try to fill the TLB and return an exception if error */ | |
| 1786 | -void tlb_fill(unsigned long addr, int is_write, void *retaddr) | |
| 1790 | +#endif | |
| 1791 | + | |
| 1792 | +/* try to fill the TLB and return an exception if error. If retaddr is | |
| 1793 | + NULL, it means that the function was called in C code (i.e. not | |
| 1794 | + from generated code or from helper.c) */ | |
| 1795 | +/* XXX: fix it to restore all registers */ | |
| 1796 | +void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr) | |
| 1787 | 1797 | { |
| 1788 | 1798 | TranslationBlock *tb; |
| 1789 | 1799 | int ret; |
| 1790 | 1800 | unsigned long pc; |
| 1791 | - ret = cpu_x86_handle_mmu_fault(env, addr, is_write); | |
| 1801 | + CPUX86State *saved_env; | |
| 1802 | + | |
| 1803 | + /* XXX: hack to restore env in all cases, even if not called from | |
| 1804 | + generated code */ | |
| 1805 | + saved_env = env; | |
| 1806 | + env = cpu_single_env; | |
| 1807 | + if (is_write && page_unprotect(addr)) { | |
| 1808 | + /* nothing more to do: the page was write protected because | |
| 1809 | + there was code in it. page_unprotect() flushed the code. */ | |
| 1810 | + } | |
| 1811 | + | |
| 1812 | + ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1); | |
| 1792 | 1813 | if (ret) { |
| 1793 | - /* now we have a real cpu fault */ | |
| 1794 | - pc = (unsigned long)retaddr; | |
| 1795 | - tb = tb_find_pc(pc); | |
| 1796 | - if (tb) { | |
| 1797 | - /* the PC is inside the translated code. It means that we have | |
| 1798 | - a virtual CPU fault */ | |
| 1799 | - cpu_restore_state(tb, env, pc); | |
| 1814 | + if (retaddr) { | |
| 1815 | + /* now we have a real cpu fault */ | |
| 1816 | + pc = (unsigned long)retaddr; | |
| 1817 | + tb = tb_find_pc(pc); | |
| 1818 | + if (tb) { | |
| 1819 | + /* the PC is inside the translated code. It means that we have | |
| 1820 | + a virtual CPU fault */ | |
| 1821 | + cpu_restore_state(tb, env, pc); | |
| 1822 | + } | |
| 1800 | 1823 | } |
| 1801 | 1824 | raise_exception_err(EXCP0E_PAGE, env->error_code); |
| 1802 | 1825 | } |
| 1826 | + env = saved_env; | |
| 1803 | 1827 | } |
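In helper.c the MMU helpers are instantiated with `GETPC()` defined as `__builtin_return_address(0)`, while the `_cmmu` copies in exec.c use `NULL`, so `tlb_fill()` only tries to map a host PC back to a translated block when one exists. A minimal, QEMU-independent illustration of the return-address trick:

```c
/* Not QEMU code: shows what GETPC() evaluates to inside an MMU helper. */
#include <stdio.h>

__attribute__((noinline)) static void faulting_helper(void)
{
    /* the address QEMU would hand to tb_find_pc() to locate the
       translated block that triggered the fault */
    void *retaddr = __builtin_return_address(0);
    printf("helper was called from host pc %p\n", retaddr);
}

int main(void)
{
    faulting_helper();
    return 0;
}
```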
target-i386/helper2.c
| ... | ... | @@ -210,7 +210,9 @@ void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr) |
| 210 | 210 | flags = page_get_flags(addr); |
| 211 | 211 | if (flags & PAGE_VALID) { |
| 212 | 212 | virt_addr = addr & ~0xfff; |
| 213 | +#if !defined(CONFIG_SOFTMMU) | |
| 213 | 214 | munmap((void *)virt_addr, 4096); |
| 215 | +#endif | |
| 214 | 216 | page_set_flags(virt_addr, virt_addr + 4096, 0); |
| 215 | 217 | } |
| 216 | 218 | } |
| ... | ... | @@ -221,16 +223,14 @@ void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr) |
| 221 | 223 | 1 = generate PF fault |
| 222 | 224 | 2 = soft MMU activation required for this block |
| 223 | 225 | */ |
| 224 | -int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write) | |
| 226 | +int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, | |
| 227 | + int is_write, int is_user, int is_softmmu) | |
| 225 | 228 | { |
| 226 | 229 | uint8_t *pde_ptr, *pte_ptr; |
| 227 | 230 | uint32_t pde, pte, virt_addr; |
| 228 | - int cpl, error_code, is_dirty, is_user, prot, page_size, ret; | |
| 231 | + int error_code, is_dirty, prot, page_size, ret; | |
| 229 | 232 | unsigned long pd; |
| 230 | 233 | |
| 231 | - cpl = env->hflags & HF_CPL_MASK; | |
| 232 | - is_user = (cpl == 3); | |
| 233 | - | |
| 234 | 234 | #ifdef DEBUG_MMU |
| 235 | 235 | printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n", |
| 236 | 236 | addr, is_write, is_user, env->eip); |
| ... | ... | @@ -252,7 +252,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write) |
| 252 | 252 | |
| 253 | 253 | /* page directory entry */ |
| 254 | 254 | pde_ptr = phys_ram_base + ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)); |
| 255 | - pde = ldl(pde_ptr); | |
| 255 | + pde = ldl_raw(pde_ptr); | |
| 256 | 256 | if (!(pde & PG_PRESENT_MASK)) { |
| 257 | 257 | error_code = 0; |
| 258 | 258 | goto do_fault; |
| ... | ... | @@ -274,7 +274,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write) |
| 274 | 274 | pde |= PG_ACCESSED_MASK; |
| 275 | 275 | if (is_dirty) |
| 276 | 276 | pde |= PG_DIRTY_MASK; |
| 277 | - stl(pde_ptr, pde); | |
| 277 | + stl_raw(pde_ptr, pde); | |
| 278 | 278 | } |
| 279 | 279 | |
| 280 | 280 | pte = pde & ~0x003ff000; /* align to 4MB */ |
| ... | ... | @@ -283,12 +283,12 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write) |
| 283 | 283 | } else { |
| 284 | 284 | if (!(pde & PG_ACCESSED_MASK)) { |
| 285 | 285 | pde |= PG_ACCESSED_MASK; |
| 286 | - stl(pde_ptr, pde); | |
| 286 | + stl_raw(pde_ptr, pde); | |
| 287 | 287 | } |
| 288 | 288 | |
| 289 | 289 | /* page directory entry */ |
| 290 | 290 | pte_ptr = phys_ram_base + ((pde & ~0xfff) + ((addr >> 10) & 0xffc)); |
| 291 | - pte = ldl(pte_ptr); | |
| 291 | + pte = ldl_raw(pte_ptr); | |
| 292 | 292 | if (!(pte & PG_PRESENT_MASK)) { |
| 293 | 293 | error_code = 0; |
| 294 | 294 | goto do_fault; |
| ... | ... | @@ -308,7 +308,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write) |
| 308 | 308 | pte |= PG_ACCESSED_MASK; |
| 309 | 309 | if (is_dirty) |
| 310 | 310 | pte |= PG_DIRTY_MASK; |
| 311 | - stl(pte_ptr, pte); | |
| 311 | + stl_raw(pte_ptr, pte); | |
| 312 | 312 | } |
| 313 | 313 | page_size = 4096; |
| 314 | 314 | virt_addr = addr & ~0xfff; |
| ... | ... | @@ -325,7 +325,10 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write) |
| 325 | 325 | } |
| 326 | 326 | |
| 327 | 327 | do_mapping: |
| 328 | - if (env->hflags & HF_SOFTMMU_MASK) { | |
| 328 | +#if !defined(CONFIG_SOFTMMU) | |
| 329 | + if (is_softmmu) | |
| 330 | +#endif | |
| 331 | + { | |
| 329 | 332 | unsigned long paddr, vaddr, address, addend, page_offset; |
| 330 | 333 | int index; |
| 331 | 334 | |
| ... | ... | @@ -352,32 +355,39 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write) |
| 352 | 355 | env->tlb_write[is_user][index].address = address; |
| 353 | 356 | env->tlb_write[is_user][index].addend = addend; |
| 354 | 357 | } |
| 358 | + page_set_flags(vaddr, vaddr + TARGET_PAGE_SIZE, | |
| 359 | + PAGE_VALID | PAGE_EXEC | prot); | |
| 360 | + ret = 0; | |
| 355 | 361 | } |
| 356 | - ret = 0; | |
| 357 | - /* XXX: incorrect for 4MB pages */ | |
| 358 | - pd = physpage_find(pte & ~0xfff); | |
| 359 | - if ((pd & 0xfff) != 0) { | |
| 360 | - /* IO access: no mapping is done as it will be handled by the | |
| 361 | - soft MMU */ | |
| 362 | - if (!(env->hflags & HF_SOFTMMU_MASK)) | |
| 363 | - ret = 2; | |
| 364 | - } else { | |
| 365 | - void *map_addr; | |
| 366 | - map_addr = mmap((void *)virt_addr, page_size, prot, | |
| 367 | - MAP_SHARED | MAP_FIXED, phys_ram_fd, pd); | |
| 368 | - if (map_addr == MAP_FAILED) { | |
| 369 | - fprintf(stderr, | |
| 370 | - "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n", | |
| 371 | - pte & ~0xfff, virt_addr); | |
| 372 | - exit(1); | |
| 373 | - } | |
| 362 | +#if !defined(CONFIG_SOFTMMU) | |
| 363 | + else { | |
| 364 | + ret = 0; | |
| 365 | + /* XXX: incorrect for 4MB pages */ | |
| 366 | + pd = physpage_find(pte & ~0xfff); | |
| 367 | + if ((pd & 0xfff) != 0) { | |
| 368 | + /* IO access: no mapping is done as it will be handled by the | |
| 369 | + soft MMU */ | |
| 370 | + if (!(env->hflags & HF_SOFTMMU_MASK)) | |
| 371 | + ret = 2; | |
| 372 | + } else { | |
| 373 | + void *map_addr; | |
| 374 | + map_addr = mmap((void *)virt_addr, page_size, prot, | |
| 375 | + MAP_SHARED | MAP_FIXED, phys_ram_fd, pd); | |
| 376 | + if (map_addr == MAP_FAILED) { | |
| 377 | + fprintf(stderr, | |
| 378 | + "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n", | |
| 379 | + pte & ~0xfff, virt_addr); | |
| 380 | + exit(1); | |
| 381 | + } | |
| 374 | 382 | #ifdef DEBUG_MMU |
| 375 | - printf("mmaping 0x%08x to virt 0x%08x pse=%d\n", | |
| 376 | - pte & ~0xfff, virt_addr, (page_size != 4096)); | |
| 383 | + printf("mmaping 0x%08x to virt 0x%08x pse=%d\n", | |
| 384 | + pte & ~0xfff, virt_addr, (page_size != 4096)); | |
| 377 | 385 | #endif |
| 378 | - page_set_flags(virt_addr, virt_addr + page_size, | |
| 379 | - PAGE_VALID | PAGE_EXEC | prot); | |
| 386 | + page_set_flags(virt_addr, virt_addr + page_size, | |
| 387 | + PAGE_VALID | PAGE_EXEC | prot); | |
| 388 | + } | |
| 380 | 389 | } |
| 390 | +#endif | |
| 381 | 391 | return ret; |
| 382 | 392 | do_fault_protect: |
| 383 | 393 | error_code = PG_ERROR_P_MASK; |
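The fault handler above fetches the PDE and PTE straight out of guest RAM with ldl_raw/stl_raw. As a rough, self-contained illustration of that two-level walk (4KB pages only; no 4MB pages, protection checks, or accessed/dirty updates), the sketch below reuses the same index arithmetic, (addr >> 20) & ~3 into the directory and (addr >> 10) & 0xffc into the page table; phys_ram_sketch, ldl_phys_sketch and walk_sketch are invented names.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PG_PRESENT_MASK (1 << 0)

static uint8_t phys_ram_sketch[4 * 4096];   /* stands in for phys_ram_base */

static uint32_t ldl_phys_sketch(uint32_t paddr)
{
    uint32_t v;
    memcpy(&v, phys_ram_sketch + paddr, 4);  /* like ldl_raw() on guest RAM */
    return v;
}

/* returns the physical address, or (uint32_t)-1 where the real handler
   would goto do_fault and eventually raise EXCP0E_PAGE */
static uint32_t walk_sketch(uint32_t cr3, uint32_t addr)
{
    uint32_t pde, pte;

    /* page directory entry: top 10 bits of addr index the directory */
    pde = ldl_phys_sketch((cr3 & ~0xfff) + ((addr >> 20) & ~3));
    if (!(pde & PG_PRESENT_MASK))
        return (uint32_t)-1;

    /* page table entry: next 10 bits index the table named by the PDE */
    pte = ldl_phys_sketch((pde & ~0xfff) + ((addr >> 10) & 0xffc));
    if (!(pte & PG_PRESENT_MASK))
        return (uint32_t)-1;

    /* 4KB page: keep the low 12 bits of the virtual address */
    return (pte & ~0xfff) | (addr & 0xfff);
}

int main(void)
{
    /* one directory entry at physical 0x0000 pointing to a table at 0x1000,
       whose first entry maps virtual page 0 to physical page 0x2000 */
    uint32_t pde = 0x1000 | PG_PRESENT_MASK;
    uint32_t pte = 0x2000 | PG_PRESENT_MASK;
    memcpy(phys_ram_sketch + 0x0000, &pde, 4);
    memcpy(phys_ram_sketch + 0x1000, &pte, 4);
    printf("phys = 0x%08x\n", walk_sketch(0, 0x00000123)); /* 0x00002123 */
    return 0;
}

A clear present bit at either level is what the real code turns into a page fault with the matching error code.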
target-i386/op.c
| ... | ... | @@ -376,14 +376,16 @@ void OPPROTO op_andl_A0_ffff(void) |
| 376 | 376 | |
| 377 | 377 | /* memory access */ |
| 378 | 378 | |
| 379 | -#define MEMSUFFIX | |
| 379 | +#define MEMSUFFIX _raw | |
| 380 | 380 | #include "ops_mem.h" |
| 381 | 381 | |
| 382 | +#if !defined(CONFIG_USER_ONLY) | |
| 382 | 383 | #define MEMSUFFIX _user |
| 383 | 384 | #include "ops_mem.h" |
| 384 | 385 | |
| 385 | 386 | #define MEMSUFFIX _kernel |
| 386 | 387 | #include "ops_mem.h" |
| 388 | +#endif | |
| 387 | 389 | |
| 388 | 390 | /* used for bit operations */ |
| 389 | 391 | |
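ops_mem.h is included three times here, each time with a different MEMSUFFIX, so a single template body yields the _raw, _user and _kernel families of memory micro-operations (the latter two only when the soft MMU is compiled in). A compilable toy version of that re-include-with-a-suffix idiom, using a local macro in place of the shared header so it stays self-contained, could look like this; glue_sketch, DEFINE_LDL_SKETCH and the ram_* arrays are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define glue_sketch(x, y) x##y

/* the "template": one body, stamped out once per address-space suffix */
#define DEFINE_LDL_SKETCH(SUFFIX, BANK)                          \
    static uint32_t ram_##BANK[16];                              \
    static uint32_t glue_sketch(ldl, SUFFIX)(int addr)           \
    {                                                            \
        /* each variant reads from its own backing store */      \
        return ram_##BANK[addr & 15];                            \
    }

DEFINE_LDL_SKETCH(_raw, raw)       /* direct host access            */
DEFINE_LDL_SKETCH(_kernel, kern)   /* CPL 0 accesses, soft MMU only */
DEFINE_LDL_SKETCH(_user, user)     /* CPL 3 accesses, soft MMU only */

int main(void)
{
    ram_raw[1] = 7;
    ram_kern[1] = 8;
    ram_user[1] = 9;
    printf("%u %u %u\n", ldl_raw(1), ldl_kernel(1), ldl_user(1));
    return 0;
}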
target-i386/translate.c
| ... | ... | @@ -570,10 +570,10 @@ static GenOpFunc *gen_op_bsx_T0_cc[2][2] = { |
| 570 | 570 | }; |
| 571 | 571 | |
| 572 | 572 | static GenOpFunc *gen_op_lds_T0_A0[3 * 3] = { |
| 573 | - gen_op_ldsb_T0_A0, | |
| 574 | - gen_op_ldsw_T0_A0, | |
| 573 | + gen_op_ldsb_raw_T0_A0, | |
| 574 | + gen_op_ldsw_raw_T0_A0, | |
| 575 | 575 | NULL, |
| 576 | - | |
| 576 | +#ifndef CONFIG_USER_ONLY | |
| 577 | 577 | gen_op_ldsb_kernel_T0_A0, |
| 578 | 578 | gen_op_ldsw_kernel_T0_A0, |
| 579 | 579 | NULL, |
| ... | ... | @@ -581,13 +581,15 @@ static GenOpFunc *gen_op_lds_T0_A0[3 * 3] = { |
| 581 | 581 | gen_op_ldsb_user_T0_A0, |
| 582 | 582 | gen_op_ldsw_user_T0_A0, |
| 583 | 583 | NULL, |
| 584 | +#endif | |
| 584 | 585 | }; |
| 585 | 586 | |
| 586 | 587 | static GenOpFunc *gen_op_ldu_T0_A0[3 * 3] = { |
| 587 | - gen_op_ldub_T0_A0, | |
| 588 | - gen_op_lduw_T0_A0, | |
| 588 | + gen_op_ldub_raw_T0_A0, | |
| 589 | + gen_op_lduw_raw_T0_A0, | |
| 589 | 590 | NULL, |
| 590 | 591 | |
| 592 | +#ifndef CONFIG_USER_ONLY | |
| 591 | 593 | gen_op_ldub_kernel_T0_A0, |
| 592 | 594 | gen_op_lduw_kernel_T0_A0, |
| 593 | 595 | NULL, |
| ... | ... | @@ -595,14 +597,16 @@ static GenOpFunc *gen_op_ldu_T0_A0[3 * 3] = { |
| 595 | 597 | gen_op_ldub_user_T0_A0, |
| 596 | 598 | gen_op_lduw_user_T0_A0, |
| 597 | 599 | NULL, |
| 600 | +#endif | |
| 598 | 601 | }; |
| 599 | 602 | |
| 600 | 603 | /* sign does not matter, except for lidt/lgdt call (TODO: fix it) */ |
| 601 | 604 | static GenOpFunc *gen_op_ld_T0_A0[3 * 3] = { |
| 602 | - gen_op_ldub_T0_A0, | |
| 603 | - gen_op_lduw_T0_A0, | |
| 604 | - gen_op_ldl_T0_A0, | |
| 605 | + gen_op_ldub_raw_T0_A0, | |
| 606 | + gen_op_lduw_raw_T0_A0, | |
| 607 | + gen_op_ldl_raw_T0_A0, | |
| 605 | 608 | |
| 609 | +#ifndef CONFIG_USER_ONLY | |
| 606 | 610 | gen_op_ldub_kernel_T0_A0, |
| 607 | 611 | gen_op_lduw_kernel_T0_A0, |
| 608 | 612 | gen_op_ldl_kernel_T0_A0, |
| ... | ... | @@ -610,13 +614,15 @@ static GenOpFunc *gen_op_ld_T0_A0[3 * 3] = { |
| 610 | 614 | gen_op_ldub_user_T0_A0, |
| 611 | 615 | gen_op_lduw_user_T0_A0, |
| 612 | 616 | gen_op_ldl_user_T0_A0, |
| 617 | +#endif | |
| 613 | 618 | }; |
| 614 | 619 | |
| 615 | 620 | static GenOpFunc *gen_op_ld_T1_A0[3 * 3] = { |
| 616 | - gen_op_ldub_T1_A0, | |
| 617 | - gen_op_lduw_T1_A0, | |
| 618 | - gen_op_ldl_T1_A0, | |
| 621 | + gen_op_ldub_raw_T1_A0, | |
| 622 | + gen_op_lduw_raw_T1_A0, | |
| 623 | + gen_op_ldl_raw_T1_A0, | |
| 619 | 624 | |
| 625 | +#ifndef CONFIG_USER_ONLY | |
| 620 | 626 | gen_op_ldub_kernel_T1_A0, |
| 621 | 627 | gen_op_lduw_kernel_T1_A0, |
| 622 | 628 | gen_op_ldl_kernel_T1_A0, |
| ... | ... | @@ -624,13 +630,15 @@ static GenOpFunc *gen_op_ld_T1_A0[3 * 3] = { |
| 624 | 630 | gen_op_ldub_user_T1_A0, |
| 625 | 631 | gen_op_lduw_user_T1_A0, |
| 626 | 632 | gen_op_ldl_user_T1_A0, |
| 633 | +#endif | |
| 627 | 634 | }; |
| 628 | 635 | |
| 629 | 636 | static GenOpFunc *gen_op_st_T0_A0[3 * 3] = { |
| 630 | - gen_op_stb_T0_A0, | |
| 631 | - gen_op_stw_T0_A0, | |
| 632 | - gen_op_stl_T0_A0, | |
| 637 | + gen_op_stb_raw_T0_A0, | |
| 638 | + gen_op_stw_raw_T0_A0, | |
| 639 | + gen_op_stl_raw_T0_A0, | |
| 633 | 640 | |
| 641 | +#ifndef CONFIG_USER_ONLY | |
| 634 | 642 | gen_op_stb_kernel_T0_A0, |
| 635 | 643 | gen_op_stw_kernel_T0_A0, |
| 636 | 644 | gen_op_stl_kernel_T0_A0, |
| ... | ... | @@ -638,6 +646,7 @@ static GenOpFunc *gen_op_st_T0_A0[3 * 3] = { |
| 638 | 646 | gen_op_stb_user_T0_A0, |
| 639 | 647 | gen_op_stw_user_T0_A0, |
| 640 | 648 | gen_op_stl_user_T0_A0, |
| 649 | +#endif | |
| 641 | 650 | }; |
| 642 | 651 | |
| 643 | 652 | static inline void gen_string_movl_A0_ESI(DisasContext *s) |
| ... | ... | @@ -1176,7 +1185,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ |
| 1176 | 1185 | |
| 1177 | 1186 | if (base == 4) { |
| 1178 | 1187 | havesib = 1; |
| 1179 | - code = ldub(s->pc++); | |
| 1188 | + code = ldub_code(s->pc++); | |
| 1180 | 1189 | scale = (code >> 6) & 3; |
| 1181 | 1190 | index = (code >> 3) & 7; |
| 1182 | 1191 | base = code & 7; |
| ... | ... | @@ -1186,18 +1195,18 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ |
| 1186 | 1195 | case 0: |
| 1187 | 1196 | if (base == 5) { |
| 1188 | 1197 | base = -1; |
| 1189 | - disp = ldl(s->pc); | |
| 1198 | + disp = ldl_code(s->pc); | |
| 1190 | 1199 | s->pc += 4; |
| 1191 | 1200 | } else { |
| 1192 | 1201 | disp = 0; |
| 1193 | 1202 | } |
| 1194 | 1203 | break; |
| 1195 | 1204 | case 1: |
| 1196 | - disp = (int8_t)ldub(s->pc++); | |
| 1205 | + disp = (int8_t)ldub_code(s->pc++); | |
| 1197 | 1206 | break; |
| 1198 | 1207 | default: |
| 1199 | 1208 | case 2: |
| 1200 | - disp = ldl(s->pc); | |
| 1209 | + disp = ldl_code(s->pc); | |
| 1201 | 1210 | s->pc += 4; |
| 1202 | 1211 | break; |
| 1203 | 1212 | } |
| ... | ... | @@ -1229,7 +1238,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ |
| 1229 | 1238 | switch (mod) { |
| 1230 | 1239 | case 0: |
| 1231 | 1240 | if (rm == 6) { |
| 1232 | - disp = lduw(s->pc); | |
| 1241 | + disp = lduw_code(s->pc); | |
| 1233 | 1242 | s->pc += 2; |
| 1234 | 1243 | gen_op_movl_A0_im(disp); |
| 1235 | 1244 | rm = 0; /* avoid SS override */ |
| ... | ... | @@ -1239,11 +1248,11 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ |
| 1239 | 1248 | } |
| 1240 | 1249 | break; |
| 1241 | 1250 | case 1: |
| 1242 | - disp = (int8_t)ldub(s->pc++); | |
| 1251 | + disp = (int8_t)ldub_code(s->pc++); | |
| 1243 | 1252 | break; |
| 1244 | 1253 | default: |
| 1245 | 1254 | case 2: |
| 1246 | - disp = lduw(s->pc); | |
| 1255 | + disp = lduw_code(s->pc); | |
| 1247 | 1256 | s->pc += 2; |
| 1248 | 1257 | break; |
| 1249 | 1258 | } |
| ... | ... | @@ -1337,16 +1346,16 @@ static inline uint32_t insn_get(DisasContext *s, int ot) |
| 1337 | 1346 | |
| 1338 | 1347 | switch(ot) { |
| 1339 | 1348 | case OT_BYTE: |
| 1340 | - ret = ldub(s->pc); | |
| 1349 | + ret = ldub_code(s->pc); | |
| 1341 | 1350 | s->pc++; |
| 1342 | 1351 | break; |
| 1343 | 1352 | case OT_WORD: |
| 1344 | - ret = lduw(s->pc); | |
| 1353 | + ret = lduw_code(s->pc); | |
| 1345 | 1354 | s->pc += 2; |
| 1346 | 1355 | break; |
| 1347 | 1356 | default: |
| 1348 | 1357 | case OT_LONG: |
| 1349 | - ret = ldl(s->pc); | |
| 1358 | + ret = ldl_code(s->pc); | |
| 1350 | 1359 | s->pc += 4; |
| 1351 | 1360 | break; |
| 1352 | 1361 | } |
| ... | ... | @@ -1756,7 +1765,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 1756 | 1765 | dflag = s->code32; |
| 1757 | 1766 | s->override = -1; |
| 1758 | 1767 | next_byte: |
| 1759 | - b = ldub(s->pc); | |
| 1768 | + b = ldub_code(s->pc); | |
| 1760 | 1769 | s->pc++; |
| 1761 | 1770 | /* check prefixes */ |
| 1762 | 1771 | switch (b) { |
| ... | ... | @@ -1814,7 +1823,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 1814 | 1823 | case 0x0f: |
| 1815 | 1824 | /**************************/ |
| 1816 | 1825 | /* extended op code */ |
| 1817 | - b = ldub(s->pc++) | 0x100; | |
| 1826 | + b = ldub_code(s->pc++) | 0x100; | |
| 1818 | 1827 | goto reswitch; |
| 1819 | 1828 | |
| 1820 | 1829 | /**************************/ |
| ... | ... | @@ -1839,7 +1848,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 1839 | 1848 | |
| 1840 | 1849 | switch(f) { |
| 1841 | 1850 | case 0: /* OP Ev, Gv */ |
| 1842 | - modrm = ldub(s->pc++); | |
| 1851 | + modrm = ldub_code(s->pc++); | |
| 1843 | 1852 | reg = ((modrm >> 3) & 7); |
| 1844 | 1853 | mod = (modrm >> 6) & 3; |
| 1845 | 1854 | rm = modrm & 7; |
| ... | ... | @@ -1861,7 +1870,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 1861 | 1870 | gen_op(s, op, ot, opreg); |
| 1862 | 1871 | break; |
| 1863 | 1872 | case 1: /* OP Gv, Ev */ |
| 1864 | - modrm = ldub(s->pc++); | |
| 1873 | + modrm = ldub_code(s->pc++); | |
| 1865 | 1874 | mod = (modrm >> 6) & 3; |
| 1866 | 1875 | reg = ((modrm >> 3) & 7); |
| 1867 | 1876 | rm = modrm & 7; |
| ... | ... | @@ -1895,7 +1904,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 1895 | 1904 | else |
| 1896 | 1905 | ot = dflag ? OT_LONG : OT_WORD; |
| 1897 | 1906 | |
| 1898 | - modrm = ldub(s->pc++); | |
| 1907 | + modrm = ldub_code(s->pc++); | |
| 1899 | 1908 | mod = (modrm >> 6) & 3; |
| 1900 | 1909 | rm = modrm & 7; |
| 1901 | 1910 | op = (modrm >> 3) & 7; |
| ... | ... | @@ -1939,7 +1948,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 1939 | 1948 | else |
| 1940 | 1949 | ot = dflag ? OT_LONG : OT_WORD; |
| 1941 | 1950 | |
| 1942 | - modrm = ldub(s->pc++); | |
| 1951 | + modrm = ldub_code(s->pc++); | |
| 1943 | 1952 | mod = (modrm >> 6) & 3; |
| 1944 | 1953 | rm = modrm & 7; |
| 1945 | 1954 | op = (modrm >> 3) & 7; |
| ... | ... | @@ -2045,7 +2054,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2045 | 2054 | else |
| 2046 | 2055 | ot = dflag ? OT_LONG : OT_WORD; |
| 2047 | 2056 | |
| 2048 | - modrm = ldub(s->pc++); | |
| 2057 | + modrm = ldub_code(s->pc++); | |
| 2049 | 2058 | mod = (modrm >> 6) & 3; |
| 2050 | 2059 | rm = modrm & 7; |
| 2051 | 2060 | op = (modrm >> 3) & 7; |
| ... | ... | @@ -2085,10 +2094,10 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2085 | 2094 | gen_push_T0(s); |
| 2086 | 2095 | gen_eob(s); |
| 2087 | 2096 | break; |
| 2088 | - case 3: /*< lcall Ev */ | |
| 2097 | + case 3: /* lcall Ev */ | |
| 2089 | 2098 | gen_op_ld_T1_A0[ot + s->mem_index](); |
| 2090 | 2099 | gen_op_addl_A0_im(1 << (ot - OT_WORD + 1)); |
| 2091 | - gen_op_ld_T0_A0[OT_WORD + s->mem_index](); | |
| 2100 | + gen_op_ldu_T0_A0[OT_WORD + s->mem_index](); | |
| 2092 | 2101 | do_lcall: |
| 2093 | 2102 | if (s->pe && !s->vm86) { |
| 2094 | 2103 | if (s->cc_op != CC_OP_DYNAMIC) |
| ... | ... | @@ -2109,7 +2118,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2109 | 2118 | case 5: /* ljmp Ev */ |
| 2110 | 2119 | gen_op_ld_T1_A0[ot + s->mem_index](); |
| 2111 | 2120 | gen_op_addl_A0_im(1 << (ot - OT_WORD + 1)); |
| 2112 | - gen_op_lduw_T0_A0(); | |
| 2121 | + gen_op_ldu_T0_A0[OT_WORD + s->mem_index](); | |
| 2113 | 2122 | do_ljmp: |
| 2114 | 2123 | if (s->pe && !s->vm86) { |
| 2115 | 2124 | if (s->cc_op != CC_OP_DYNAMIC) |
| ... | ... | @@ -2138,7 +2147,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2138 | 2147 | else |
| 2139 | 2148 | ot = dflag ? OT_LONG : OT_WORD; |
| 2140 | 2149 | |
| 2141 | - modrm = ldub(s->pc++); | |
| 2150 | + modrm = ldub_code(s->pc++); | |
| 2142 | 2151 | mod = (modrm >> 6) & 3; |
| 2143 | 2152 | rm = modrm & 7; |
| 2144 | 2153 | reg = (modrm >> 3) & 7; |
| ... | ... | @@ -2179,7 +2188,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2179 | 2188 | case 0x69: /* imul Gv, Ev, I */ |
| 2180 | 2189 | case 0x6b: |
| 2181 | 2190 | ot = dflag ? OT_LONG : OT_WORD; |
| 2182 | - modrm = ldub(s->pc++); | |
| 2191 | + modrm = ldub_code(s->pc++); | |
| 2183 | 2192 | reg = ((modrm >> 3) & 7) + OR_EAX; |
| 2184 | 2193 | gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); |
| 2185 | 2194 | if (b == 0x69) { |
| ... | ... | @@ -2206,7 +2215,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2206 | 2215 | ot = OT_BYTE; |
| 2207 | 2216 | else |
| 2208 | 2217 | ot = dflag ? OT_LONG : OT_WORD; |
| 2209 | - modrm = ldub(s->pc++); | |
| 2218 | + modrm = ldub_code(s->pc++); | |
| 2210 | 2219 | reg = (modrm >> 3) & 7; |
| 2211 | 2220 | mod = (modrm >> 6) & 3; |
| 2212 | 2221 | if (mod == 3) { |
| ... | ... | @@ -2233,7 +2242,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2233 | 2242 | ot = OT_BYTE; |
| 2234 | 2243 | else |
| 2235 | 2244 | ot = dflag ? OT_LONG : OT_WORD; |
| 2236 | - modrm = ldub(s->pc++); | |
| 2245 | + modrm = ldub_code(s->pc++); | |
| 2237 | 2246 | reg = (modrm >> 3) & 7; |
| 2238 | 2247 | mod = (modrm >> 6) & 3; |
| 2239 | 2248 | gen_op_mov_TN_reg[ot][1][reg](); |
| ... | ... | @@ -2250,7 +2259,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2250 | 2259 | s->cc_op = CC_OP_SUBB + ot; |
| 2251 | 2260 | break; |
| 2252 | 2261 | case 0x1c7: /* cmpxchg8b */ |
| 2253 | - modrm = ldub(s->pc++); | |
| 2262 | + modrm = ldub_code(s->pc++); | |
| 2254 | 2263 | mod = (modrm >> 6) & 3; |
| 2255 | 2264 | if (mod == 3) |
| 2256 | 2265 | goto illegal_op; |
| ... | ... | @@ -2291,7 +2300,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2291 | 2300 | break; |
| 2292 | 2301 | case 0x8f: /* pop Ev */ |
| 2293 | 2302 | ot = dflag ? OT_LONG : OT_WORD; |
| 2294 | - modrm = ldub(s->pc++); | |
| 2303 | + modrm = ldub_code(s->pc++); | |
| 2295 | 2304 | gen_pop_T0(s); |
| 2296 | 2305 | s->popl_esp_hack = 2 << dflag; |
| 2297 | 2306 | gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1); |
| ... | ... | @@ -2301,9 +2310,9 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2301 | 2310 | case 0xc8: /* enter */ |
| 2302 | 2311 | { |
| 2303 | 2312 | int level; |
| 2304 | - val = lduw(s->pc); | |
| 2313 | + val = lduw_code(s->pc); | |
| 2305 | 2314 | s->pc += 2; |
| 2306 | - level = ldub(s->pc++); | |
| 2315 | + level = ldub_code(s->pc++); | |
| 2307 | 2316 | gen_enter(s, val, level); |
| 2308 | 2317 | } |
| 2309 | 2318 | break; |
| ... | ... | @@ -2369,7 +2378,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2369 | 2378 | ot = OT_BYTE; |
| 2370 | 2379 | else |
| 2371 | 2380 | ot = dflag ? OT_LONG : OT_WORD; |
| 2372 | - modrm = ldub(s->pc++); | |
| 2381 | + modrm = ldub_code(s->pc++); | |
| 2373 | 2382 | reg = (modrm >> 3) & 7; |
| 2374 | 2383 | |
| 2375 | 2384 | /* generate a generic store */ |
| ... | ... | @@ -2381,7 +2390,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2381 | 2390 | ot = OT_BYTE; |
| 2382 | 2391 | else |
| 2383 | 2392 | ot = dflag ? OT_LONG : OT_WORD; |
| 2384 | - modrm = ldub(s->pc++); | |
| 2393 | + modrm = ldub_code(s->pc++); | |
| 2385 | 2394 | mod = (modrm >> 6) & 3; |
| 2386 | 2395 | if (mod != 3) |
| 2387 | 2396 | gen_lea_modrm(s, modrm, ®_addr, &offset_addr); |
| ... | ... | @@ -2398,14 +2407,14 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2398 | 2407 | ot = OT_BYTE; |
| 2399 | 2408 | else |
| 2400 | 2409 | ot = dflag ? OT_LONG : OT_WORD; |
| 2401 | - modrm = ldub(s->pc++); | |
| 2410 | + modrm = ldub_code(s->pc++); | |
| 2402 | 2411 | reg = (modrm >> 3) & 7; |
| 2403 | 2412 | |
| 2404 | 2413 | gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); |
| 2405 | 2414 | gen_op_mov_reg_T0[ot][reg](); |
| 2406 | 2415 | break; |
| 2407 | 2416 | case 0x8e: /* mov seg, Gv */ |
| 2408 | - modrm = ldub(s->pc++); | |
| 2417 | + modrm = ldub_code(s->pc++); | |
| 2409 | 2418 | reg = (modrm >> 3) & 7; |
| 2410 | 2419 | if (reg >= 6 || reg == R_CS) |
| 2411 | 2420 | goto illegal_op; |
| ... | ... | @@ -2422,7 +2431,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2422 | 2431 | } |
| 2423 | 2432 | break; |
| 2424 | 2433 | case 0x8c: /* mov Gv, seg */ |
| 2425 | - modrm = ldub(s->pc++); | |
| 2434 | + modrm = ldub_code(s->pc++); | |
| 2426 | 2435 | reg = (modrm >> 3) & 7; |
| 2427 | 2436 | mod = (modrm >> 6) & 3; |
| 2428 | 2437 | if (reg >= 6) |
| ... | ... | @@ -2444,7 +2453,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2444 | 2453 | d_ot = dflag + OT_WORD; |
| 2445 | 2454 | /* ot is the size of source */ |
| 2446 | 2455 | ot = (b & 1) + OT_BYTE; |
| 2447 | - modrm = ldub(s->pc++); | |
| 2456 | + modrm = ldub_code(s->pc++); | |
| 2448 | 2457 | reg = ((modrm >> 3) & 7) + OR_EAX; |
| 2449 | 2458 | mod = (modrm >> 6) & 3; |
| 2450 | 2459 | rm = modrm & 7; |
| ... | ... | @@ -2481,7 +2490,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2481 | 2490 | |
| 2482 | 2491 | case 0x8d: /* lea */ |
| 2483 | 2492 | ot = dflag ? OT_LONG : OT_WORD; |
| 2484 | - modrm = ldub(s->pc++); | |
| 2493 | + modrm = ldub_code(s->pc++); | |
| 2485 | 2494 | reg = (modrm >> 3) & 7; |
| 2486 | 2495 | /* we must ensure that no segment is added */ |
| 2487 | 2496 | s->override = -1; |
| ... | ... | @@ -2574,7 +2583,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2574 | 2583 | ot = OT_BYTE; |
| 2575 | 2584 | else |
| 2576 | 2585 | ot = dflag ? OT_LONG : OT_WORD; |
| 2577 | - modrm = ldub(s->pc++); | |
| 2586 | + modrm = ldub_code(s->pc++); | |
| 2578 | 2587 | reg = (modrm >> 3) & 7; |
| 2579 | 2588 | mod = (modrm >> 6) & 3; |
| 2580 | 2589 | if (mod == 3) { |
| ... | ... | @@ -2613,7 +2622,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2613 | 2622 | op = R_GS; |
| 2614 | 2623 | do_lxx: |
| 2615 | 2624 | ot = dflag ? OT_LONG : OT_WORD; |
| 2616 | - modrm = ldub(s->pc++); | |
| 2625 | + modrm = ldub_code(s->pc++); | |
| 2617 | 2626 | reg = (modrm >> 3) & 7; |
| 2618 | 2627 | mod = (modrm >> 6) & 3; |
| 2619 | 2628 | if (mod == 3) |
| ... | ... | @@ -2622,7 +2631,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2622 | 2631 | gen_op_ld_T1_A0[ot + s->mem_index](); |
| 2623 | 2632 | gen_op_addl_A0_im(1 << (ot - OT_WORD + 1)); |
| 2624 | 2633 | /* load the segment first to handle exceptions properly */ |
| 2625 | - gen_op_lduw_T0_A0(); | |
| 2634 | + gen_op_ldu_T0_A0[OT_WORD + s->mem_index](); | |
| 2626 | 2635 | gen_movl_seg_T0(s, op, pc_start - s->cs_base); |
| 2627 | 2636 | /* then put the data */ |
| 2628 | 2637 | gen_op_mov_reg_T1[ot][reg](); |
| ... | ... | @@ -2645,7 +2654,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2645 | 2654 | else |
| 2646 | 2655 | ot = dflag ? OT_LONG : OT_WORD; |
| 2647 | 2656 | |
| 2648 | - modrm = ldub(s->pc++); | |
| 2657 | + modrm = ldub_code(s->pc++); | |
| 2649 | 2658 | mod = (modrm >> 6) & 3; |
| 2650 | 2659 | rm = modrm & 7; |
| 2651 | 2660 | op = (modrm >> 3) & 7; |
| ... | ... | @@ -2662,7 +2671,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2662 | 2671 | gen_shift(s, op, ot, opreg, OR_ECX); |
| 2663 | 2672 | } else { |
| 2664 | 2673 | if (shift == 2) { |
| 2665 | - shift = ldub(s->pc++); | |
| 2674 | + shift = ldub_code(s->pc++); | |
| 2666 | 2675 | } |
| 2667 | 2676 | gen_shifti(s, op, ot, opreg, shift); |
| 2668 | 2677 | } |
| ... | ... | @@ -2696,7 +2705,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2696 | 2705 | shift = 0; |
| 2697 | 2706 | do_shiftd: |
| 2698 | 2707 | ot = dflag ? OT_LONG : OT_WORD; |
| 2699 | - modrm = ldub(s->pc++); | |
| 2708 | + modrm = ldub_code(s->pc++); | |
| 2700 | 2709 | mod = (modrm >> 6) & 3; |
| 2701 | 2710 | rm = modrm & 7; |
| 2702 | 2711 | reg = (modrm >> 3) & 7; |
| ... | ... | @@ -2710,7 +2719,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2710 | 2719 | gen_op_mov_TN_reg[ot][1][reg](); |
| 2711 | 2720 | |
| 2712 | 2721 | if (shift) { |
| 2713 | - val = ldub(s->pc++); | |
| 2722 | + val = ldub_code(s->pc++); | |
| 2714 | 2723 | val &= 0x1f; |
| 2715 | 2724 | if (val) { |
| 2716 | 2725 | if (mod == 3) |
| ... | ... | @@ -2739,7 +2748,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 2739 | 2748 | /************************/ |
| 2740 | 2749 | /* floats */ |
| 2741 | 2750 | case 0xd8 ... 0xdf: |
| 2742 | - modrm = ldub(s->pc++); | |
| 2751 | + modrm = ldub_code(s->pc++); | |
| 2743 | 2752 | mod = (modrm >> 6) & 3; |
| 2744 | 2753 | rm = modrm & 7; |
| 2745 | 2754 | op = ((b & 7) << 3) | ((modrm >> 3) & 7); |
| ... | ... | @@ -3256,7 +3265,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3256 | 3265 | ot = OT_BYTE; |
| 3257 | 3266 | else |
| 3258 | 3267 | ot = dflag ? OT_LONG : OT_WORD; |
| 3259 | - val = ldub(s->pc++); | |
| 3268 | + val = ldub_code(s->pc++); | |
| 3260 | 3269 | gen_op_movl_T0_im(val); |
| 3261 | 3270 | gen_op_in[ot](); |
| 3262 | 3271 | gen_op_mov_reg_T1[ot][R_EAX](); |
| ... | ... | @@ -3271,7 +3280,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3271 | 3280 | ot = OT_BYTE; |
| 3272 | 3281 | else |
| 3273 | 3282 | ot = dflag ? OT_LONG : OT_WORD; |
| 3274 | - val = ldub(s->pc++); | |
| 3283 | + val = ldub_code(s->pc++); | |
| 3275 | 3284 | gen_op_movl_T0_im(val); |
| 3276 | 3285 | gen_op_mov_TN_reg[ot][1][R_EAX](); |
| 3277 | 3286 | gen_op_out[ot](); |
| ... | ... | @@ -3309,7 +3318,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3309 | 3318 | /************************/ |
| 3310 | 3319 | /* control */ |
| 3311 | 3320 | case 0xc2: /* ret im */ |
| 3312 | - val = ldsw(s->pc); | |
| 3321 | + val = ldsw_code(s->pc); | |
| 3313 | 3322 | s->pc += 2; |
| 3314 | 3323 | gen_pop_T0(s); |
| 3315 | 3324 | gen_stack_update(s, val + (2 << s->dflag)); |
| ... | ... | @@ -3327,7 +3336,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3327 | 3336 | gen_eob(s); |
| 3328 | 3337 | break; |
| 3329 | 3338 | case 0xca: /* lret im */ |
| 3330 | - val = ldsw(s->pc); | |
| 3339 | + val = ldsw_code(s->pc); | |
| 3331 | 3340 | s->pc += 2; |
| 3332 | 3341 | do_lret: |
| 3333 | 3342 | if (s->pe && !s->vm86) { |
| ... | ... | @@ -3443,13 +3452,13 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3443 | 3452 | break; |
| 3444 | 3453 | |
| 3445 | 3454 | case 0x190 ... 0x19f: /* setcc Gv */ |
| 3446 | - modrm = ldub(s->pc++); | |
| 3455 | + modrm = ldub_code(s->pc++); | |
| 3447 | 3456 | gen_setcc(s, b); |
| 3448 | 3457 | gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1); |
| 3449 | 3458 | break; |
| 3450 | 3459 | case 0x140 ... 0x14f: /* cmov Gv, Ev */ |
| 3451 | 3460 | ot = dflag ? OT_LONG : OT_WORD; |
| 3452 | - modrm = ldub(s->pc++); | |
| 3461 | + modrm = ldub_code(s->pc++); | |
| 3453 | 3462 | reg = (modrm >> 3) & 7; |
| 3454 | 3463 | mod = (modrm >> 6) & 3; |
| 3455 | 3464 | gen_setcc(s, b); |
| ... | ... | @@ -3542,7 +3551,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3542 | 3551 | /* bit operations */ |
| 3543 | 3552 | case 0x1ba: /* bt/bts/btr/btc Gv, im */ |
| 3544 | 3553 | ot = dflag ? OT_LONG : OT_WORD; |
| 3545 | - modrm = ldub(s->pc++); | |
| 3554 | + modrm = ldub_code(s->pc++); | |
| 3546 | 3555 | op = (modrm >> 3) & 7; |
| 3547 | 3556 | mod = (modrm >> 6) & 3; |
| 3548 | 3557 | rm = modrm & 7; |
| ... | ... | @@ -3553,7 +3562,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3553 | 3562 | gen_op_mov_TN_reg[ot][0][rm](); |
| 3554 | 3563 | } |
| 3555 | 3564 | /* load shift */ |
| 3556 | - val = ldub(s->pc++); | |
| 3565 | + val = ldub_code(s->pc++); | |
| 3557 | 3566 | gen_op_movl_T1_im(val); |
| 3558 | 3567 | if (op < 4) |
| 3559 | 3568 | goto illegal_op; |
| ... | ... | @@ -3581,7 +3590,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3581 | 3590 | op = 3; |
| 3582 | 3591 | do_btx: |
| 3583 | 3592 | ot = dflag ? OT_LONG : OT_WORD; |
| 3584 | - modrm = ldub(s->pc++); | |
| 3593 | + modrm = ldub_code(s->pc++); | |
| 3585 | 3594 | reg = (modrm >> 3) & 7; |
| 3586 | 3595 | mod = (modrm >> 6) & 3; |
| 3587 | 3596 | rm = modrm & 7; |
| ... | ... | @@ -3610,7 +3619,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3610 | 3619 | case 0x1bc: /* bsf */ |
| 3611 | 3620 | case 0x1bd: /* bsr */ |
| 3612 | 3621 | ot = dflag ? OT_LONG : OT_WORD; |
| 3613 | - modrm = ldub(s->pc++); | |
| 3622 | + modrm = ldub_code(s->pc++); | |
| 3614 | 3623 | reg = (modrm >> 3) & 7; |
| 3615 | 3624 | gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); |
| 3616 | 3625 | gen_op_bsx_T0_cc[ot - OT_WORD][b & 1](); |
| ... | ... | @@ -3646,12 +3655,12 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3646 | 3655 | s->cc_op = CC_OP_EFLAGS; |
| 3647 | 3656 | break; |
| 3648 | 3657 | case 0xd4: /* aam */ |
| 3649 | - val = ldub(s->pc++); | |
| 3658 | + val = ldub_code(s->pc++); | |
| 3650 | 3659 | gen_op_aam(val); |
| 3651 | 3660 | s->cc_op = CC_OP_LOGICB; |
| 3652 | 3661 | break; |
| 3653 | 3662 | case 0xd5: /* aad */ |
| 3654 | - val = ldub(s->pc++); | |
| 3663 | + val = ldub_code(s->pc++); | |
| 3655 | 3664 | gen_op_aad(val); |
| 3656 | 3665 | s->cc_op = CC_OP_LOGICB; |
| 3657 | 3666 | break; |
| ... | ... | @@ -3665,7 +3674,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3665 | 3674 | gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); |
| 3666 | 3675 | break; |
| 3667 | 3676 | case 0xcd: /* int N */ |
| 3668 | - val = ldub(s->pc++); | |
| 3677 | + val = ldub_code(s->pc++); | |
| 3669 | 3678 | /* XXX: add error code for vm86 GPF */ |
| 3670 | 3679 | if (!s->vm86) |
| 3671 | 3680 | gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); |
| ... | ... | @@ -3718,7 +3727,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3718 | 3727 | break; |
| 3719 | 3728 | case 0x62: /* bound */ |
| 3720 | 3729 | ot = dflag ? OT_LONG : OT_WORD; |
| 3721 | - modrm = ldub(s->pc++); | |
| 3730 | + modrm = ldub_code(s->pc++); | |
| 3722 | 3731 | reg = (modrm >> 3) & 7; |
| 3723 | 3732 | mod = (modrm >> 6) & 3; |
| 3724 | 3733 | if (mod == 3) |
| ... | ... | @@ -3785,7 +3794,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3785 | 3794 | } |
| 3786 | 3795 | break; |
| 3787 | 3796 | case 0x100: |
| 3788 | - modrm = ldub(s->pc++); | |
| 3797 | + modrm = ldub_code(s->pc++); | |
| 3789 | 3798 | mod = (modrm >> 6) & 3; |
| 3790 | 3799 | op = (modrm >> 3) & 7; |
| 3791 | 3800 | switch(op) { |
| ... | ... | @@ -3828,7 +3837,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3828 | 3837 | } |
| 3829 | 3838 | break; |
| 3830 | 3839 | case 0x101: |
| 3831 | - modrm = ldub(s->pc++); | |
| 3840 | + modrm = ldub_code(s->pc++); | |
| 3832 | 3841 | mod = (modrm >> 6) & 3; |
| 3833 | 3842 | op = (modrm >> 3) & 7; |
| 3834 | 3843 | switch(op) { |
| ... | ... | @@ -3904,7 +3913,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3904 | 3913 | if (!s->pe || s->vm86) |
| 3905 | 3914 | goto illegal_op; |
| 3906 | 3915 | ot = dflag ? OT_LONG : OT_WORD; |
| 3907 | - modrm = ldub(s->pc++); | |
| 3916 | + modrm = ldub_code(s->pc++); | |
| 3908 | 3917 | reg = (modrm >> 3) & 7; |
| 3909 | 3918 | gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); |
| 3910 | 3919 | gen_op_mov_TN_reg[ot][1][reg](); |
| ... | ... | @@ -3918,7 +3927,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3918 | 3927 | gen_op_mov_reg_T1[ot][reg](); |
| 3919 | 3928 | break; |
| 3920 | 3929 | case 0x118: |
| 3921 | - modrm = ldub(s->pc++); | |
| 3930 | + modrm = ldub_code(s->pc++); | |
| 3922 | 3931 | mod = (modrm >> 6) & 3; |
| 3923 | 3932 | op = (modrm >> 3) & 7; |
| 3924 | 3933 | switch(op) { |
| ... | ... | @@ -3940,7 +3949,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3940 | 3949 | if (s->cpl != 0) { |
| 3941 | 3950 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); |
| 3942 | 3951 | } else { |
| 3943 | - modrm = ldub(s->pc++); | |
| 3952 | + modrm = ldub_code(s->pc++); | |
| 3944 | 3953 | if ((modrm & 0xc0) != 0xc0) |
| 3945 | 3954 | goto illegal_op; |
| 3946 | 3955 | rm = modrm & 7; |
| ... | ... | @@ -3970,7 +3979,7 @@ static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start) |
| 3970 | 3979 | if (s->cpl != 0) { |
| 3971 | 3980 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); |
| 3972 | 3981 | } else { |
| 3973 | - modrm = ldub(s->pc++); | |
| 3982 | + modrm = ldub_code(s->pc++); | |
| 3974 | 3983 | if ((modrm & 0xc0) != 0xc0) |
| 3975 | 3984 | goto illegal_op; |
| 3976 | 3985 | rm = modrm & 7; |
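The translate.c tables above are laid out as 3 sizes by 3 address spaces and indexed with ot + s->mem_index, which implies mem_index steps in multiples of 3: 0 would select the _raw ops, 3 the _kernel ops and 6 the _user ops when the soft MMU variants exist (mem_index assignment is not shown in this diff, so treat the exact values as an assumption). A minimal sketch of that dispatch with invented op functions:

#include <stdio.h>

enum { OT_BYTE = 0, OT_WORD = 1, OT_LONG = 2 };

typedef void (*GenOpFuncSketch)(void);

static void ldub_raw(void)    { puts("ldub_raw"); }
static void lduw_raw(void)    { puts("lduw_raw"); }
static void ldl_raw(void)     { puts("ldl_raw"); }
static void ldub_kernel(void) { puts("ldub_kernel"); }
static void lduw_kernel(void) { puts("lduw_kernel"); }
static void ldl_kernel(void)  { puts("ldl_kernel"); }
static void ldub_user(void)   { puts("ldub_user"); }
static void lduw_user(void)   { puts("lduw_user"); }
static void ldl_user(void)    { puts("ldl_user"); }

/* rows: raw, kernel, user -- same shape as gen_op_ld_T0_A0[3 * 3] above */
static GenOpFuncSketch gen_op_ld_sketch[3 * 3] = {
    ldub_raw,    lduw_raw,    ldl_raw,
    ldub_kernel, lduw_kernel, ldl_kernel,
    ldub_user,   lduw_user,   ldl_user,
};

int main(void)
{
    int mem_index = 2 * 3;              /* e.g. CPL 3 code under the soft MMU */
    int ot = OT_LONG;                   /* 32-bit operand size */
    gen_op_ld_sketch[ot + mem_index](); /* prints "ldl_user" */
    return 0;
}

Keeping size and address space in one flat table lets the translator pick the right generated op with a single addition, which is why the lcall/ljmp/lxx paths above switch from gen_op_lduw_T0_A0() to gen_op_ldu_T0_A0[OT_WORD + s->mem_index]().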