Commit 3f3373166227b13e762e20d2fb51eadfa6a2d653
Parent: d05e66d2

pop ss, mov ss, x and sti disable irqs for the next instruction - began dispatch optimization by adding new x86 cpu 'hidden' flags

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@372 c046a42c-6fe2-441c-8c8c-71466251a162

Showing 8 changed files with 109 additions and 58 deletions.
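The heart of the change is the one-instruction "interrupt shadow" that x86 requires after pop ss, mov ss and sti. As an orientation sketch only — the helper name below is hypothetical, but the condition is exactly the one this commit adds to cpu_exec() in the first hunk:

    #include "cpu-i386.h"   /* CPUX86State, IF_MASK, HF_INHIBIT_IRQ_MASK,
                               CPU_INTERRUPT_HARD */

    /* hypothetical helper, for illustration: a hardware interrupt may be
       dispatched only if IF is set in eflags AND the previous instruction
       (pop ss, mov ss or sti) did not arm the one-instruction inhibit bit
       in the new hidden flags */
    static inline int irq_can_be_dispatched(CPUX86State *env,
                                            int interrupt_request)
    {
        return (interrupt_request & CPU_INTERRUPT_HARD) &&
               (env->eflags & IF_MASK) &&
               !(env->hflags & HF_INHIBIT_IRQ_MASK);
    }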
cpu-exec.c
@@ -186,7 +186,8 @@ int cpu_exec(CPUState *env1)
 #if defined(TARGET_I386)
             /* if hardware interrupt pending, we execute it */
             if ((interrupt_request & CPU_INTERRUPT_HARD) &&
-                (env->eflags & IF_MASK)) {
+                (env->eflags & IF_MASK) &&
+                !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                 int intno;
                 intno = cpu_x86_get_pic_interrupt(env);
                 if (loglevel) {
@@ -233,21 +234,20 @@ int cpu_exec(CPUState *env1)
 #endif
             }
 #endif
-            /* we compute the CPU state. We assume it will not
-               change during the whole generated block. */
+            /* we record a subset of the CPU state. It will
+               always be the same before a given translated block
+               is executed. */
 #if defined(TARGET_I386)
             flags = (env->segs[R_CS].flags & DESC_B_MASK)
-                >> (DESC_B_SHIFT - GEN_FLAG_CODE32_SHIFT);
+                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
             flags |= (env->segs[R_SS].flags & DESC_B_MASK)
-                >> (DESC_B_SHIFT - GEN_FLAG_SS32_SHIFT);
+                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
             flags |= (((unsigned long)env->segs[R_DS].base |
                        (unsigned long)env->segs[R_ES].base |
                        (unsigned long)env->segs[R_SS].base) != 0) <<
-                GEN_FLAG_ADDSEG_SHIFT;
-            flags |= env->cpl << GEN_FLAG_CPL_SHIFT;
-            flags |= env->soft_mmu << GEN_FLAG_SOFT_MMU_SHIFT;
-            flags |= (env->eflags & VM_MASK) >> (17 - GEN_FLAG_VM_SHIFT);
-            flags |= (env->eflags & (IOPL_MASK | TF_MASK));
+                HF_ADDSEG_SHIFT;
+            flags |= env->hflags;
+            flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
             cs_base = env->segs[R_CS].base;
             pc = cs_base + env->eip;
 #elif defined(TARGET_ARM)
@@ -337,8 +337,8 @@ int cpu_exec(CPUState *env1)
             /* reset soft MMU for next block (it can currently
                only be set by a memory fault) */
 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
-            if (env->soft_mmu) {
-                env->soft_mmu = 0;
+            if (env->hflags & HF_SOFTMMU_MASK) {
+                env->hflags &= ~HF_SOFTMMU_MASK;
                 /* do not allow linking to another block */
                 T0 = 0;
             }
@@ -499,7 +499,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
         raise_exception_err(EXCP0E_PAGE, env->error_code);
     } else {
         /* activate soft MMU for this block */
-        env->soft_mmu = 1;
+        env->hflags |= HF_SOFTMMU_MASK;
         sigprocmask(SIG_SETMASK, old_set, NULL);
         cpu_loop_exit();
     }
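The commit message mentions a dispatch optimization: with the hidden flags folded into the per-block flags word, the (pc, cs_base, flags) triple fully identifies a translated block. A hedged sketch of such a lookup — the structure fields, hash array and function name are assumptions for illustration, not code from this commit:

    #include <stddef.h>

    /* sketch only: field names and hash layout are assumed */
    typedef struct TranslationBlock {
        unsigned long pc;        /* CS base + EIP at block start */
        unsigned long cs_base;
        unsigned int flags;      /* hflags | (eflags & (IOPL|TF|VM)) etc. */
        struct TranslationBlock *hash_next;
    } TranslationBlock;

    #define TB_HASH_SIZE (1 << 15)
    static TranslationBlock *tb_hash[TB_HASH_SIZE];

    static TranslationBlock *tb_lookup(unsigned long pc,
                                       unsigned long cs_base,
                                       unsigned int flags)
    {
        TranslationBlock *tb;
        for (tb = tb_hash[pc & (TB_HASH_SIZE - 1)]; tb != NULL;
             tb = tb->hash_next) {
            /* all three must match: the same PC translated under a
               different CPL/CS32/SS32/ADDSEG state is a different block */
            if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
                return tb;
        }
        return NULL;
    }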
cpu-i386.h
@@ -73,6 +73,10 @@
 #define CC_S 0x0080
 #define CC_O 0x0800
 
+#define TF_SHIFT 8
+#define IOPL_SHIFT 12
+#define VM_SHIFT 17
+
 #define TF_MASK 0x00000100
 #define IF_MASK 0x00000200
 #define DF_MASK 0x00000400
@@ -85,6 +89,29 @@
 #define VIP_MASK 0x00100000
 #define ID_MASK 0x00200000
 
+/* hidden flags - used internally by qemu to represent additional cpu
+   state. Only the CPL and INHIBIT_IRQ flags are not redundant. We avoid
+   using the IOPL_MASK, TF_MASK and VM_MASK bit positions to ease oring
+   with eflags. */
+/* current cpl */
+#define HF_CPL_SHIFT 0
+/* true if soft mmu is being used */
+#define HF_SOFTMMU_SHIFT 2
+/* true if hardware interrupts must be disabled for next instruction */
+#define HF_INHIBIT_IRQ_SHIFT 3
+/* 16 or 32 bit segments */
+#define HF_CS32_SHIFT 4
+#define HF_SS32_SHIFT 5
+/* zero base for DS, ES and SS */
+#define HF_ADDSEG_SHIFT 6
+
+#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
+#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
+#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
+#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
+#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
+#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
+
 #define CR0_PE_MASK (1 << 0)
 #define CR0_TS_MASK (1 << 3)
 #define CR0_WP_MASK (1 << 16)
@@ -226,6 +253,7 @@ typedef struct CPUX86State {
     uint32_t cc_dst;
     uint32_t cc_op;
     int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
+    uint32_t hflags; /* hidden flags, see HF_xxx constants */
 
     /* FPU state */
     unsigned int fpstt; /* top of stack index */
@@ -249,8 +277,6 @@ typedef struct CPUX86State {
     SegmentCache tr;
     SegmentCache gdt; /* only base and limit are used */
     SegmentCache idt; /* only base and limit are used */
-    int cpl; /* current cpl */
-    int soft_mmu; /* TRUE if soft mmu is being used */
 
     /* sysenter registers */
     uint32_t sysenter_cs;
@@ -303,7 +329,11 @@ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
 /* wrapper, just in case memory mappings must be changed */
 static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
 {
-    s->cpl = cpl;
+#if HF_CPL_MASK == 3
+    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
+#else
+#error HF_CPL_MASK is hardcoded
+#endif
 }
 
 /* simulate fsave/frstor */
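Since the CPL now lives in bits 0-1 of hflags, reading it back is a single mask. A small sketch pairing the new cpu_x86_set_cpl() with a getter — the getter is hypothetical, not part of this commit, which open-codes the mask everywhere:

    #include "cpu-i386.h"   /* CPUX86State, HF_CPL_MASK, HF_CPL_SHIFT */

    /* hypothetical getter mirroring cpu_x86_set_cpl() above */
    static inline int cpu_x86_get_cpl(CPUX86State *s)
    {
        return (s->hflags & HF_CPL_MASK) >> HF_CPL_SHIFT;  /* shift is 0 */
    }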
exec.h
@@ -61,16 +61,6 @@ extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
 
 #if defined(TARGET_I386)
 
-#define GEN_FLAG_CODE32_SHIFT 0
-#define GEN_FLAG_ADDSEG_SHIFT 1
-#define GEN_FLAG_SS32_SHIFT 2
-#define GEN_FLAG_VM_SHIFT 3
-#define GEN_FLAG_ST_SHIFT 4
-#define GEN_FLAG_TF_SHIFT 8 /* same position as eflags */
-#define GEN_FLAG_CPL_SHIFT 9
-#define GEN_FLAG_SOFT_MMU_SHIFT 11
-#define GEN_FLAG_IOPL_SHIFT 12 /* same position as eflags */
-
 void optimize_flags_init(void);
 
 #endif
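The deleted GEN_FLAG_* constants are not renamed one-for-one: the translator flag word is now assembled directly from hflags plus the eflags bits whose positions were deliberately left free. A sketch of that composition under an assumed function name — the real computation is inlined in cpu_exec(), as the first hunk above shows:

    #include "cpu-i386.h"

    /* hypothetical name; the segment-derived CS32/SS32/ADDSEG bits are
       ORed in separately from the descriptor caches (see cpu-exec.c) */
    static inline unsigned int compute_tb_flags(CPUX86State *env)
    {
        /* HF_* bits occupy positions 0-6, which do not collide with
           TF (bit 8), IOPL (bits 12-13) or VM (bit 17), so a plain OR works */
        return env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    }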
helper-i386.c
@@ -189,7 +189,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
 {
     SegmentCache *dt;
     uint8_t *ptr, *ssp;
-    int type, dpl, selector, ss_dpl;
+    int type, dpl, selector, ss_dpl, cpl;
     int has_error_code, new_stack, shift;
     uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
     uint32_t old_cs, old_ss, old_esp, old_eip;
@@ -216,8 +216,9 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         break;
     }
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
     /* check privilege if software int */
-    if (is_int && dpl < env->cpl)
+    if (is_int && dpl < cpl)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
     /* check valid bit */
     if (!(e2 & DESC_P_MASK))
@@ -232,11 +233,11 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
-    if (dpl > env->cpl)
+    if (dpl > cpl)
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     if (!(e2 & DESC_P_MASK))
         raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-    if (!(e2 & DESC_C_MASK) && dpl < env->cpl) {
+    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
         /* to inner privilege */
         get_ss_esp_from_tss(&ss, &esp, dpl);
         if ((ss & 0xfffc) == 0)
@@ -255,7 +256,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         if (!(ss_e2 & DESC_P_MASK))
             raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
         new_stack = 1;
-    } else if ((e2 & DESC_C_MASK) || dpl == env->cpl) {
+    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
         /* to same privilege */
         new_stack = 0;
     } else {
@@ -402,7 +403,7 @@ void do_interrupt_user(int intno, int is_int, int error_code,
 {
     SegmentCache *dt;
     uint8_t *ptr;
-    int dpl;
+    int dpl, cpl;
     uint32_t e2;
 
     dt = &env->idt;
@@ -410,8 +411,9 @@ void do_interrupt_user(int intno, int is_int, int error_code,
     e2 = ldl(ptr + 4);
 
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
     /* check privilege if software int */
-    if (is_int && dpl < env->cpl)
+    if (is_int && dpl < cpl)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
 
     /* Since we emulate only user space, we cannot do more than
@@ -742,7 +744,7 @@ void helper_ljmp_protected_T0_T1(void)
         raise_exception_err(EXCP0D_GPF, 0);
     if (load_segment(&e1, &e2, new_cs) != 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -826,7 +828,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         raise_exception_err(EXCP0D_GPF, 0);
     if (load_segment(&e1, &e2, new_cs) != 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -1079,7 +1081,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
     if (!(e2 & DESC_S_MASK) ||
         !(e2 & DESC_CS_MASK))
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     rpl = new_cs & 3;
     if (rpl < cpl)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
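Every former env->cpl read in this file becomes the same mask. A minimal sketch of the recurring software-interrupt privilege check — the wrapper function is illustrative, not from the commit, but the body is the pattern used in do_interrupt_protected() and do_interrupt_user() above:

    #include "cpu-i386.h"

    /* illustrative: an 'int n' executed at ring cpl may only reach
       IDT gates whose descriptor dpl is >= cpl */
    static void check_soft_int_privilege(CPUX86State *env, int intno,
                                         uint32_t e2, int is_int)
    {
        int dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        int cpl = env->hflags & HF_CPL_MASK;   /* replaces env->cpl */

        if (is_int && dpl < cpl)
            raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    }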
helper2-i386.c
@@ -52,7 +52,7 @@ CPUX86State *cpu_x86_init(void)
 
     tlb_flush(env);
 #ifdef CONFIG_SOFTMMU
-    env->soft_mmu = 1;
+    env->hflags |= HF_SOFTMMU_MASK;
 #endif
     /* init various static tables */
     if (!inited) {
@@ -228,7 +228,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
     int cpl, error_code, is_dirty, is_user, prot, page_size, ret;
     unsigned long pd;
 
-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     is_user = (cpl == 3);
 
 #ifdef DEBUG_MMU
@@ -325,7 +325,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
     }
 
  do_mapping:
-    if (env->soft_mmu) {
+    if (env->hflags & HF_SOFTMMU_MASK) {
         unsigned long paddr, vaddr, address, addend, page_offset;
         int index;
 
@@ -359,7 +359,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
     if ((pd & 0xfff) != 0) {
         /* IO access: no mapping is done as it will be handled by the
            soft MMU */
-        if (!env->soft_mmu)
+        if (!(env->hflags & HF_SOFTMMU_MASK))
             ret = 2;
     } else {
         void *map_addr;
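Both polarities of the soft-MMU test appear in this file. Hypothetical predicates capturing them, for readability only — the commit open-codes the bit operations:

    #include "cpu-i386.h"

    static inline int soft_mmu_enabled(CPUX86State *env)
    {
        return (env->hflags & HF_SOFTMMU_MASK) != 0;  /* was: env->soft_mmu */
    }

    static inline void soft_mmu_enable(CPUX86State *env)
    {
        env->hflags |= HF_SOFTMMU_MASK;               /* was: env->soft_mmu = 1 */
    }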
op-i386.c
@@ -457,6 +457,16 @@ void OPPROTO op_sti(void)
     env->eflags |= IF_MASK;
 }
 
+void OPPROTO op_set_inhibit_irq(void)
+{
+    env->hflags |= HF_INHIBIT_IRQ_MASK;
+}
+
+void OPPROTO op_reset_inhibit_irq(void)
+{
+    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+}
+
 #if 0
 /* vm86plus instructions */
 void OPPROTO op_cli_vm(void)
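These two micro-ops are the only run-time writers of the inhibit bit: the translator emits the setter after sti, pop ss and mov ss, and the resetter at the head of the following block (see the translate-i386.c hunks below). An illustrative view of what the translated sti block amounts to — micro-op sequence only, not actual emitted host code:

    #include "cpu-i386.h"

    /* illustrative: net effect of executing the block ending in 'sti' */
    void example_translated_sti(CPUX86State *env)
    {
        env->eflags |= IF_MASK;              /* op_sti */
        env->hflags |= HF_INHIBIT_IRQ_MASK;  /* op_set_inhibit_irq */
        /* the block ends here (is_jmp = 2) and control returns to
           cpu_exec(), which skips IRQ dispatch while the bit is set */
    }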
softmmu_template.h
@@ -90,7 +90,7 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr)
 
     /* test if there is match for unaligned or IO access */
     /* XXX: could be done more in memory macro in a non portable way */
-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_read[is_user][index].address;
@@ -126,7 +126,7 @@ static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr)
     int is_user, index, shift;
     unsigned long physaddr, tlb_addr, addr1, addr2;
 
-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_read[is_user][index].address;
@@ -169,7 +169,7 @@ void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE val)
     void *retaddr;
     int is_user, index;
 
-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_write[is_user][index].address;
@@ -203,7 +203,7 @@ static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
     unsigned long physaddr, tlb_addr;
     int is_user, index, i;
 
-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_write[is_user][index].address;
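All four load/store paths in the template derive the TLB "user" index the same way. A tiny helper capturing the test — hypothetical, since the template keeps it open-coded:

    #include "cpu-i386.h"

    /* 1 when executing at ring 3, selecting the user half of the TLB */
    static inline int mmu_index_is_user(CPUX86State *env)
    {
        return (env->hflags & HF_CPL_MASK) == 3;
    }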
translate-i386.c
@@ -1552,7 +1552,9 @@ static void gen_movl_seg_T0(DisasContext *s, int seg_reg, unsigned int cur_eip)
     else
         gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[seg_reg]));
     /* abort translation because the register may have a non zero base
-       or because ss32 may change */
+       or because ss32 may change. For R_SS, translation must always
+       stop as special handling must be done to disable hardware
+       interrupts for the next instruction */
     if (seg_reg == R_SS || (!s->addseg && seg_reg < R_FS))
         s->is_jmp = 2;
 }
@@ -2356,10 +2358,14 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
     case 0x07: /* pop es */
     case 0x17: /* pop ss */
     case 0x1f: /* pop ds */
+        reg = b >> 3;
         gen_pop_T0(s);
-        gen_movl_seg_T0(s, b >> 3, pc_start - s->cs_base);
+        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
         gen_pop_update(s);
-        /* XXX: if reg == SS, inhibit interrupts/trace */
+        if (reg == R_SS) {
+            /* if reg == SS, inhibit interrupts/trace */
+            gen_op_set_inhibit_irq();
+        }
         break;
     case 0x1a1: /* pop fs */
     case 0x1a9: /* pop gs */
@@ -2418,7 +2424,10 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
             goto illegal_op;
         gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
         gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
-        /* XXX: if reg == SS, inhibit interrupts/trace */
+        if (reg == R_SS) {
+            /* if reg == SS, inhibit interrupts/trace */
+            gen_op_set_inhibit_irq();
+        }
         break;
     case 0x8c: /* mov Gv, seg */
         modrm = ldub(s->pc++);
@@ -3704,6 +3713,8 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
         if (!s->vm86) {
             if (s->cpl <= s->iopl) {
                 gen_op_sti();
+                /* interrupts are enabled only one insn after sti */
+                gen_op_set_inhibit_irq();
                 s->is_jmp = 2; /* give a chance to handle pending irqs */
             } else {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
@@ -3711,12 +3722,13 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
         } else {
             if (s->iopl == 3) {
                 gen_op_sti();
+                /* interrupts are enabled only one insn after sti */
+                gen_op_set_inhibit_irq();
                 s->is_jmp = 2; /* give a chance to handle pending irqs */
             } else {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
             }
         }
-        /* XXX: interruptions are enabled only the first insn after sti */
         break;
     case 0x62: /* bound */
         ot = dflag ? OT_LONG : OT_WORD;
@@ -4380,21 +4392,21 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     flags = tb->flags;
 
     dc->pe = env->cr[0] & CR0_PE_MASK;
-    dc->code32 = (flags >> GEN_FLAG_CODE32_SHIFT) & 1;
-    dc->ss32 = (flags >> GEN_FLAG_SS32_SHIFT) & 1;
-    dc->addseg = (flags >> GEN_FLAG_ADDSEG_SHIFT) & 1;
-    dc->f_st = (flags >> GEN_FLAG_ST_SHIFT) & 7;
-    dc->vm86 = (flags >> GEN_FLAG_VM_SHIFT) & 1;
-    dc->cpl = (flags >> GEN_FLAG_CPL_SHIFT) & 3;
-    dc->iopl = (flags >> GEN_FLAG_IOPL_SHIFT) & 3;
-    dc->tf = (flags >> GEN_FLAG_TF_SHIFT) & 1;
+    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
+    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
+    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
+    dc->f_st = 0;
+    dc->vm86 = (flags >> VM_SHIFT) & 1;
+    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
+    dc->iopl = (flags >> IOPL_SHIFT) & 3;
+    dc->tf = (flags >> TF_SHIFT) & 1;
     dc->cc_op = CC_OP_DYNAMIC;
     dc->cs_base = cs_base;
     dc->tb = tb;
     dc->popl_esp_hack = 0;
     /* select memory access functions */
     dc->mem_index = 0;
-    if ((flags >> GEN_FLAG_SOFT_MMU_SHIFT) & 1) {
+    if (flags & HF_SOFTMMU_MASK) {
         if (dc->cpl == 3)
             dc->mem_index = 6;
         else
@@ -4408,6 +4420,13 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     dc->is_jmp = DISAS_NEXT;
     pc_ptr = pc_start;
     lj = -1;
+
+    /* if irqs were inhibited for the next instruction, we can disable
+       them here as it is simpler (otherwise jumps would have to be
+       handled as a special case) */
+    if (flags & HF_INHIBIT_IRQ_MASK) {
+        gen_op_reset_inhibit_irq();
+    }
     do {
         if (env->nb_breakpoints > 0) {
             for(j = 0; j < env->nb_breakpoints; j++) {
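Putting the pieces together, the inhibit bit's lifetime spans three sites. A condensed sketch — the function is purely illustrative, since each numbered step lives in a different file above:

    #include "cpu-i386.h"

    void inhibit_irq_lifecycle_sketch(CPUX86State *env)
    {
        /* 1. op-i386.c: the block translated from pop ss / mov ss / sti
           ends with op_set_inhibit_irq() */
        env->hflags |= HF_INHIBIT_IRQ_MASK;

        /* 2. cpu-exec.c: while the bit is set, the main loop refuses to
           dispatch CPU_INTERRUPT_HARD even though IF may be 1 */

        /* 3. translate-i386.c: the next block is generated with the bit
           visible in tb->flags, so its prologue clears it via
           op_reset_inhibit_irq(); IRQs become deliverable again */
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    }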