Commit db620f46a8c8b168831812eabd7428a952964334
1 parent 3cd9acb4
reworked SVM interrupt handling logic - fixed vmrun EIP saved value - reworked cr8 handling - added CPUState.hflags2

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4662 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 7 changed files with 110 additions and 109 deletions.
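For orientation before the per-file hunks, here is a minimal stand-alone sketch of the new interrupt gating this commit introduces. It reuses the HF2_* names from the target-i386/cpu.h hunk below; IF_MASK and HF_INHIBIT_IRQ_MASK are illustrative stand-ins rather than the real cpu.h definitions, and `can_take_hard_irq()` is a toy model of the condition added to cpu-exec.c, not QEMU code:

```c
#include <stdio.h>
#include <stdint.h>

/* HF2_* layout copied from the target-i386/cpu.h hunk below. */
#define HF2_GIF_MASK    (1 << 0)   /* if set CPU takes interrupts */
#define HF2_HIF_MASK    (1 << 1)   /* value of IF when entering SVM */
#define HF2_NMI_MASK    (1 << 2)   /* CPU serving NMI */
#define HF2_VINTR_MASK  (1 << 3)   /* value of V_INTR_MASKING bit */

/* Illustrative stand-ins; the real definitions live in cpu.h. */
#define IF_MASK               (1 << 9)
#define HF_INHIBIT_IRQ_MASK   (1 << 3)

/* Toy model of the reworked test in cpu_exec(): may a hardware interrupt
 * be delivered, given hflags, the new hflags2 and EFLAGS?  Mirrors the
 * condition in the cpu-exec.c hunk below. */
static int can_take_hard_irq(uint32_t hflags, uint32_t hflags2, uint32_t eflags)
{
    if (!(hflags2 & HF2_GIF_MASK))              /* GIF clear: nothing gets in */
        return 0;
    if (hflags2 & HF2_VINTR_MASK)               /* guest under V_INTR_MASKING: */
        return (hflags2 & HF2_HIF_MASK) != 0;   /* use the host IF saved at VMRUN */
    return (eflags & IF_MASK) &&                /* otherwise: plain IF semantics */
           !(hflags & HF_INHIBIT_IRQ_MASK);
}

int main(void)
{
    /* guest with V_INTR_MASKING active; host IF was set when VMRUN ran */
    printf("%d\n", can_take_hard_irq(0, HF2_GIF_MASK | HF2_VINTR_MASK | HF2_HIF_MASK, 0));
    /* GIF cleared by CLGI: even IF=1 does not deliver */
    printf("%d\n", can_take_hard_irq(0, HF2_VINTR_MASK | HF2_HIF_MASK, IF_MASK));
    return 0;
}
```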
cpu-exec.c
| ... | ... | @@ -368,11 +368,8 @@ int cpu_exec(CPUState *env1) |
| 368 | 368 | next_tb = 0; /* force lookup of first TB */ |
| 369 | 369 | for(;;) { |
| 370 | 370 | interrupt_request = env->interrupt_request; |
| 371 | - if (__builtin_expect(interrupt_request, 0) | |
| 372 | -#if defined(TARGET_I386) | |
| 373 | - && env->hflags & HF_GIF_MASK | |
| 374 | -#endif | |
| 375 | - && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) { | |
| 371 | + if (__builtin_expect(interrupt_request, 0) && | |
| 372 | + likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) { | |
| 376 | 373 | if (interrupt_request & CPU_INTERRUPT_DEBUG) { |
| 377 | 374 | env->interrupt_request &= ~CPU_INTERRUPT_DEBUG; |
| 378 | 375 | env->exception_index = EXCP_DEBUG; |
| ... | ... | @@ -388,47 +385,51 @@ int cpu_exec(CPUState *env1) |
| 388 | 385 | } |
| 389 | 386 | #endif |
| 390 | 387 | #if defined(TARGET_I386) |
| 391 | - if ((interrupt_request & CPU_INTERRUPT_SMI) && | |
| 392 | - !(env->hflags & HF_SMM_MASK)) { | |
| 393 | - svm_check_intercept(SVM_EXIT_SMI); | |
| 394 | - env->interrupt_request &= ~CPU_INTERRUPT_SMI; | |
| 395 | - do_smm_enter(); | |
| 396 | - next_tb = 0; | |
| 397 | - } else if ((interrupt_request & CPU_INTERRUPT_NMI) && | |
| 398 | - !(env->hflags & HF_NMI_MASK)) { | |
| 399 | - env->interrupt_request &= ~CPU_INTERRUPT_NMI; | |
| 400 | - env->hflags |= HF_NMI_MASK; | |
| 401 | - do_interrupt(EXCP02_NMI, 0, 0, 0, 1); | |
| 402 | - next_tb = 0; | |
| 403 | - } else if ((interrupt_request & CPU_INTERRUPT_HARD) && | |
| 404 | - (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) && | |
| 405 | - !(env->hflags & HF_INHIBIT_IRQ_MASK)) { | |
| 406 | - int intno; | |
| 407 | - svm_check_intercept(SVM_EXIT_INTR); | |
| 408 | - env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ); | |
| 409 | - intno = cpu_get_pic_interrupt(env); | |
| 410 | - if (loglevel & CPU_LOG_TB_IN_ASM) { | |
| 411 | - fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno); | |
| 412 | - } | |
| 413 | - do_interrupt(intno, 0, 0, 0, 1); | |
| 414 | - /* ensure that no TB jump will be modified as | |
| 415 | - the program flow was changed */ | |
| 416 | - next_tb = 0; | |
| 388 | + if (env->hflags2 & HF2_GIF_MASK) { | |
| 389 | + if ((interrupt_request & CPU_INTERRUPT_SMI) && | |
| 390 | + !(env->hflags & HF_SMM_MASK)) { | |
| 391 | + svm_check_intercept(SVM_EXIT_SMI); | |
| 392 | + env->interrupt_request &= ~CPU_INTERRUPT_SMI; | |
| 393 | + do_smm_enter(); | |
| 394 | + next_tb = 0; | |
| 395 | + } else if ((interrupt_request & CPU_INTERRUPT_NMI) && | |
| 396 | + !(env->hflags2 & HF2_NMI_MASK)) { | |
| 397 | + env->interrupt_request &= ~CPU_INTERRUPT_NMI; | |
| 398 | + env->hflags2 |= HF2_NMI_MASK; | |
| 399 | + do_interrupt(EXCP02_NMI, 0, 0, 0, 1); | |
| 400 | + next_tb = 0; | |
| 401 | + } else if ((interrupt_request & CPU_INTERRUPT_HARD) && | |
| 402 | + (((env->hflags2 & HF2_VINTR_MASK) && | |
| 403 | + (env->hflags2 & HF2_HIF_MASK)) || | |
| 404 | + (!(env->hflags2 & HF2_VINTR_MASK) && | |
| 405 | + (env->eflags & IF_MASK && | |
| 406 | + !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { | |
| 407 | + int intno; | |
| 408 | + svm_check_intercept(SVM_EXIT_INTR); | |
| 409 | + env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ); | |
| 410 | + intno = cpu_get_pic_interrupt(env); | |
| 411 | + if (loglevel & CPU_LOG_TB_IN_ASM) { | |
| 412 | + fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno); | |
| 413 | + } | |
| 414 | + do_interrupt(intno, 0, 0, 0, 1); | |
| 415 | + /* ensure that no TB jump will be modified as | |
| 416 | + the program flow was changed */ | |
| 417 | + next_tb = 0; | |
| 417 | 418 | #if !defined(CONFIG_USER_ONLY) |
| 418 | - } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && | |
| 419 | - (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) { | |
| 420 | - int intno; | |
| 421 | - /* FIXME: this should respect TPR */ | |
| 422 | - env->interrupt_request &= ~CPU_INTERRUPT_VIRQ; | |
| 423 | - svm_check_intercept(SVM_EXIT_VINTR); | |
| 424 | - intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector)); | |
| 425 | - if (loglevel & CPU_LOG_TB_IN_ASM) | |
| 426 | - fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno); | |
| 427 | - do_interrupt(intno, 0, 0, -1, 1); | |
| 428 | - stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), | |
| 429 | - ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK); | |
| 430 | - next_tb = 0; | |
| 419 | + } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && | |
| 420 | + (env->eflags & IF_MASK) && | |
| 421 | + !(env->hflags & HF_INHIBIT_IRQ_MASK)) { | |
| 422 | + int intno; | |
| 423 | + /* FIXME: this should respect TPR */ | |
| 424 | + svm_check_intercept(SVM_EXIT_VINTR); | |
| 425 | + env->interrupt_request &= ~CPU_INTERRUPT_VIRQ; | |
| 426 | + intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector)); | |
| 427 | + if (loglevel & CPU_LOG_TB_IN_ASM) | |
| 428 | + fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno); | |
| 429 | + do_interrupt(intno, 0, 0, 0, 1); | |
| 430 | + next_tb = 0; | |
| 431 | 431 | #endif |
| 432 | + } | |
| 432 | 433 | } |
| 433 | 434 | #elif defined(TARGET_PPC) |
| 434 | 435 | #if 0 |
target-i386/cpu.h
| ... | ... | @@ -145,11 +145,8 @@ |
| 145 | 145 | #define HF_OSFXSR_SHIFT 16 /* CR4.OSFXSR */ |
| 146 | 146 | #define HF_VM_SHIFT 17 /* must be same as eflags */ |
| 147 | 147 | #define HF_SMM_SHIFT 19 /* CPU in SMM mode */ |
| 148 | -#define HF_GIF_SHIFT 20 /* if set CPU takes interrupts */ | |
| 149 | -#define HF_HIF_SHIFT 21 /* shadow copy of IF_MASK when in SVM */ | |
| 150 | -#define HF_NMI_SHIFT 22 /* CPU serving NMI */ | |
| 151 | -#define HF_SVME_SHIFT 23 /* SVME enabled (copy of EFER.SVME) */ | |
| 152 | -#define HF_SVMI_SHIFT 24 /* SVM intercepts are active */ | |
| 148 | +#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ | |
| 149 | +#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */ | |
| 153 | 150 | |
| 154 | 151 | #define HF_CPL_MASK (3 << HF_CPL_SHIFT) |
| 155 | 152 | #define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT) |
| ... | ... | @@ -166,12 +163,21 @@ |
| 166 | 163 | #define HF_CS64_MASK (1 << HF_CS64_SHIFT) |
| 167 | 164 | #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) |
| 168 | 165 | #define HF_SMM_MASK (1 << HF_SMM_SHIFT) |
| 169 | -#define HF_GIF_MASK (1 << HF_GIF_SHIFT) | |
| 170 | -#define HF_HIF_MASK (1 << HF_HIF_SHIFT) | |
| 171 | -#define HF_NMI_MASK (1 << HF_NMI_SHIFT) | |
| 172 | 166 | #define HF_SVME_MASK (1 << HF_SVME_SHIFT) |
| 173 | 167 | #define HF_SVMI_MASK (1 << HF_SVMI_SHIFT) |
| 174 | 168 | |
| 169 | +/* hflags2 */ | |
| 170 | + | |
| 171 | +#define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */ | |
| 172 | +#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */ | |
| 173 | +#define HF2_NMI_SHIFT 2 /* CPU serving NMI */ | |
| 174 | +#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */ | |
| 175 | + | |
| 176 | +#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) | |
| 177 | +#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) | |
| 178 | +#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) | |
| 179 | +#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) | |
| 180 | + | |
| 175 | 181 | #define CR0_PE_MASK (1 << 0) |
| 176 | 182 | #define CR0_MP_MASK (1 << 1) |
| 177 | 183 | #define CR0_EM_MASK (1 << 2) |
| ... | ... | @@ -488,7 +494,9 @@ typedef struct CPUX86State { |
| 488 | 494 | target_ulong cc_dst; |
| 489 | 495 | uint32_t cc_op; |
| 490 | 496 | int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ |
| 491 | - uint32_t hflags; /* hidden flags, see HF_xxx constants */ | |
| 497 | + uint32_t hflags; /* TB flags, see HF_xxx constants. These flags | |
| 498 | + are known at translation time. */ | |
| 499 | + uint32_t hflags2; /* various other flags, see HF2_xxx constants. */ | |
| 492 | 500 | |
| 493 | 501 | /* segments */ |
| 494 | 502 | SegmentCache segs[6]; /* selector values */ |
| ... | ... | @@ -497,7 +505,7 @@ typedef struct CPUX86State { |
| 497 | 505 | SegmentCache gdt; /* only base and limit are used */ |
| 498 | 506 | SegmentCache idt; /* only base and limit are used */ |
| 499 | 507 | |
| 500 | - target_ulong cr[9]; /* NOTE: cr1, cr5-7 are unused */ | |
| 508 | + target_ulong cr[5]; /* NOTE: cr1 is unused */ | |
| 501 | 509 | uint64_t a20_mask; |
| 502 | 510 | |
| 503 | 511 | /* FPU state */ |
| ... | ... | @@ -541,6 +549,7 @@ typedef struct CPUX86State { |
| 541 | 549 | uint16_t intercept_dr_read; |
| 542 | 550 | uint16_t intercept_dr_write; |
| 543 | 551 | uint32_t intercept_exceptions; |
| 552 | + uint8_t v_tpr; | |
| 544 | 553 | |
| 545 | 554 | #ifdef TARGET_X86_64 |
| 546 | 555 | target_ulong lstar; |
target-i386/helper.c
| ... | ... | @@ -374,7 +374,7 @@ void cpu_reset(CPUX86State *env) |
| 374 | 374 | #ifdef CONFIG_SOFTMMU |
| 375 | 375 | env->hflags |= HF_SOFTMMU_MASK; |
| 376 | 376 | #endif |
| 377 | - env->hflags |= HF_GIF_MASK; | |
| 377 | + env->hflags2 |= HF2_GIF_MASK; | |
| 378 | 378 | |
| 379 | 379 | cpu_x86_update_cr0(env, 0x60000010); |
| 380 | 380 | env->a20_mask = ~0x0; |
target-i386/helper.h
| ... | ... | @@ -47,9 +47,6 @@ DEF_HELPER(target_ulong, helper_read_crN, (int reg)) |
| 47 | 47 | DEF_HELPER(void, helper_write_crN, (int reg, target_ulong t0)) |
| 48 | 48 | DEF_HELPER(void, helper_lmsw, (target_ulong t0)) |
| 49 | 49 | DEF_HELPER(void, helper_clts, (void)) |
| 50 | -#if !defined(CONFIG_USER_ONLY) | |
| 51 | -DEF_HELPER(target_ulong, helper_movtl_T0_cr8, (void)) | |
| 52 | -#endif | |
| 53 | 50 | DEF_HELPER(void, helper_movl_drN_T0, (int reg, target_ulong t0)) |
| 54 | 51 | DEF_HELPER(void, helper_invlpg, (target_ulong addr)) |
| 55 | 52 | |
| ... | ... | @@ -102,7 +99,7 @@ DEF_HELPER(void, helper_svm_check_intercept_param, (uint32_t type, uint64_t para |
| 102 | 99 | DEF_HELPER(void, helper_vmexit, (uint32_t exit_code, uint64_t exit_info_1)) |
| 103 | 100 | DEF_HELPER(void, helper_svm_check_io, (uint32_t port, uint32_t param, |
| 104 | 101 | uint32_t next_eip_addend)) |
| 105 | -DEF_HELPER(void, helper_vmrun, (int aflag)) | |
| 102 | +DEF_HELPER(void, helper_vmrun, (int aflag, int next_eip_addend)) | |
| 106 | 103 | DEF_HELPER(void, helper_vmmcall, (void)) |
| 107 | 104 | DEF_HELPER(void, helper_vmload, (int aflag)) |
| 108 | 105 | DEF_HELPER(void, helper_vmsave, (int aflag)) |
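The extra `next_eip_addend` argument to `helper_vmrun` is what the commit message calls the "fixed vmrun EIP saved value": the host RIP written into the hsave area must point past the VMRUN instruction, otherwise the host would resume on VMRUN itself after every #VMEXIT. A minimal stand-alone sketch of that arithmetic (toy function, not the QEMU helper):

```c
#include <stdio.h>
#include <stdint.h>

static uint64_t saved_host_rip;   /* stands in for vm_hsave save.rip */

/* Matches the op_helper.c hunk below: the stored RIP is
 * EIP + next_eip_addend, i.e. the address *after* the VMRUN instruction. */
static void vmrun(uint64_t eip, int next_eip_addend)
{
    saved_host_rip = eip + next_eip_addend;
}

int main(void)
{
    uint64_t vmrun_pc = 0x1000;   /* address of the VMRUN opcode */
    int insn_len = 3;             /* VMRUN is 0F 01 D8; translate.c passes s->pc - pc_start */
    vmrun(vmrun_pc, insn_len);
    /* On #VMEXIT the host resumes at 0x1003, not back on VMRUN. */
    printf("host resumes at 0x%llx\n", (unsigned long long)saved_host_rip);
    return 0;
}
```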
target-i386/op_helper.c
| ... | ... | @@ -2591,7 +2591,7 @@ void helper_iret_real(int shift) |
| 2591 | 2591 | if (shift == 0) |
| 2592 | 2592 | eflags_mask &= 0xffff; |
| 2593 | 2593 | load_eflags(new_eflags, eflags_mask); |
| 2594 | - env->hflags &= ~HF_NMI_MASK; | |
| 2594 | + env->hflags2 &= ~HF2_NMI_MASK; | |
| 2595 | 2595 | } |
| 2596 | 2596 | |
| 2597 | 2597 | static inline void validate_seg(int seg_reg, int cpl) |
| ... | ... | @@ -2843,7 +2843,7 @@ void helper_iret_protected(int shift, int next_eip) |
| 2843 | 2843 | } else { |
| 2844 | 2844 | helper_ret_protected(shift, 1, 0); |
| 2845 | 2845 | } |
| 2846 | - env->hflags &= ~HF_NMI_MASK; | |
| 2846 | + env->hflags2 &= ~HF2_NMI_MASK; | |
| 2847 | 2847 | #ifdef USE_KQEMU |
| 2848 | 2848 | if (kqemu_is_ok(env)) { |
| 2849 | 2849 | CC_OP = CC_OP_EFLAGS; |
| ... | ... | @@ -2934,7 +2934,11 @@ target_ulong helper_read_crN(int reg) |
| 2934 | 2934 | val = env->cr[reg]; |
| 2935 | 2935 | break; |
| 2936 | 2936 | case 8: |
| 2937 | - val = cpu_get_apic_tpr(env); | |
| 2937 | + if (!(env->hflags2 & HF2_VINTR_MASK)) { | |
| 2938 | + val = cpu_get_apic_tpr(env); | |
| 2939 | + } else { | |
| 2940 | + val = env->v_tpr; | |
| 2941 | + } | |
| 2938 | 2942 | break; |
| 2939 | 2943 | } |
| 2940 | 2944 | return val; |
| ... | ... | @@ -2954,8 +2958,10 @@ void helper_write_crN(int reg, target_ulong t0) |
| 2954 | 2958 | cpu_x86_update_cr4(env, t0); |
| 2955 | 2959 | break; |
| 2956 | 2960 | case 8: |
| 2957 | - cpu_set_apic_tpr(env, t0); | |
| 2958 | - env->cr[8] = t0; | |
| 2961 | + if (!(env->hflags2 & HF2_VINTR_MASK)) { | |
| 2962 | + cpu_set_apic_tpr(env, t0); | |
| 2963 | + } | |
| 2964 | + env->v_tpr = t0 & 0x0f; | |
| 2959 | 2965 | break; |
| 2960 | 2966 | default: |
| 2961 | 2967 | env->cr[reg] = t0; |
| ... | ... | @@ -2978,13 +2984,6 @@ void helper_clts(void) |
| 2978 | 2984 | env->hflags &= ~HF_TS_MASK; |
| 2979 | 2985 | } |
| 2980 | 2986 | |
| 2981 | -#if !defined(CONFIG_USER_ONLY) | |
| 2982 | -target_ulong helper_movtl_T0_cr8(void) | |
| 2983 | -{ | |
| 2984 | - return cpu_get_apic_tpr(env); | |
| 2985 | -} | |
| 2986 | -#endif | |
| 2987 | - | |
| 2988 | 2987 | /* XXX: do more */ |
| 2989 | 2988 | void helper_movl_drN_T0(int reg, target_ulong t0) |
| 2990 | 2989 | { |
| ... | ... | @@ -4721,7 +4720,7 @@ void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr) |
| 4721 | 4720 | |
| 4722 | 4721 | #if defined(CONFIG_USER_ONLY) |
| 4723 | 4722 | |
| 4724 | -void helper_vmrun(int aflag) | |
| 4723 | +void helper_vmrun(int aflag, int next_eip_addend) | |
| 4725 | 4724 | { |
| 4726 | 4725 | } |
| 4727 | 4726 | void helper_vmmcall(void) |
| ... | ... | @@ -4791,7 +4790,7 @@ static inline void svm_load_seg_cache(target_phys_addr_t addr, |
| 4791 | 4790 | sc->base, sc->limit, sc->flags); |
| 4792 | 4791 | } |
| 4793 | 4792 | |
| 4794 | -void helper_vmrun(int aflag) | |
| 4793 | +void helper_vmrun(int aflag, int next_eip_addend) | |
| 4795 | 4794 | { |
| 4796 | 4795 | target_ulong addr; |
| 4797 | 4796 | uint32_t event_inj; |
| ... | ... | @@ -4820,7 +4819,6 @@ void helper_vmrun(int aflag) |
| 4820 | 4819 | stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]); |
| 4821 | 4820 | stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]); |
| 4822 | 4821 | stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]); |
| 4823 | - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]); | |
| 4824 | 4822 | stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]); |
| 4825 | 4823 | stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]); |
| 4826 | 4824 | |
| ... | ... | @@ -4836,7 +4834,8 @@ void helper_vmrun(int aflag) |
| 4836 | 4834 | svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), |
| 4837 | 4835 | &env->segs[R_DS]); |
| 4838 | 4836 | |
| 4839 | - stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP); | |
| 4837 | + stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), | |
| 4838 | + EIP + next_eip_addend); | |
| 4840 | 4839 | stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP); |
| 4841 | 4840 | stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX); |
| 4842 | 4841 | |
| ... | ... | @@ -4866,17 +4865,16 @@ void helper_vmrun(int aflag) |
| 4866 | 4865 | cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3))); |
| 4867 | 4866 | env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2)); |
| 4868 | 4867 | int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); |
| 4868 | + env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); | |
| 4869 | 4869 | if (int_ctl & V_INTR_MASKING_MASK) { |
| 4870 | - env->cr[8] = int_ctl & V_TPR_MASK; | |
| 4871 | - cpu_set_apic_tpr(env, env->cr[8]); | |
| 4870 | + env->v_tpr = int_ctl & V_TPR_MASK; | |
| 4871 | + env->hflags2 |= HF2_VINTR_MASK; | |
| 4872 | 4872 | if (env->eflags & IF_MASK) |
| 4873 | - env->hflags |= HF_HIF_MASK; | |
| 4873 | + env->hflags2 |= HF2_HIF_MASK; | |
| 4874 | 4874 | } |
| 4875 | 4875 | |
| 4876 | -#ifdef TARGET_X86_64 | |
| 4877 | 4876 | cpu_load_efer(env, |
| 4878 | 4877 | ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer))); |
| 4879 | -#endif | |
| 4880 | 4878 | env->eflags = 0; |
| 4881 | 4879 | load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)), |
| 4882 | 4880 | ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); |
| ... | ... | @@ -4912,6 +4910,10 @@ void helper_vmrun(int aflag) |
| 4912 | 4910 | |
| 4913 | 4911 | helper_stgi(); |
| 4914 | 4912 | |
| 4913 | + if (int_ctl & V_IRQ_MASK) { | |
| 4914 | + env->interrupt_request |= CPU_INTERRUPT_VIRQ; | |
| 4915 | + } | |
| 4916 | + | |
| 4915 | 4917 | /* maybe we need to inject an event */ |
| 4916 | 4918 | event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); |
| 4917 | 4919 | if (event_inj & SVM_EVTINJ_VALID) { |
| ... | ... | @@ -4931,14 +4933,17 @@ void helper_vmrun(int aflag) |
| 4931 | 4933 | env->exception_next_eip = -1; |
| 4932 | 4934 | if (loglevel & CPU_LOG_TB_IN_ASM) |
| 4933 | 4935 | fprintf(logfile, "INTR"); |
| 4936 | + /* XXX: is it always correct ? */ | |
| 4937 | + do_interrupt(vector, 0, 0, 0, 1); | |
| 4934 | 4938 | break; |
| 4935 | 4939 | case SVM_EVTINJ_TYPE_NMI: |
| 4936 | - env->exception_index = vector; | |
| 4940 | + env->exception_index = EXCP02_NMI; | |
| 4937 | 4941 | env->error_code = event_inj_err; |
| 4938 | 4942 | env->exception_is_int = 0; |
| 4939 | 4943 | env->exception_next_eip = EIP; |
| 4940 | 4944 | if (loglevel & CPU_LOG_TB_IN_ASM) |
| 4941 | 4945 | fprintf(logfile, "NMI"); |
| 4946 | + cpu_loop_exit(); | |
| 4942 | 4947 | break; |
| 4943 | 4948 | case SVM_EVTINJ_TYPE_EXEPT: |
| 4944 | 4949 | env->exception_index = vector; |
| ... | ... | @@ -4947,6 +4952,7 @@ void helper_vmrun(int aflag) |
| 4947 | 4952 | env->exception_next_eip = -1; |
| 4948 | 4953 | if (loglevel & CPU_LOG_TB_IN_ASM) |
| 4949 | 4954 | fprintf(logfile, "EXEPT"); |
| 4955 | + cpu_loop_exit(); | |
| 4950 | 4956 | break; |
| 4951 | 4957 | case SVM_EVTINJ_TYPE_SOFT: |
| 4952 | 4958 | env->exception_index = vector; |
| ... | ... | @@ -4955,17 +4961,12 @@ void helper_vmrun(int aflag) |
| 4955 | 4961 | env->exception_next_eip = EIP; |
| 4956 | 4962 | if (loglevel & CPU_LOG_TB_IN_ASM) |
| 4957 | 4963 | fprintf(logfile, "SOFT"); |
| 4964 | + cpu_loop_exit(); | |
| 4958 | 4965 | break; |
| 4959 | 4966 | } |
| 4960 | 4967 | if (loglevel & CPU_LOG_TB_IN_ASM) |
| 4961 | 4968 | fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code); |
| 4962 | 4969 | } |
| 4963 | - if ((int_ctl & V_IRQ_MASK) || | |
| 4964 | - (env->intercept & (1ULL << (SVM_EXIT_INTR - SVM_EXIT_INTR)))) { | |
| 4965 | - env->interrupt_request |= CPU_INTERRUPT_VIRQ; | |
| 4966 | - } | |
| 4967 | - | |
| 4968 | - cpu_loop_exit(); | |
| 4969 | 4970 | } |
| 4970 | 4971 | |
| 4971 | 4972 | void helper_vmmcall(void) |
| ... | ... | @@ -5049,13 +5050,13 @@ void helper_vmsave(int aflag) |
| 5049 | 5050 | void helper_stgi(void) |
| 5050 | 5051 | { |
| 5051 | 5052 | helper_svm_check_intercept_param(SVM_EXIT_STGI, 0); |
| 5052 | - env->hflags |= HF_GIF_MASK; | |
| 5053 | + env->hflags2 |= HF2_GIF_MASK; | |
| 5053 | 5054 | } |
| 5054 | 5055 | |
| 5055 | 5056 | void helper_clgi(void) |
| 5056 | 5057 | { |
| 5057 | 5058 | helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0); |
| 5058 | - env->hflags &= ~HF_GIF_MASK; | |
| 5059 | + env->hflags2 &= ~HF2_GIF_MASK; | |
| 5059 | 5060 | } |
| 5060 | 5061 | |
| 5061 | 5062 | void helper_skinit(void) |
| ... | ... | @@ -5204,11 +5205,12 @@ void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1) |
| 5204 | 5205 | stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]); |
| 5205 | 5206 | stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]); |
| 5206 | 5207 | |
| 5207 | - if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) { | |
| 5208 | - int_ctl &= ~V_TPR_MASK; | |
| 5209 | - int_ctl |= env->cr[8] & V_TPR_MASK; | |
| 5210 | - stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl); | |
| 5211 | - } | |
| 5208 | + int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); | |
| 5209 | + int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK); | |
| 5210 | + int_ctl |= env->v_tpr & V_TPR_MASK; | |
| 5211 | + if (env->interrupt_request & CPU_INTERRUPT_VIRQ) | |
| 5212 | + int_ctl |= V_IRQ_MASK; | |
| 5213 | + stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl); | |
| 5212 | 5214 | |
| 5213 | 5215 | stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags()); |
| 5214 | 5216 | stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip); |
| ... | ... | @@ -5219,7 +5221,7 @@ void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1) |
| 5219 | 5221 | stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK); |
| 5220 | 5222 | |
| 5221 | 5223 | /* Reload the host state from vm_hsave */ |
| 5222 | - env->hflags &= ~HF_HIF_MASK; | |
| 5224 | + env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); | |
| 5223 | 5225 | env->hflags &= ~HF_SVMI_MASK; |
| 5224 | 5226 | env->intercept = 0; |
| 5225 | 5227 | env->intercept_exceptions = 0; |
| ... | ... | @@ -5234,17 +5236,10 @@ void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1) |
| 5234 | 5236 | cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK); |
| 5235 | 5237 | cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4))); |
| 5236 | 5238 | cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3))); |
| 5237 | - if (int_ctl & V_INTR_MASKING_MASK) { | |
| 5238 | - env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8)); | |
| 5239 | - cpu_set_apic_tpr(env, env->cr[8]); | |
| 5240 | - } | |
| 5241 | 5239 | /* we need to set the efer after the crs so the hidden flags get |
| 5242 | 5240 | set properly */ |
| 5243 | -#ifdef TARGET_X86_64 | |
| 5244 | 5241 | cpu_load_efer(env, |
| 5245 | 5242 | ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer))); |
| 5246 | -#endif | |
| 5247 | - | |
| 5248 | 5243 | env->eflags = 0; |
| 5249 | 5244 | load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)), |
| 5250 | 5245 | ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); |
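The CR8 rework in the hunks above routes guest TPR accesses through the new `v_tpr` field whenever V_INTR_MASKING is set, leaving the real APIC TPR to the host; at #VMEXIT `v_tpr` and the pending virtual IRQ are folded back into `int_ctl`. A minimal stand-alone model of the read/write routing (the struct and functions here are illustrative, not QEMU's):

```c
#include <stdio.h>
#include <stdint.h>

struct cpu {
    int      vintr_masking;   /* stands in for HF2_VINTR_MASK being set */
    uint8_t  v_tpr;           /* the new CPUX86State.v_tpr field */
    uint8_t  apic_tpr;        /* stand-in for cpu_get/set_apic_tpr() */
};

/* Mirrors helper_read_crN case 8: the guest sees v_tpr under V_INTR_MASKING. */
static uint8_t read_cr8(struct cpu *c)
{
    return c->vintr_masking ? c->v_tpr : c->apic_tpr;
}

/* Mirrors helper_write_crN case 8: only the non-masked path reaches the APIC,
 * while v_tpr is updated in both cases. */
static void write_cr8(struct cpu *c, uint8_t val)
{
    if (!c->vintr_masking)
        c->apic_tpr = val;
    c->v_tpr = val & 0x0f;
}

int main(void)
{
    struct cpu c = { .vintr_masking = 1, .apic_tpr = 2 };
    write_cr8(&c, 0x7);
    printf("guest CR8=%d, APIC TPR=%d\n", read_cr8(&c), c.apic_tpr); /* 7, 2 */
    return 0;
}
```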
target-i386/svm.h
| ... | ... | @@ -205,9 +205,7 @@ struct __attribute__ ((__packed__)) vmcb_save_area { |
| 205 | 205 | uint64_t sysenter_esp; |
| 206 | 206 | uint64_t sysenter_eip; |
| 207 | 207 | uint64_t cr2; |
| 208 | - /* qemu: cr8 added to reuse this as hsave */ | |
| 209 | - uint64_t cr8; | |
| 210 | - uint8_t reserved_6[32 - 8]; /* originally 32 */ | |
| 208 | + uint8_t reserved_6[32]; | |
| 211 | 209 | uint64_t g_pat; |
| 212 | 210 | uint64_t dbgctl; |
| 213 | 211 | uint64_t br_from; |
target-i386/translate.c
| ... | ... | @@ -6569,10 +6569,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) |
| 6569 | 6569 | gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); |
| 6570 | 6570 | break; |
| 6571 | 6571 | } else { |
| 6572 | - tcg_gen_helper_0_1(helper_vmrun, | |
| 6573 | - tcg_const_i32(s->aflag)); | |
| 6574 | - s->cc_op = CC_OP_EFLAGS; | |
| 6575 | - gen_eob(s); | |
| 6572 | + tcg_gen_helper_0_2(helper_vmrun, | |
| 6573 | + tcg_const_i32(s->aflag), | |
| 6574 | + tcg_const_i32(s->pc - pc_start)); | |
| 6575 | + tcg_gen_exit_tb(0); | |
| 6576 | + s->is_jmp = 3; | |
| 6576 | 6577 | } |
| 6577 | 6578 | break; |
| 6578 | 6579 | case 1: /* VMMCALL */ |