Commit 8d7b0fbb3f301e3cd9a018cc0650807e433c1ec5
1 parent 5e966ce6
32 bit RSP update fix (aka Open Solaris x86_64 bug)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@2186 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 24 additions and 9 deletions.
target-i386/helper.c
@@ -553,6 +553,20 @@ static inline unsigned int get_sp_mask(unsigned int e2)
         return 0xffff;
 }
 
+#ifdef TARGET_X86_64
+#define SET_ESP(val, sp_mask)\
+do {\
+  if ((sp_mask) == 0xffff)\
+    ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
+  else if ((sp_mask) == 0xffffffffLL)\
+    ESP = (uint32_t)(val);\
+  else\
+    ESP = (val);\
+} while (0)
+#else
+#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
+#endif
+
 /* XXX: add a is_user flag to have proper security support */
 #define PUSHW(ssp, sp, sp_mask, val)\
 {\
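The hunk above introduces SET_ESP. The point of the x86_64 branch is the 0xffffffff case: on real x86_64 hardware a 32-bit write to ESP zero-extends into RSP, clearing the upper 32 bits, whereas the generic (ESP & ~sp_mask) | (val & sp_mask) formula preserves whatever stale upper bits were already in RSP when sp_mask is held in a 64-bit target_ulong. A minimal standalone sketch of the difference follows; it is not QEMU code, and the helper names and sample values are made up for illustration.

/* Standalone sketch (not QEMU code): why the generic masked update
 * mishandles 32-bit stack-pointer writes on a 64-bit target. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;   /* assumption: 64-bit guest register */

/* Old formula: keeps whatever was in the bits outside sp_mask. */
static target_ulong masked_update(target_ulong rsp, target_ulong sp_mask,
                                  target_ulong val)
{
    return (rsp & ~sp_mask) | (val & sp_mask);
}

/* What x86_64 hardware does for a 32-bit stack-pointer write:
 * the result is zero-extended into the full RSP. */
static target_ulong zero_extend_update(target_ulong val)
{
    return (uint32_t)val;
}

int main(void)
{
    target_ulong rsp = 0xdeadbeef00001000ULL;  /* stale upper 32 bits */
    target_ulong sp_mask = 0xffffffff;         /* 32-bit stack segment */
    target_ulong new_esp = 0x00002000;

    /* Prints 0xdeadbeef00002000: the stale upper half survives. */
    printf("masked update : %#018llx\n",
           (unsigned long long)masked_update(rsp, sp_mask, new_esp));
    /* Prints 0x0000000000002000: the upper half is cleared. */
    printf("zero-extended : %#018llx\n",
           (unsigned long long)zero_extend_update(new_esp));
    return 0;
}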
@@ -584,10 +598,10 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
 {
     SegmentCache *dt;
     target_ulong ptr, ssp;
-    int type, dpl, selector, ss_dpl, cpl, sp_mask;
+    int type, dpl, selector, ss_dpl, cpl;
     int has_error_code, new_stack, shift;
     uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
-    uint32_t old_eip;
+    uint32_t old_eip, sp_mask;
 
     has_error_code = 0;
     if (!is_int && !is_hw) {
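This hunk moves sp_mask from int to uint32_t. A plausible reason, stated here as an inference rather than something taken from the commit message: SET_ESP's x86_64 branch tests (sp_mask) == 0xffffffffLL, and that test only succeeds when the mask is an unsigned 32-bit value; an int holding 0xffffffff typically wraps to -1 and would take the wrong branch. A small sketch of the comparison, with made-up variable names:

/* Standalone illustration of the signed-vs-unsigned mask comparison. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int      mask_int = (int)0xffffffffu;  /* typically -1; conversion is implementation-defined */
    uint32_t mask_u32 = 0xffffffffu;

    /* -1 promoted to long long stays -1, not 0xffffffff, so this prints 0. */
    printf("int      == 0xffffffffLL : %d\n", mask_int == 0xffffffffLL);
    /* 0xffffffffu promoted to long long is 4294967295, so this prints 1. */
    printf("uint32_t == 0xffffffffLL : %d\n", mask_u32 == 0xffffffffLL);
    return 0;
}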
@@ -623,7 +637,8 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
             raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
         switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
         if (has_error_code) {
-            int mask, type;
+            int type;
+            uint32_t mask;
             /* push the error code */
             type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
             shift = type >> 3;
@@ -637,7 +652,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
                 stl_kernel(ssp, error_code);
             else
                 stw_kernel(ssp, error_code);
-            ESP = (esp & mask) | (ESP & ~mask);
+            SET_ESP(esp, mask);
         }
         return;
     case 6: /* 286 interrupt gate */
@@ -765,7 +780,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         cpu_x86_load_seg_cache(env, R_SS, ss,
                                ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
     }
-    ESP = (ESP & ~sp_mask) | (esp & sp_mask);
+    SET_ESP(esp, sp_mask);
 
     selector = (selector & ~3) | dpl;
     cpu_x86_load_seg_cache(env, R_CS, selector,
@@ -2015,7 +2030,7 @@ void helper_lcall_real_T0_T1(int shift, int next_eip)
         PUSHW(ssp, esp, esp_mask, next_eip);
     }
 
-    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
+    SET_ESP(esp, esp_mask);
     env->eip = new_eip;
     env->segs[R_CS].selector = new_cs;
     env->segs[R_CS].base = (new_cs << 4);
@@ -2101,7 +2116,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
         if (new_eip > limit)
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         /* from this point, not restartable */
-        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
+        SET_ESP(sp, sp_mask);
         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                get_seg_base(e1, e2), limit, e2);
         EIP = new_eip;
@@ -2230,7 +2245,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
                                get_seg_limit(e1, e2),
                                e2);
         cpu_x86_set_cpl(env, dpl);
-        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
+        SET_ESP(sp, sp_mask);
         EIP = offset;
     }
 #ifdef USE_KQEMU
@@ -2459,7 +2474,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
 
         sp += addend;
     }
-    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
+    SET_ESP(sp, sp_mask);
     env->eip = new_eip;
     if (is_iret) {
         /* NOTE: 'cpl' is the _old_ CPL */
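Taken together, the remaining hunks replace each open-coded stack-pointer update with SET_ESP. For reference, a standalone model of the three cases the TARGET_X86_64 macro distinguishes; the set_sp name and the sample values are illustrative and not part of the patch.

/* Standalone model (not QEMU code) of the three SET_ESP cases. */
#include <stdint.h>
#include <stdio.h>

static uint64_t set_sp(uint64_t rsp, uint64_t sp_mask, uint64_t val)
{
    if (sp_mask == 0xffff)                 /* 16-bit stack: touch SP only */
        return (rsp & ~(uint64_t)0xffff) | (val & 0xffff);
    else if (sp_mask == 0xffffffffULL)     /* 32-bit stack: zero-extend into RSP */
        return (uint32_t)val;
    else                                   /* 64-bit stack: full-width write */
        return val;
}

int main(void)
{
    uint64_t rsp = 0x1122334455667788ULL, val = 0xaabbccddeeff0011ULL;

    printf("16-bit: %#018llx\n", (unsigned long long)set_sp(rsp, 0xffff, val));
    printf("32-bit: %#018llx\n", (unsigned long long)set_sp(rsp, 0xffffffffULL, val));
    printf("64-bit: %#018llx\n", (unsigned long long)set_sp(rsp, ~0ULL, val));
    return 0;
}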