Commit 891b38e446f3546b6642fb53b37d07f4c1242f9d

Authored by bellard
1 parent 7dea1da4

more precise stack operations in call/int gates (16 bit wrapping is handled in all cases) - makes all call/int gates operations restartable in case of exception


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@462 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 188 additions and 200 deletions
target-i386/helper.c
@@ -531,16 +531,49 @@ void check_iol_DX(void)
531 check_io(EDX & 0xffff, 4); 531 check_io(EDX & 0xffff, 4);
532 } 532 }
533 533
  534 +static inline unsigned int get_sp_mask(unsigned int e2)
  535 +{
  536 + if (e2 & DESC_B_MASK)
  537 + return 0xffffffff;
  538 + else
  539 + return 0xffff;
  540 +}
  541 +
  542 +/* XXX: add a is_user flag to have proper security support */
  543 +#define PUSHW(ssp, sp, sp_mask, val)\
  544 +{\
  545 + sp -= 2;\
  546 + stw_kernel((ssp) + (sp & (sp_mask)), (val));\
  547 +}
  548 +
  549 +#define PUSHL(ssp, sp, sp_mask, val)\
  550 +{\
  551 + sp -= 4;\
  552 + stl_kernel((ssp) + (sp & (sp_mask)), (val));\
  553 +}
  554 +
  555 +#define POPW(ssp, sp, sp_mask, val)\
  556 +{\
  557 + val = lduw_kernel((ssp) + (sp & (sp_mask)));\
  558 + sp += 2;\
  559 +}
  560 +
  561 +#define POPL(ssp, sp, sp_mask, val)\
  562 +{\
  563 + val = ldl_kernel((ssp) + (sp & (sp_mask)));\
  564 + sp += 4;\
  565 +}
  566 +
534 /* protected mode interrupt */ 567 /* protected mode interrupt */
535 static void do_interrupt_protected(int intno, int is_int, int error_code, 568 static void do_interrupt_protected(int intno, int is_int, int error_code,
536 unsigned int next_eip, int is_hw) 569 unsigned int next_eip, int is_hw)
537 { 570 {
538 SegmentCache *dt; 571 SegmentCache *dt;
539 uint8_t *ptr, *ssp; 572 uint8_t *ptr, *ssp;
540 - int type, dpl, selector, ss_dpl, cpl; 573 + int type, dpl, selector, ss_dpl, cpl, sp_mask;
541 int has_error_code, new_stack, shift; 574 int has_error_code, new_stack, shift;
542 - uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;  
543 - uint32_t old_cs, old_ss, old_esp, old_eip; 575 + uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
  576 + uint32_t old_eip;
544 577
545 #ifdef DEBUG_PCALL 578 #ifdef DEBUG_PCALL
546 if (loglevel) { 579 if (loglevel) {
@@ -659,96 +692,80 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
659 if (!(ss_e2 & DESC_P_MASK)) 692 if (!(ss_e2 & DESC_P_MASK))
660 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 693 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
661 new_stack = 1; 694 new_stack = 1;
  695 + sp_mask = get_sp_mask(ss_e2);
  696 + ssp = get_seg_base(ss_e1, ss_e2);
662 } else if ((e2 & DESC_C_MASK) || dpl == cpl) { 697 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
663 /* to same priviledge */ 698 /* to same priviledge */
664 new_stack = 0; 699 new_stack = 0;
  700 + sp_mask = get_sp_mask(env->segs[R_SS].flags);
  701 + ssp = env->segs[R_SS].base;
  702 + esp = ESP;
665 } else { 703 } else {
666 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 704 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
667 new_stack = 0; /* avoid warning */ 705 new_stack = 0; /* avoid warning */
  706 + sp_mask = 0; /* avoid warning */
  707 + ssp = NULL; /* avoid warning */
  708 + esp = 0; /* avoid warning */
668 } 709 }
669 710
670 shift = type >> 3; 711 shift = type >> 3;
  712 +
  713 +#if 0
  714 + /* XXX: check that enough room is available */
671 push_size = 6 + (new_stack << 2) + (has_error_code << 1); 715 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
672 if (env->eflags & VM_MASK) 716 if (env->eflags & VM_MASK)
673 push_size += 8; 717 push_size += 8;
674 push_size <<= shift; 718 push_size <<= shift;
675 -  
676 - /* XXX: check that enough room is available */  
677 - if (new_stack) {  
678 - old_esp = ESP;  
679 - old_ss = env->segs[R_SS].selector;  
680 - ss = (ss & ~3) | dpl;  
681 - cpu_x86_load_seg_cache(env, R_SS, ss,  
682 - get_seg_base(ss_e1, ss_e2),  
683 - get_seg_limit(ss_e1, ss_e2),  
684 - ss_e2);  
685 - } else {  
686 - old_esp = 0;  
687 - old_ss = 0;  
688 - esp = ESP;  
689 - } 719 +#endif
690 if (is_int) 720 if (is_int)
691 old_eip = next_eip; 721 old_eip = next_eip;
692 else 722 else
693 old_eip = env->eip; 723 old_eip = env->eip;
694 - old_cs = env->segs[R_CS].selector;  
695 - selector = (selector & ~3) | dpl;  
696 - cpu_x86_load_seg_cache(env, R_CS, selector,  
697 - get_seg_base(e1, e2),  
698 - get_seg_limit(e1, e2),  
699 - e2);  
700 - cpu_x86_set_cpl(env, dpl);  
701 - env->eip = offset;  
702 - ESP = esp - push_size;  
703 - ssp = env->segs[R_SS].base + esp;  
704 if (shift == 1) { 724 if (shift == 1) {
705 - int old_eflags;  
706 if (env->eflags & VM_MASK) { 725 if (env->eflags & VM_MASK) {
707 - ssp -= 4;  
708 - stl_kernel(ssp, env->segs[R_GS].selector);  
709 - ssp -= 4;  
710 - stl_kernel(ssp, env->segs[R_FS].selector);  
711 - ssp -= 4;  
712 - stl_kernel(ssp, env->segs[R_DS].selector);  
713 - ssp -= 4;  
714 - stl_kernel(ssp, env->segs[R_ES].selector); 726 + PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
  727 + PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
  728 + PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
  729 + PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
715 } 730 }
716 if (new_stack) { 731 if (new_stack) {
717 - ssp -= 4;  
718 - stl_kernel(ssp, old_ss);  
719 - ssp -= 4;  
720 - stl_kernel(ssp, old_esp); 732 + PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
  733 + PUSHL(ssp, esp, sp_mask, ESP);
721 } 734 }
722 - ssp -= 4;  
723 - old_eflags = compute_eflags();  
724 - stl_kernel(ssp, old_eflags);  
725 - ssp -= 4;  
726 - stl_kernel(ssp, old_cs);  
727 - ssp -= 4;  
728 - stl_kernel(ssp, old_eip); 735 + PUSHL(ssp, esp, sp_mask, compute_eflags());
  736 + PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
  737 + PUSHL(ssp, esp, sp_mask, old_eip);
729 if (has_error_code) { 738 if (has_error_code) {
730 - ssp -= 4;  
731 - stl_kernel(ssp, error_code); 739 + PUSHL(ssp, esp, sp_mask, error_code);
732 } 740 }
733 } else { 741 } else {
734 if (new_stack) { 742 if (new_stack) {
735 - ssp -= 2;  
736 - stw_kernel(ssp, old_ss);  
737 - ssp -= 2;  
738 - stw_kernel(ssp, old_esp); 743 + PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
  744 + PUSHW(ssp, esp, sp_mask, ESP);
739 } 745 }
740 - ssp -= 2;  
741 - stw_kernel(ssp, compute_eflags());  
742 - ssp -= 2;  
743 - stw_kernel(ssp, old_cs);  
744 - ssp -= 2;  
745 - stw_kernel(ssp, old_eip); 746 + PUSHW(ssp, esp, sp_mask, compute_eflags());
  747 + PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
  748 + PUSHW(ssp, esp, sp_mask, old_eip);
746 if (has_error_code) { 749 if (has_error_code) {
747 - ssp -= 2;  
748 - stw_kernel(ssp, error_code); 750 + PUSHW(ssp, esp, sp_mask, error_code);
749 } 751 }
750 } 752 }
751 753
  754 + if (new_stack) {
  755 + ss = (ss & ~3) | dpl;
  756 + cpu_x86_load_seg_cache(env, R_SS, ss,
  757 + ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
  758 + }
  759 + ESP = (ESP & ~sp_mask) | (esp & sp_mask);
  760 +
  761 + selector = (selector & ~3) | dpl;
  762 + cpu_x86_load_seg_cache(env, R_CS, selector,
  763 + get_seg_base(e1, e2),
  764 + get_seg_limit(e1, e2),
  765 + e2);
  766 + cpu_x86_set_cpl(env, dpl);
  767 + env->eip = offset;
  768 +
752 /* interrupt gate clear IF mask */ 769 /* interrupt gate clear IF mask */
753 if ((type & 1) == 0) { 770 if ((type & 1) == 0) {
754 env->eflags &= ~IF_MASK; 771 env->eflags &= ~IF_MASK;
@@ -780,12 +797,10 @@ static void do_interrupt_real(int intno, int is_int, int error_code,
780 else 797 else
781 old_eip = env->eip; 798 old_eip = env->eip;
782 old_cs = env->segs[R_CS].selector; 799 old_cs = env->segs[R_CS].selector;
783 - esp -= 2;  
784 - stw_kernel(ssp + (esp & 0xffff), compute_eflags());  
785 - esp -= 2;  
786 - stw_kernel(ssp + (esp & 0xffff), old_cs);  
787 - esp -= 2;  
788 - stw_kernel(ssp + (esp & 0xffff), old_eip); 800 + /* XXX: use SS segment size ? */
  801 + PUSHW(ssp, esp, 0xffff, compute_eflags());
  802 + PUSHW(ssp, esp, 0xffff, old_cs);
  803 + PUSHW(ssp, esp, 0xffff, old_eip);
789 804
790 /* update processor state */ 805 /* update processor state */
791 ESP = (ESP & ~0xffff) | (esp & 0xffff); 806 ESP = (ESP & ~0xffff) | (esp & 0xffff);
@@ -1247,26 +1262,17 @@ void helper_lcall_real_T0_T1(int shift, int next_eip)
1247 new_cs = T0; 1262 new_cs = T0;
1248 new_eip = T1; 1263 new_eip = T1;
1249 esp = ESP; 1264 esp = ESP;
1250 - esp_mask = 0xffffffff;  
1251 - if (!(env->segs[R_SS].flags & DESC_B_MASK))  
1252 - esp_mask = 0xffff; 1265 + esp_mask = get_sp_mask(env->segs[R_SS].flags);
1253 ssp = env->segs[R_SS].base; 1266 ssp = env->segs[R_SS].base;
1254 if (shift) { 1267 if (shift) {
1255 - esp -= 4;  
1256 - stl_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);  
1257 - esp -= 4;  
1258 - stl_kernel(ssp + (esp & esp_mask), next_eip); 1268 + PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
  1269 + PUSHL(ssp, esp, esp_mask, next_eip);
1259 } else { 1270 } else {
1260 - esp -= 2;  
1261 - stw_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);  
1262 - esp -= 2;  
1263 - stw_kernel(ssp + (esp & esp_mask), next_eip); 1271 + PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
  1272 + PUSHW(ssp, esp, esp_mask, next_eip);
1264 } 1273 }
1265 1274
1266 - if (!(env->segs[R_SS].flags & DESC_B_MASK))  
1267 - ESP = (ESP & ~0xffff) | (esp & 0xffff);  
1268 - else  
1269 - ESP = esp; 1275 + ESP = (ESP & ~esp_mask) | (esp & esp_mask);
1270 env->eip = new_eip; 1276 env->eip = new_eip;
1271 env->segs[R_CS].selector = new_cs; 1277 env->segs[R_CS].selector = new_cs;
1272 env->segs[R_CS].base = (uint8_t *)(new_cs << 4); 1278 env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
@@ -1275,10 +1281,10 @@ void helper_lcall_real_T0_T1(int shift, int next_eip)
1275 /* protected mode call */ 1281 /* protected mode call */
1276 void helper_lcall_protected_T0_T1(int shift, int next_eip) 1282 void helper_lcall_protected_T0_T1(int shift, int next_eip)
1277 { 1283 {
1278 - int new_cs, new_eip; 1284 + int new_cs, new_eip, new_stack, i;
1279 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; 1285 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1280 - uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;  
1281 - uint32_t old_ss, old_esp, val, i, limit; 1286 + uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
  1287 + uint32_t val, limit, old_sp_mask;
1282 uint8_t *ssp, *old_ssp; 1288 uint8_t *ssp, *old_ssp;
1283 1289
1284 new_cs = T0; 1290 new_cs = T0;
@@ -1319,30 +1325,21 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
1319 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); 1325 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1320 1326
1321 sp = ESP; 1327 sp = ESP;
1322 - if (!(env->segs[R_SS].flags & DESC_B_MASK))  
1323 - sp &= 0xffff;  
1324 - ssp = env->segs[R_SS].base + sp; 1328 + sp_mask = get_sp_mask(env->segs[R_SS].flags);
  1329 + ssp = env->segs[R_SS].base;
1325 if (shift) { 1330 if (shift) {
1326 - ssp -= 4;  
1327 - stl_kernel(ssp, env->segs[R_CS].selector);  
1328 - ssp -= 4;  
1329 - stl_kernel(ssp, next_eip); 1331 + PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
  1332 + PUSHL(ssp, sp, sp_mask, next_eip);
1330 } else { 1333 } else {
1331 - ssp -= 2;  
1332 - stw_kernel(ssp, env->segs[R_CS].selector);  
1333 - ssp -= 2;  
1334 - stw_kernel(ssp, next_eip); 1334 + PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
  1335 + PUSHW(ssp, sp, sp_mask, next_eip);
1335 } 1336 }
1336 - sp -= (4 << shift);  
1337 1337
1338 limit = get_seg_limit(e1, e2); 1338 limit = get_seg_limit(e1, e2);
1339 if (new_eip > limit) 1339 if (new_eip > limit)
1340 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 1340 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1341 /* from this point, not restartable */ 1341 /* from this point, not restartable */
1342 - if (!(env->segs[R_SS].flags & DESC_B_MASK))  
1343 - ESP = (ESP & 0xffff0000) | (sp & 0xffff);  
1344 - else  
1345 - ESP = sp; 1342 + ESP = (ESP & ~sp_mask) | (sp & sp_mask);
1346 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1343 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1347 get_seg_base(e1, e2), limit, e2); 1344 get_seg_base(e1, e2), limit, e2);
1348 EIP = new_eip; 1345 EIP = new_eip;
@@ -1413,77 +1410,63 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
1413 if (!(ss_e2 & DESC_P_MASK)) 1410 if (!(ss_e2 & DESC_P_MASK))
1414 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 1411 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1415 1412
1416 - push_size = ((param_count * 2) + 8) << shift; 1413 + // push_size = ((param_count * 2) + 8) << shift;
1417 1414
1418 - old_esp = ESP;  
1419 - old_ss = env->segs[R_SS].selector;  
1420 - if (!(env->segs[R_SS].flags & DESC_B_MASK))  
1421 - old_esp &= 0xffff;  
1422 - old_ssp = env->segs[R_SS].base + old_esp; 1415 + old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
  1416 + old_ssp = env->segs[R_SS].base;
1423 1417
1424 - /* XXX: from this point not restartable */  
1425 - ss = (ss & ~3) | dpl;  
1426 - cpu_x86_load_seg_cache(env, R_SS, ss,  
1427 - get_seg_base(ss_e1, ss_e2),  
1428 - get_seg_limit(ss_e1, ss_e2),  
1429 - ss_e2);  
1430 -  
1431 - if (!(ss_e2 & DESC_B_MASK))  
1432 - sp &= 0xffff;  
1433 - ssp = env->segs[R_SS].base + sp; 1418 + sp_mask = get_sp_mask(ss_e2);
  1419 + ssp = get_seg_base(ss_e1, ss_e2);
1434 if (shift) { 1420 if (shift) {
1435 - ssp -= 4;  
1436 - stl_kernel(ssp, old_ss);  
1437 - ssp -= 4;  
1438 - stl_kernel(ssp, old_esp);  
1439 - ssp -= 4 * param_count;  
1440 - for(i = 0; i < param_count; i++) {  
1441 - val = ldl_kernel(old_ssp + i * 4);  
1442 - stl_kernel(ssp + i * 4, val); 1421 + PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
  1422 + PUSHL(ssp, sp, sp_mask, ESP);
  1423 + for(i = param_count - 1; i >= 0; i--) {
  1424 + val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
  1425 + PUSHL(ssp, sp, sp_mask, val);
1443 } 1426 }
1444 } else { 1427 } else {
1445 - ssp -= 2;  
1446 - stw_kernel(ssp, old_ss);  
1447 - ssp -= 2;  
1448 - stw_kernel(ssp, old_esp);  
1449 - ssp -= 2 * param_count;  
1450 - for(i = 0; i < param_count; i++) {  
1451 - val = lduw_kernel(old_ssp + i * 2);  
1452 - stw_kernel(ssp + i * 2, val); 1428 + PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
  1429 + PUSHW(ssp, sp, sp_mask, ESP);
  1430 + for(i = param_count - 1; i >= 0; i--) {
  1431 + val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
  1432 + PUSHW(ssp, sp, sp_mask, val);
1453 } 1433 }
1454 } 1434 }
  1435 + new_stack = 1;
1455 } else { 1436 } else {
1456 /* to same priviledge */ 1437 /* to same priviledge */
1457 - if (!(env->segs[R_SS].flags & DESC_B_MASK))  
1458 - sp &= 0xffff;  
1459 - ssp = env->segs[R_SS].base + sp;  
1460 - push_size = (4 << shift); 1438 + sp = ESP;
  1439 + sp_mask = get_sp_mask(env->segs[R_SS].flags);
  1440 + ssp = env->segs[R_SS].base;
  1441 + // push_size = (4 << shift);
  1442 + new_stack = 0;
1461 } 1443 }
1462 1444
1463 if (shift) { 1445 if (shift) {
1464 - ssp -= 4;  
1465 - stl_kernel(ssp, env->segs[R_CS].selector);  
1466 - ssp -= 4;  
1467 - stl_kernel(ssp, next_eip); 1446 + PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
  1447 + PUSHL(ssp, sp, sp_mask, next_eip);
1468 } else { 1448 } else {
1469 - ssp -= 2;  
1470 - stw_kernel(ssp, env->segs[R_CS].selector);  
1471 - ssp -= 2;  
1472 - stw_kernel(ssp, next_eip); 1449 + PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
  1450 + PUSHW(ssp, sp, sp_mask, next_eip);
  1451 + }
  1452 +
  1453 + /* from this point, not restartable */
  1454 +
  1455 + if (new_stack) {
  1456 + ss = (ss & ~3) | dpl;
  1457 + cpu_x86_load_seg_cache(env, R_SS, ss,
  1458 + ssp,
  1459 + get_seg_limit(ss_e1, ss_e2),
  1460 + ss_e2);
1473 } 1461 }
1474 1462
1475 - sp -= push_size;  
1476 selector = (selector & ~3) | dpl; 1463 selector = (selector & ~3) | dpl;
1477 cpu_x86_load_seg_cache(env, R_CS, selector, 1464 cpu_x86_load_seg_cache(env, R_CS, selector,
1478 get_seg_base(e1, e2), 1465 get_seg_base(e1, e2),
1479 get_seg_limit(e1, e2), 1466 get_seg_limit(e1, e2),
1480 e2); 1467 e2);
1481 cpu_x86_set_cpl(env, dpl); 1468 cpu_x86_set_cpl(env, dpl);
1482 -  
1483 - if (!(env->segs[R_SS].flags & DESC_B_MASK))  
1484 - ESP = (ESP & 0xffff0000) | (sp & 0xffff);  
1485 - else  
1486 - ESP = sp; 1469 + ESP = (ESP & ~sp_mask) | (sp & sp_mask);
1487 EIP = offset; 1470 EIP = offset;
1488 } 1471 }
1489 } 1472 }
@@ -1491,26 +1474,26 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
1491 /* real and vm86 mode iret */ 1474 /* real and vm86 mode iret */
1492 void helper_iret_real(int shift) 1475 void helper_iret_real(int shift)
1493 { 1476 {
1494 - uint32_t sp, new_cs, new_eip, new_eflags, new_esp; 1477 + uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1495 uint8_t *ssp; 1478 uint8_t *ssp;
1496 int eflags_mask; 1479 int eflags_mask;
1497 1480
1498 - sp = ESP & 0xffff;  
1499 - ssp = env->segs[R_SS].base + sp; 1481 + sp_mask = 0xffff; /* XXXX: use SS segment size ? */
  1482 + sp = ESP;
  1483 + ssp = env->segs[R_SS].base;
1500 if (shift == 1) { 1484 if (shift == 1) {
1501 /* 32 bits */ 1485 /* 32 bits */
1502 - new_eflags = ldl_kernel(ssp + 8);  
1503 - new_cs = ldl_kernel(ssp + 4) & 0xffff;  
1504 - new_eip = ldl_kernel(ssp) & 0xffff; 1486 + POPL(ssp, sp, sp_mask, new_eip);
  1487 + POPL(ssp, sp, sp_mask, new_cs);
  1488 + new_cs &= 0xffff;
  1489 + POPL(ssp, sp, sp_mask, new_eflags);
1505 } else { 1490 } else {
1506 /* 16 bits */ 1491 /* 16 bits */
1507 - new_eflags = lduw_kernel(ssp + 4);  
1508 - new_cs = lduw_kernel(ssp + 2);  
1509 - new_eip = lduw_kernel(ssp); 1492 + POPW(ssp, sp, sp_mask, new_eip);
  1493 + POPW(ssp, sp, sp_mask, new_cs);
  1494 + POPW(ssp, sp, sp_mask, new_eflags);
1510 } 1495 }
1511 - new_esp = sp + (6 << shift);  
1512 - ESP = (ESP & 0xffff0000) |  
1513 - (new_esp & 0xffff); 1496 + ESP = (ESP & ~sp_mask) | (sp & 0xffff);
1514 load_seg_vm(R_CS, new_cs); 1497 load_seg_vm(R_CS, new_cs);
1515 env->eip = new_eip; 1498 env->eip = new_eip;
1516 if (env->eflags & VM_MASK) 1499 if (env->eflags & VM_MASK)
@@ -1525,31 +1508,38 @@ void helper_iret_real(int shift)
1525 /* protected mode iret */ 1508 /* protected mode iret */
1526 static inline void helper_ret_protected(int shift, int is_iret, int addend) 1509 static inline void helper_ret_protected(int shift, int is_iret, int addend)
1527 { 1510 {
1528 - uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss; 1511 + uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
1529 uint32_t new_es, new_ds, new_fs, new_gs; 1512 uint32_t new_es, new_ds, new_fs, new_gs;
1530 uint32_t e1, e2, ss_e1, ss_e2; 1513 uint32_t e1, e2, ss_e1, ss_e2;
1531 int cpl, dpl, rpl, eflags_mask; 1514 int cpl, dpl, rpl, eflags_mask;
1532 uint8_t *ssp; 1515 uint8_t *ssp;
1533 1516
  1517 + sp_mask = get_sp_mask(env->segs[R_SS].flags);
1534 sp = ESP; 1518 sp = ESP;
1535 - if (!(env->segs[R_SS].flags & DESC_B_MASK))  
1536 - sp &= 0xffff;  
1537 - ssp = env->segs[R_SS].base + sp; 1519 + ssp = env->segs[R_SS].base;
1538 if (shift == 1) { 1520 if (shift == 1) {
1539 /* 32 bits */ 1521 /* 32 bits */
1540 - if (is_iret)  
1541 - new_eflags = ldl_kernel(ssp + 8);  
1542 - new_cs = ldl_kernel(ssp + 4) & 0xffff;  
1543 - new_eip = ldl_kernel(ssp);  
1544 - if (is_iret && (new_eflags & VM_MASK))  
1545 - goto return_to_vm86; 1522 + POPL(ssp, sp, sp_mask, new_eip);
  1523 + POPL(ssp, sp, sp_mask, new_cs);
  1524 + new_cs &= 0xffff;
  1525 + if (is_iret) {
  1526 + POPL(ssp, sp, sp_mask, new_eflags);
  1527 + if (new_eflags & VM_MASK)
  1528 + goto return_to_vm86;
  1529 + }
1546 } else { 1530 } else {
1547 /* 16 bits */ 1531 /* 16 bits */
  1532 + POPW(ssp, sp, sp_mask, new_eip);
  1533 + POPW(ssp, sp, sp_mask, new_cs);
1548 if (is_iret) 1534 if (is_iret)
1549 - new_eflags = lduw_kernel(ssp + 4);  
1550 - new_cs = lduw_kernel(ssp + 2);  
1551 - new_eip = lduw_kernel(ssp); 1535 + POPW(ssp, sp, sp_mask, new_eflags);
1552 } 1536 }
  1537 +#ifdef DEBUG_PCALL
  1538 + if (loglevel) {
  1539 + fprintf(logfile, "lret new %04x:%08x\n",
  1540 + new_cs, new_eip);
  1541 + }
  1542 +#endif
1553 if ((new_cs & 0xfffc) == 0) 1543 if ((new_cs & 0xfffc) == 0)
1554 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 1544 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1555 if (load_segment(&e1, &e2, new_cs) != 0) 1545 if (load_segment(&e1, &e2, new_cs) != 0)
@@ -1572,24 +1562,24 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
1572 if (!(e2 & DESC_P_MASK)) 1562 if (!(e2 & DESC_P_MASK))
1573 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); 1563 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1574 1564
  1565 + sp += addend;
1575 if (rpl == cpl) { 1566 if (rpl == cpl) {
1576 /* return to same priledge level */ 1567 /* return to same priledge level */
1577 cpu_x86_load_seg_cache(env, R_CS, new_cs, 1568 cpu_x86_load_seg_cache(env, R_CS, new_cs,
1578 get_seg_base(e1, e2), 1569 get_seg_base(e1, e2),
1579 get_seg_limit(e1, e2), 1570 get_seg_limit(e1, e2),
1580 e2); 1571 e2);
1581 - new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;  
1582 } else { 1572 } else {
1583 /* return to different priviledge level */ 1573 /* return to different priviledge level */
1584 - ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;  
1585 if (shift == 1) { 1574 if (shift == 1) {
1586 /* 32 bits */ 1575 /* 32 bits */
1587 - new_esp = ldl_kernel(ssp);  
1588 - new_ss = ldl_kernel(ssp + 4) & 0xffff; 1576 + POPL(ssp, sp, sp_mask, new_esp);
  1577 + POPL(ssp, sp, sp_mask, new_ss);
  1578 + new_ss &= 0xffff;
1589 } else { 1579 } else {
1590 /* 16 bits */ 1580 /* 16 bits */
1591 - new_esp = lduw_kernel(ssp);  
1592 - new_ss = lduw_kernel(ssp + 2); 1581 + POPW(ssp, sp, sp_mask, new_esp);
  1582 + POPW(ssp, sp, sp_mask, new_ss);
1593 } 1583 }
1594 1584
1595 if ((new_ss & 3) != rpl) 1585 if ((new_ss & 3) != rpl)
@@ -1615,12 +1605,10 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
1615 get_seg_limit(ss_e1, ss_e2), 1605 get_seg_limit(ss_e1, ss_e2),
1616 ss_e2); 1606 ss_e2);
1617 cpu_x86_set_cpl(env, rpl); 1607 cpu_x86_set_cpl(env, rpl);
  1608 + sp = new_esp;
  1609 + /* XXX: change sp_mask according to old segment ? */
1618 } 1610 }
1619 - if (env->segs[R_SS].flags & DESC_B_MASK)  
1620 - ESP = new_esp;  
1621 - else  
1622 - ESP = (ESP & 0xffff0000) |  
1623 - (new_esp & 0xffff); 1611 + ESP = (ESP & ~sp_mask) | (sp & sp_mask);
1624 env->eip = new_eip; 1612 env->eip = new_eip;
1625 if (is_iret) { 1613 if (is_iret) {
1626 /* NOTE: 'cpl' can be different from the current CPL */ 1614 /* NOTE: 'cpl' can be different from the current CPL */
@@ -1635,22 +1623,22 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
1635 return; 1623 return;
1636 1624
1637 return_to_vm86: 1625 return_to_vm86:
1638 - new_esp = ldl_kernel(ssp + 12);  
1639 - new_ss = ldl_kernel(ssp + 16);  
1640 - new_es = ldl_kernel(ssp + 20);  
1641 - new_ds = ldl_kernel(ssp + 24);  
1642 - new_fs = ldl_kernel(ssp + 28);  
1643 - new_gs = ldl_kernel(ssp + 32); 1626 + POPL(ssp, sp, sp_mask, new_esp);
  1627 + POPL(ssp, sp, sp_mask, new_ss);
  1628 + POPL(ssp, sp, sp_mask, new_es);
  1629 + POPL(ssp, sp, sp_mask, new_ds);
  1630 + POPL(ssp, sp, sp_mask, new_fs);
  1631 + POPL(ssp, sp, sp_mask, new_gs);
1644 1632
1645 /* modify processor state */ 1633 /* modify processor state */
1646 load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK); 1634 load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
1647 - load_seg_vm(R_CS, new_cs); 1635 + load_seg_vm(R_CS, new_cs & 0xffff);
1648 cpu_x86_set_cpl(env, 3); 1636 cpu_x86_set_cpl(env, 3);
1649 - load_seg_vm(R_SS, new_ss);  
1650 - load_seg_vm(R_ES, new_es);  
1651 - load_seg_vm(R_DS, new_ds);  
1652 - load_seg_vm(R_FS, new_fs);  
1653 - load_seg_vm(R_GS, new_gs); 1637 + load_seg_vm(R_SS, new_ss & 0xffff);
  1638 + load_seg_vm(R_ES, new_es & 0xffff);
  1639 + load_seg_vm(R_DS, new_ds & 0xffff);
  1640 + load_seg_vm(R_FS, new_fs & 0xffff);
  1641 + load_seg_vm(R_GS, new_gs & 0xffff);
1654 1642
1655 env->eip = new_eip; 1643 env->eip = new_eip;
1656 ESP = new_esp; 1644 ESP = new_esp;