Commit 8e682019e37c8f8939244fcf44a592fa6347d127

Authored by bellard
Parent: cf495bcf

correct zero segment values when coming from VM86 mode - cache infos in CPUID - simpler exception handling in load_seg() - validate segments after lret/iret


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@485 c046a42c-6fe2-441c-8c8c-71466251a162
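
The CPUID part of this change can be observed from inside the guest: leaf 0 now reports a maximum leaf of 2, and any leaf above 1 returns the Pentium Pro style cache descriptors (0x410601 in EAX). A minimal guest-side check might look like the following sketch (a hypothetical test program, not part of this commit; GCC inline assembly assumed):

/* Hypothetical guest-side test, not part of this commit: run inside the
   emulated x86 system to print the vendor string, the maximum CPUID leaf,
   and the leaf-2 cache descriptors added here. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
    __asm__ volatile("cpuid"
                     : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                     : "a" (leaf));
}

int main(void)
{
    uint32_t a, b, c, d;
    char vendor[13];

    cpuid(0, &a, &b, &c, &d);          /* leaf 0: max leaf + vendor string */
    memcpy(vendor + 0, &b, 4);         /* "Genu" */
    memcpy(vendor + 4, &d, 4);         /* "ineI" */
    memcpy(vendor + 8, &c, 4);         /* "ntel" */
    vendor[12] = '\0';
    printf("max leaf: %u  vendor: %s\n", (unsigned)a, vendor);

    cpuid(2, &a, &b, &c, &d);          /* leaf 2: cache descriptors */
    printf("cache info: %08x %08x %08x %08x\n",
           (unsigned)a, (unsigned)b, (unsigned)c, (unsigned)d);
    return 0;
}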
target-i386/exec.h
@@ -122,7 +122,7 @@ typedef struct CCTable {
 
 extern CCTable cc_table[];
 
-void load_seg(int seg_reg, int selector, unsigned cur_eip);
+void load_seg(int seg_reg, int selector);
 void helper_ljmp_protected_T0_T1(void);
 void helper_lcall_real_T0_T1(int shift, int next_eip);
 void helper_lcall_protected_T0_T1(int shift, int next_eip);
target-i386/helper.c
@@ -676,6 +676,8 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         ssp = get_seg_base(ss_e1, ss_e2);
     } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
         /* to same priviledge */
+        if (env->eflags & VM_MASK)
+            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         new_stack = 0;
         sp_mask = get_sp_mask(env->segs[R_SS].flags);
         ssp = env->segs[R_SS].base;
@@ -702,13 +704,13 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
     else
         old_eip = env->eip;
     if (shift == 1) {
-        if (env->eflags & VM_MASK) {
-            PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
-            PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
-            PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
-            PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
-        }
         if (new_stack) {
+            if (env->eflags & VM_MASK) {
+                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
+                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
+                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
+                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
+            }
             PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
             PUSHL(ssp, esp, sp_mask, ESP);
         }
@@ -720,6 +722,12 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         }
     } else {
         if (new_stack) {
+            if (env->eflags & VM_MASK) {
+                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
+                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
+                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
+                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
+            }
             PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
             PUSHW(ssp, esp, sp_mask, ESP);
         }
@@ -732,6 +740,18 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
     }
 
     if (new_stack) {
+        if (env->eflags & VM_MASK) {
+            /* XXX: explain me why W2K hangs if the whole segment cache is
+               reset ? */
+            env->segs[R_ES].selector = 0;
+            env->segs[R_ES].flags = 0;
+            env->segs[R_DS].selector = 0;
+            env->segs[R_DS].flags = 0;
+            env->segs[R_FS].selector = 0;
+            env->segs[R_FS].flags = 0;
+            env->segs[R_GS].selector = 0;
+            env->segs[R_GS].flags = 0;
+        }
         ss = (ss & ~3) | dpl;
         cpu_x86_load_seg_cache(env, R_SS, ss,
                                ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
@@ -824,22 +844,37 @@ void do_interrupt_user(int intno, int is_int, int error_code,
 void do_interrupt(int intno, int is_int, int error_code,
                   unsigned int next_eip, int is_hw)
 {
+#if 0
+    {
+        extern FILE *stdout;
+        static int count;
+        if (env->cr[0] & CR0_PE_MASK) {
+            fprintf(stdout, "%d: interrupt: vector=%02x error_code=%04x int=%d\n",
+                    count, intno, error_code, is_int);
+            count++;
+        }
+    }
+    if ((env->cr[0] & CR0_PE_MASK) && intno == 0x10) {
+        tb_flush(env);
+        cpu_set_log(CPU_LOG_ALL);
+    }
+#endif
 #ifdef DEBUG_PCALL
     if (loglevel) {
         static int count;
         fprintf(logfile, "%d: interrupt: vector=%02x error_code=%04x int=%d\n",
                 count, intno, error_code, is_int);
         cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
-#if 0
+#if 1
         {
             int i;
             uint8_t *ptr;
-            printf(" code=");
+            fprintf(logfile, " code=");
             ptr = env->segs[R_CS].base + env->eip;
             for(i = 0; i < 16; i++) {
-                printf(" %02x", ldub(ptr + i));
+                fprintf(logfile, " %02x", ldub(ptr + i));
             }
-            printf("\n");
+            fprintf(logfile, "\n");
         }
 #endif
         count++;
@@ -955,7 +990,6 @@ void helper_cmpxchg8b(void)
     CC_SRC = eflags;
 }
 
-/* We simulate a pre-MMX pentium as in valgrind */
 #define CPUID_FP87 (1 << 0)
 #define CPUID_VME (1 << 1)
 #define CPUID_DE (1 << 2)
@@ -979,31 +1013,43 @@ void helper_cmpxchg8b(void)
 
 void helper_cpuid(void)
 {
-    if (EAX == 0) {
-        EAX = 1; /* max EAX index supported */
+    switch(EAX) {
+    case 0:
+        EAX = 2; /* max EAX index supported */
         EBX = 0x756e6547;
         ECX = 0x6c65746e;
         EDX = 0x49656e69;
-    } else if (EAX == 1) {
-        int family, model, stepping;
-        /* EAX = 1 info */
+        break;
+    case 1:
+        {
+            int family, model, stepping;
+            /* EAX = 1 info */
 #if 0
-        /* pentium 75-200 */
-        family = 5;
-        model = 2;
-        stepping = 11;
+            /* pentium 75-200 */
+            family = 5;
+            model = 2;
+            stepping = 11;
 #else
-        /* pentium pro */
-        family = 6;
-        model = 1;
-        stepping = 3;
+            /* pentium pro */
+            family = 6;
+            model = 1;
+            stepping = 3;
 #endif
-        EAX = (family << 8) | (model << 4) | stepping;
+            EAX = (family << 8) | (model << 4) | stepping;
+            EBX = 0;
+            ECX = 0;
+            EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
+                CPUID_TSC | CPUID_MSR | CPUID_MCE |
+                CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
+        }
+        break;
+    default:
+        /* cache info: needed for Pentium Pro compatibility */
+        EAX = 0x410601;
         EBX = 0;
         ECX = 0;
-        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
-            CPUID_TSC | CPUID_MSR | CPUID_MCE |
-            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
+        EDX = 0;
+        break;
     }
 }
 
@@ -1070,14 +1116,14 @@ void helper_ltr_T0(void)
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
         load_seg_cache_raw_dt(&env->tr, e1, e2);
-        e2 |= 0x00000200; /* set the busy bit */
+        e2 |= DESC_TSS_BUSY_MASK;
         stl_kernel(ptr + 4, e2);
     }
     env->tr.selector = selector;
 }
 
 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
-void load_seg(int seg_reg, int selector, unsigned int cur_eip)
+void load_seg(int seg_reg, int selector)
 {
     uint32_t e1, e2;
     int cpl, dpl, rpl;
@@ -1085,14 +1131,12 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip)
     int index;
     uint8_t *ptr;
 
+    selector &= 0xffff;
     if ((selector & 0xfffc) == 0) {
         /* null selector case */
-        if (seg_reg == R_SS) {
-            EIP = cur_eip;
+        if (seg_reg == R_SS)
             raise_exception_err(EXCP0D_GPF, 0);
-        } else {
-            cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
-        }
+        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
     } else {
 
         if (selector & 0x4)
@@ -1100,49 +1144,36 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip)
         else
             dt = &env->gdt;
         index = selector & ~7;
-        if ((index + 7) > dt->limit) {
-            EIP = cur_eip;
+        if ((index + 7) > dt->limit)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
-        }
         ptr = dt->base + index;
         e1 = ldl_kernel(ptr);
         e2 = ldl_kernel(ptr + 4);
 
-        if (!(e2 & DESC_S_MASK)) {
-            EIP = cur_eip;
+        if (!(e2 & DESC_S_MASK))
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
-        }
         rpl = selector & 3;
         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
         cpl = env->hflags & HF_CPL_MASK;
         if (seg_reg == R_SS) {
             /* must be writable segment */
-            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
-                EIP = cur_eip;
+            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
-            }
-            if (rpl != cpl || dpl != cpl) {
-                EIP = cur_eip;
+            if (rpl != cpl || dpl != cpl)
                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
-            }
         } else {
             /* must be readable segment */
-            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
-                EIP = cur_eip;
+            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
-            }
 
             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                 /* if not conforming code, test rights */
-                if (dpl < cpl || dpl < rpl) {
-                    EIP = cur_eip;
+                if (dpl < cpl || dpl < rpl)
                     raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
-                }
             }
         }
 
         if (!(e2 & DESC_P_MASK)) {
-            EIP = cur_eip;
             if (seg_reg == R_SS)
                 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
             else
@@ -1507,6 +1538,21 @@ void helper_iret_real(int shift)
     load_eflags(new_eflags, eflags_mask);
 }
 
+static inline void validate_seg(int seg_reg, int cpl)
+{
+    int dpl;
+    uint32_t e2;
+
+    e2 = env->segs[seg_reg].flags;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+        /* data or non conforming code segment */
+        if (dpl < cpl) {
+            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
+        }
+    }
+}
+
 /* protected mode iret */
 static inline void helper_ret_protected(int shift, int is_iret, int addend)
 {
@@ -1610,6 +1656,12 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
         cpu_x86_set_cpl(env, rpl);
         sp = new_esp;
         /* XXX: change sp_mask according to old segment ? */
+
+        /* validate data segments */
+        validate_seg(R_ES, cpl);
+        validate_seg(R_DS, cpl);
+        validate_seg(R_FS, cpl);
+        validate_seg(R_GS, cpl);
     }
     ESP = (ESP & ~sp_mask) | (sp & sp_mask);
     env->eip = new_eip;