Commit 8e682019e37c8f8939244fcf44a592fa6347d127
1 parent cf495bcf
correct zero segment values when coming from VM86 mode - cache infos in CPUID - simpler exception handling in load_seg() - validate segments after lret/iret
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@485 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 2 changed files with 107 additions and 55 deletions
target-i386/exec.h
| ... | ... | @@ -122,7 +122,7 @@ typedef struct CCTable { |
| 122 | 122 | |
| 123 | 123 | extern CCTable cc_table[]; |
| 124 | 124 | |
| 125 | -void load_seg(int seg_reg, int selector, unsigned cur_eip); | |
| 125 | +void load_seg(int seg_reg, int selector); | |
| 126 | 126 | void helper_ljmp_protected_T0_T1(void); |
| 127 | 127 | void helper_lcall_real_T0_T1(int shift, int next_eip); |
| 128 | 128 | void helper_lcall_protected_T0_T1(int shift, int next_eip); |
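Note on the prototype change: the cur_eip argument disappears because load_seg() no longer rewinds EIP by hand on every failure path before calling raise_exception_err() (see the load_seg() hunks in helper.c below); the fault is delivered by unwinding out of the helper, so the guest state is made consistent in one place rather than at each call site. That is my reading of the commit; the sketch below only illustrates the unwind pattern with plain setjmp/longjmp, not QEMU's actual exception machinery, and every name in it is made up.

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf jmp_env;        /* stand-in for the CPU loop's jump buffer */
    static unsigned int fault_eip; /* hypothetical "EIP to report on a fault" */

    static void raise_gpf(void)
    {
        longjmp(jmp_env, 1);       /* unwind out of the helper, never returns */
    }

    static void load_seg_like(int seg_is_ss, int selector)
    {
        /* one of the checks load_seg() performs: a null selector in SS faults */
        if (seg_is_ss && (selector & 0xfffc) == 0)
            raise_gpf();           /* no per-call-site "EIP = cur_eip" needed */
    }

    int main(void)
    {
        fault_eip = 0x1234;        /* recorded once, before the helper runs */
        if (setjmp(jmp_env)) {
            printf("#GP(0), guest would resume at eip=%#x\n", fault_eip);
            return 0;
        }
        load_seg_like(1, 0);
        printf("no fault\n");
        return 0;
    }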
target-i386/helper.c
| ... | ... | @@ -676,6 +676,8 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, |
| 676 | 676 | ssp = get_seg_base(ss_e1, ss_e2); |
| 677 | 677 | } else if ((e2 & DESC_C_MASK) || dpl == cpl) { |
| 678 | 678 | /* to same priviledge */ |
| 679 | + if (env->eflags & VM_MASK) | |
| 680 | + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | |
| 679 | 681 | new_stack = 0; |
| 680 | 682 | sp_mask = get_sp_mask(env->segs[R_SS].flags); |
| 681 | 683 | ssp = env->segs[R_SS].base; |
| ... | ... | @@ -702,13 +704,13 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, |
| 702 | 704 | else |
| 703 | 705 | old_eip = env->eip; |
| 704 | 706 | if (shift == 1) { |
| 705 | - if (env->eflags & VM_MASK) { | |
| 706 | - PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); | |
| 707 | - PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); | |
| 708 | - PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); | |
| 709 | - PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); | |
| 710 | - } | |
| 711 | 707 | if (new_stack) { |
| 708 | + if (env->eflags & VM_MASK) { | |
| 709 | + PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); | |
| 710 | + PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); | |
| 711 | + PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); | |
| 712 | + PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); | |
| 713 | + } | |
| 712 | 714 | PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); |
| 713 | 715 | PUSHL(ssp, esp, sp_mask, ESP); |
| 714 | 716 | } |
| ... | ... | @@ -720,6 +722,12 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, |
| 720 | 722 | } |
| 721 | 723 | } else { |
| 722 | 724 | if (new_stack) { |
| 725 | + if (env->eflags & VM_MASK) { | |
| 726 | + PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector); | |
| 727 | + PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector); | |
| 728 | + PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector); | |
| 729 | + PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector); | |
| 730 | + } | |
| 723 | 731 | PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); |
| 724 | 732 | PUSHW(ssp, esp, sp_mask, ESP); |
| 725 | 733 | } |
| ... | ... | @@ -732,6 +740,18 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, |
| 732 | 740 | } |
| 733 | 741 | |
| 734 | 742 | if (new_stack) { |
| 743 | + if (env->eflags & VM_MASK) { | |
| 744 | + /* XXX: explain me why W2K hangs if the whole segment cache is | |
| 745 | + reset ? */ | |
| 746 | + env->segs[R_ES].selector = 0; | |
| 747 | + env->segs[R_ES].flags = 0; | |
| 748 | + env->segs[R_DS].selector = 0; | |
| 749 | + env->segs[R_DS].flags = 0; | |
| 750 | + env->segs[R_FS].selector = 0; | |
| 751 | + env->segs[R_FS].flags = 0; | |
| 752 | + env->segs[R_GS].selector = 0; | |
| 753 | + env->segs[R_GS].flags = 0; | |
| 754 | + } | |
| 735 | 755 | ss = (ss & ~3) | dpl; |
| 736 | 756 | cpu_x86_load_seg_cache(env, R_SS, ss, |
| 737 | 757 | ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); |
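Note on the hunk above: when the interrupt is taken while EFLAGS.VM is set, the architected behaviour on the switch to the ring-0 handler is to push GS, FS, DS and ES (then SS:ESP, EFLAGS and CS:EIP) on the new stack and to load the four data segment registers with null selectors, so the protected-mode handler cannot accidentally keep using VM86 values; the commit clears only the selector and flags fields rather than the whole cached segment, per the W2K comment. A toy model of that save-then-clear sequence, with made-up selector values, purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* made-up VM86 selectors, pushed in the order GS, FS, DS, ES */
        uint16_t segs[4] = { 0xb800, 0x2000, 0x3000, 0x4000 };
        uint32_t stack[16];
        int sp = 16;

        for (int i = 0; i < 4; i++)
            stack[--sp] = segs[i];  /* saved so IRET can restore them on return to VM86 */
        for (int i = 0; i < 4; i++)
            segs[i] = 0;            /* what the ring-0 handler now sees */

        printf("live DS = %#x, saved DS on the ring-0 stack = %#x\n",
               (unsigned)segs[2], (unsigned)stack[sp + 1]);
        return 0;
    }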
| ... | ... | @@ -824,22 +844,37 @@ void do_interrupt_user(int intno, int is_int, int error_code, |
| 824 | 844 | void do_interrupt(int intno, int is_int, int error_code, |
| 825 | 845 | unsigned int next_eip, int is_hw) |
| 826 | 846 | { |
| 847 | +#if 0 | |
| 848 | + { | |
| 849 | + extern FILE *stdout; | |
| 850 | + static int count; | |
| 851 | + if (env->cr[0] & CR0_PE_MASK) { | |
| 852 | + fprintf(stdout, "%d: interrupt: vector=%02x error_code=%04x int=%d\n", | |
| 853 | + count, intno, error_code, is_int); | |
| 854 | + count++; | |
| 855 | + } | |
| 856 | + } | |
| 857 | + if ((env->cr[0] & CR0_PE_MASK) && intno == 0x10) { | |
| 858 | + tb_flush(env); | |
| 859 | + cpu_set_log(CPU_LOG_ALL); | |
| 860 | + } | |
| 861 | +#endif | |
| 827 | 862 | #ifdef DEBUG_PCALL |
| 828 | 863 | if (loglevel) { |
| 829 | 864 | static int count; |
| 830 | 865 | fprintf(logfile, "%d: interrupt: vector=%02x error_code=%04x int=%d\n", |
| 831 | 866 | count, intno, error_code, is_int); |
| 832 | 867 | cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP); |
| 833 | -#if 0 | |
| 868 | +#if 1 | |
| 834 | 869 | { |
| 835 | 870 | int i; |
| 836 | 871 | uint8_t *ptr; |
| 837 | - printf(" code="); | |
| 872 | + fprintf(logfile, " code="); | |
| 838 | 873 | ptr = env->segs[R_CS].base + env->eip; |
| 839 | 874 | for(i = 0; i < 16; i++) { |
| 840 | - printf(" %02x", ldub(ptr + i)); | |
| 875 | + fprintf(logfile, " %02x", ldub(ptr + i)); | |
| 841 | 876 | } |
| 842 | - printf("\n"); | |
| 877 | + fprintf(logfile, "\n"); | |
| 843 | 878 | } |
| 844 | 879 | #endif |
| 845 | 880 | count++; |
| ... | ... | @@ -955,7 +990,6 @@ void helper_cmpxchg8b(void) |
| 955 | 990 | CC_SRC = eflags; |
| 956 | 991 | } |
| 957 | 992 | |
| 958 | -/* We simulate a pre-MMX pentium as in valgrind */ | |
| 959 | 993 | #define CPUID_FP87 (1 << 0) |
| 960 | 994 | #define CPUID_VME (1 << 1) |
| 961 | 995 | #define CPUID_DE (1 << 2) |
| ... | ... | @@ -979,31 +1013,43 @@ void helper_cmpxchg8b(void) |
| 979 | 1013 | |
| 980 | 1014 | void helper_cpuid(void) |
| 981 | 1015 | { |
| 982 | - if (EAX == 0) { | |
| 983 | - EAX = 1; /* max EAX index supported */ | |
| 1016 | + switch(EAX) { | |
| 1017 | + case 0: | |
| 1018 | + EAX = 2; /* max EAX index supported */ | |
| 984 | 1019 | EBX = 0x756e6547; |
| 985 | 1020 | ECX = 0x6c65746e; |
| 986 | 1021 | EDX = 0x49656e69; |
| 987 | - } else if (EAX == 1) { | |
| 988 | - int family, model, stepping; | |
| 989 | - /* EAX = 1 info */ | |
| 1022 | + break; | |
| 1023 | + case 1: | |
| 1024 | + { | |
| 1025 | + int family, model, stepping; | |
| 1026 | + /* EAX = 1 info */ | |
| 990 | 1027 | #if 0 |
| 991 | - /* pentium 75-200 */ | |
| 992 | - family = 5; | |
| 993 | - model = 2; | |
| 994 | - stepping = 11; | |
| 1028 | + /* pentium 75-200 */ | |
| 1029 | + family = 5; | |
| 1030 | + model = 2; | |
| 1031 | + stepping = 11; | |
| 995 | 1032 | #else |
| 996 | - /* pentium pro */ | |
| 997 | - family = 6; | |
| 998 | - model = 1; | |
| 999 | - stepping = 3; | |
| 1033 | + /* pentium pro */ | |
| 1034 | + family = 6; | |
| 1035 | + model = 1; | |
| 1036 | + stepping = 3; | |
| 1000 | 1037 | #endif |
| 1001 | - EAX = (family << 8) | (model << 4) | stepping; | |
| 1038 | + EAX = (family << 8) | (model << 4) | stepping; | |
| 1039 | + EBX = 0; | |
| 1040 | + ECX = 0; | |
| 1041 | + EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE | | |
| 1042 | + CPUID_TSC | CPUID_MSR | CPUID_MCE | | |
| 1043 | + CPUID_CX8 | CPUID_PGE | CPUID_CMOV; | |
| 1044 | + } | |
| 1045 | + break; | |
| 1046 | + default: | |
| 1047 | + /* cache info: needed for Pentium Pro compatibility */ | |
| 1048 | + EAX = 0x410601; | |
| 1002 | 1049 | EBX = 0; |
| 1003 | 1050 | ECX = 0; |
| 1004 | - EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE | | |
| 1005 | - CPUID_TSC | CPUID_MSR | CPUID_MCE | | |
| 1006 | - CPUID_CX8 | CPUID_PGE | CPUID_CMOV; | |
| 1051 | + EDX = 0; | |
| 1052 | + break; | |
| 1007 | 1053 | } |
| 1008 | 1054 | } |
| 1009 | 1055 | |
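With this hunk, level 0 now advertises a maximum standard level of 2, and any leaf other than 0 and 1 falls through to the default case, which returns 0x410601 in EAX with EBX, ECX and EDX zero. Read with the usual Intel leaf-2 cache-descriptor convention (my interpretation, not stated in the commit), the low byte 0x01 means the leaf needs to be executed only once, 0x06 describes an 8 KB 4-way L1 instruction cache and 0x41 a 128 KB 4-way L2 cache -- a plausible Pentium Pro-style configuration, matching the "needed for Pentium Pro compatibility" comment. A small standalone decoder for that constant:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t eax = 0x410601;  /* what helper_cpuid() now returns for leaf 2 */

        /* Leaf 2 packs one descriptor per byte; the low byte of EAX is special
           and gives the number of times the leaf must be executed. */
        printf("iterations      : %u\n", (unsigned)(eax & 0xff));
        printf("descriptor bytes: 0x%02x 0x%02x 0x%02x\n",
               (unsigned)((eax >> 8) & 0xff),
               (unsigned)((eax >> 16) & 0xff),
               (unsigned)((eax >> 24) & 0xff));
        return 0;
    }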
| ... | ... | @@ -1070,14 +1116,14 @@ void helper_ltr_T0(void) |
| 1070 | 1116 | if (!(e2 & DESC_P_MASK)) |
| 1071 | 1117 | raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); |
| 1072 | 1118 | load_seg_cache_raw_dt(&env->tr, e1, e2); |
| 1073 | - e2 |= 0x00000200; /* set the busy bit */ | |
| 1119 | + e2 |= DESC_TSS_BUSY_MASK; | |
| 1074 | 1120 | stl_kernel(ptr + 4, e2); |
| 1075 | 1121 | } |
| 1076 | 1122 | env->tr.selector = selector; |
| 1077 | 1123 | } |
| 1078 | 1124 | |
| 1079 | 1125 | /* only works if protected mode and not VM86. seg_reg must be != R_CS */ |
| 1080 | -void load_seg(int seg_reg, int selector, unsigned int cur_eip) | |
| 1126 | +void load_seg(int seg_reg, int selector) | |
| 1081 | 1127 | { |
| 1082 | 1128 | uint32_t e1, e2; |
| 1083 | 1129 | int cpl, dpl, rpl; |
| ... | ... | @@ -1085,14 +1131,12 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip) |
| 1085 | 1131 | int index; |
| 1086 | 1132 | uint8_t *ptr; |
| 1087 | 1133 | |
| 1134 | + selector &= 0xffff; | |
| 1088 | 1135 | if ((selector & 0xfffc) == 0) { |
| 1089 | 1136 | /* null selector case */ |
| 1090 | - if (seg_reg == R_SS) { | |
| 1091 | - EIP = cur_eip; | |
| 1137 | + if (seg_reg == R_SS) | |
| 1092 | 1138 | raise_exception_err(EXCP0D_GPF, 0); |
| 1093 | - } else { | |
| 1094 | - cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0); | |
| 1095 | - } | |
| 1139 | + cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0); | |
| 1096 | 1140 | } else { |
| 1097 | 1141 | |
| 1098 | 1142 | if (selector & 0x4) |
| ... | ... | @@ -1100,49 +1144,36 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip) |
| 1100 | 1144 | else |
| 1101 | 1145 | dt = &env->gdt; |
| 1102 | 1146 | index = selector & ~7; |
| 1103 | - if ((index + 7) > dt->limit) { | |
| 1104 | - EIP = cur_eip; | |
| 1147 | + if ((index + 7) > dt->limit) | |
| 1105 | 1148 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1106 | - } | |
| 1107 | 1149 | ptr = dt->base + index; |
| 1108 | 1150 | e1 = ldl_kernel(ptr); |
| 1109 | 1151 | e2 = ldl_kernel(ptr + 4); |
| 1110 | 1152 | |
| 1111 | - if (!(e2 & DESC_S_MASK)) { | |
| 1112 | - EIP = cur_eip; | |
| 1153 | + if (!(e2 & DESC_S_MASK)) | |
| 1113 | 1154 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1114 | - } | |
| 1115 | 1155 | rpl = selector & 3; |
| 1116 | 1156 | dpl = (e2 >> DESC_DPL_SHIFT) & 3; |
| 1117 | 1157 | cpl = env->hflags & HF_CPL_MASK; |
| 1118 | 1158 | if (seg_reg == R_SS) { |
| 1119 | 1159 | /* must be writable segment */ |
| 1120 | - if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { | |
| 1121 | - EIP = cur_eip; | |
| 1160 | + if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) | |
| 1122 | 1161 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1123 | - } | |
| 1124 | - if (rpl != cpl || dpl != cpl) { | |
| 1125 | - EIP = cur_eip; | |
| 1162 | + if (rpl != cpl || dpl != cpl) | |
| 1126 | 1163 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1127 | - } | |
| 1128 | 1164 | } else { |
| 1129 | 1165 | /* must be readable segment */ |
| 1130 | - if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { | |
| 1131 | - EIP = cur_eip; | |
| 1166 | + if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) | |
| 1132 | 1167 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1133 | - } | |
| 1134 | 1168 | |
| 1135 | 1169 | if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { |
| 1136 | 1170 | /* if not conforming code, test rights */ |
| 1137 | - if (dpl < cpl || dpl < rpl) { | |
| 1138 | - EIP = cur_eip; | |
| 1171 | + if (dpl < cpl || dpl < rpl) | |
| 1139 | 1172 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1140 | - } | |
| 1141 | 1173 | } |
| 1142 | 1174 | } |
| 1143 | 1175 | |
| 1144 | 1176 | if (!(e2 & DESC_P_MASK)) { |
| 1145 | - EIP = cur_eip; | |
| 1146 | 1177 | if (seg_reg == R_SS) |
| 1147 | 1178 | raise_exception_err(EXCP0C_STACK, selector & 0xfffc); |
| 1148 | 1179 | else |
| ... | ... | @@ -1507,6 +1538,21 @@ void helper_iret_real(int shift) |
| 1507 | 1538 | load_eflags(new_eflags, eflags_mask); |
| 1508 | 1539 | } |
| 1509 | 1540 | |
| 1541 | +static inline void validate_seg(int seg_reg, int cpl) | |
| 1542 | +{ | |
| 1543 | + int dpl; | |
| 1544 | + uint32_t e2; | |
| 1545 | + | |
| 1546 | + e2 = env->segs[seg_reg].flags; | |
| 1547 | + dpl = (e2 >> DESC_DPL_SHIFT) & 3; | |
| 1548 | + if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { | |
| 1549 | + /* data or non conforming code segment */ | |
| 1550 | + if (dpl < cpl) { | |
| 1551 | + cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0); | |
| 1552 | + } | |
| 1553 | + } | |
| 1554 | +} | |
| 1555 | + | |
| 1510 | 1556 | /* protected mode iret */ |
| 1511 | 1557 | static inline void helper_ret_protected(int shift, int is_iret, int addend) |
| 1512 | 1558 | { |
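The new validate_seg() helper covers the architected rule checked on a return to an outer privilege level: a data segment or non-conforming code segment left in ES, DS, FS or GS whose DPL is numerically lower than the privilege level being returned to cannot be used there, so the register is loaded with the null selector (the lret/iret hunk below calls it for all four registers). A self-contained sketch of the same rule, using locally defined descriptor-flag constants that mirror the x86 descriptor layout -- my own reconstruction, not QEMU code:

    #include <stdio.h>
    #include <stdint.h>

    #define DESC_DPL_SHIFT 13          /* DPL field in the descriptor's high word */
    #define DESC_CS_MASK   (1 << 11)   /* set for code segments */
    #define DESC_C_MASK    (1 << 10)   /* set for conforming code segments */

    struct seg { uint16_t selector; uint32_t flags; };

    /* null the register if the segment cannot be used at the new, lower privilege */
    static void validate(struct seg *s, int new_cpl)
    {
        int dpl = (s->flags >> DESC_DPL_SHIFT) & 3;
        if (!(s->flags & DESC_CS_MASK) || !(s->flags & DESC_C_MASK)) {
            /* data or non-conforming code segment */
            if (dpl < new_cpl) {
                s->selector = 0;
                s->flags = 0;
            }
        }
    }

    int main(void)
    {
        struct seg ds = { 0x10, 0 << DESC_DPL_SHIFT };   /* ring-0 data segment */
        struct seg es = { 0x33, 3 << DESC_DPL_SHIFT };   /* ring-3 data segment */

        validate(&ds, 3);   /* iret/lret back to ring 3 */
        validate(&es, 3);

        printf("after return to ring 3: DS=%#x ES=%#x\n",
               (unsigned)ds.selector, (unsigned)es.selector);
        return 0;
    }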
| ... | ... | @@ -1610,6 +1656,12 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) |
| 1610 | 1656 | cpu_x86_set_cpl(env, rpl); |
| 1611 | 1657 | sp = new_esp; |
| 1612 | 1658 | /* XXX: change sp_mask according to old segment ? */ |
| 1659 | + | |
| 1660 | + /* validate data segments */ | |
| 1661 | + validate_seg(R_ES, cpl); | |
| 1662 | + validate_seg(R_DS, cpl); | |
| 1663 | + validate_seg(R_FS, cpl); | |
| 1664 | + validate_seg(R_GS, cpl); | |
| 1613 | 1665 | } |
| 1614 | 1666 | ESP = (ESP & ~sp_mask) | (sp & sp_mask); |
| 1615 | 1667 | env->eip = new_eip; |