Commit f419b32104ef9da81b52143cc10e90f235c2441a

Authored by bellard
1 parent 8d9bfc2b

sysret fix - better cpuid support - lcall support for x86_64 - efer access in i386 emulation


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1373 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 110 additions and 39 deletions
target-i386/helper.c
@@ -933,6 +933,7 @@ static void do_interrupt64(int intno, int is_int, int error_code,
933 } 933 }
934 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 934 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
935 } 935 }
  936 +#endif
936 937
937 void helper_syscall(int next_eip_addend) 938 void helper_syscall(int next_eip_addend)
938 { 939 {
@@ -942,6 +943,7 @@ void helper_syscall(int next_eip_addend)
942 raise_exception_err(EXCP06_ILLOP, 0); 943 raise_exception_err(EXCP06_ILLOP, 0);
943 } 944 }
944 selector = (env->star >> 32) & 0xffff; 945 selector = (env->star >> 32) & 0xffff;
  946 +#ifdef TARGET_X86_64
945 if (env->hflags & HF_LMA_MASK) { 947 if (env->hflags & HF_LMA_MASK) {
946 ECX = env->eip + next_eip_addend; 948 ECX = env->eip + next_eip_addend;
947 env->regs[11] = compute_eflags(); 949 env->regs[11] = compute_eflags();
@@ -962,7 +964,9 @@ void helper_syscall(int next_eip_addend)
962 env->eip = env->lstar; 964 env->eip = env->lstar;
963 else 965 else
964 env->eip = env->cstar; 966 env->eip = env->cstar;
965 - } else { 967 + } else
  968 +#endif
  969 + {
966 ECX = (uint32_t)(env->eip + next_eip_addend); 970 ECX = (uint32_t)(env->eip + next_eip_addend);
967 971
968 cpu_x86_set_cpl(env, 0); 972 cpu_x86_set_cpl(env, 0);
@@ -985,11 +989,15 @@ void helper_sysret(int dflag)
985 { 989 {
986 int cpl, selector; 990 int cpl, selector;
987 991
  992 + if (!(env->efer & MSR_EFER_SCE)) {
  993 + raise_exception_err(EXCP06_ILLOP, 0);
  994 + }
988 cpl = env->hflags & HF_CPL_MASK; 995 cpl = env->hflags & HF_CPL_MASK;
989 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { 996 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
990 raise_exception_err(EXCP0D_GPF, 0); 997 raise_exception_err(EXCP0D_GPF, 0);
991 } 998 }
992 selector = (env->star >> 48) & 0xffff; 999 selector = (env->star >> 48) & 0xffff;
  1000 +#ifdef TARGET_X86_64
993 if (env->hflags & HF_LMA_MASK) { 1001 if (env->hflags & HF_LMA_MASK) {
994 if (dflag == 2) { 1002 if (dflag == 2) {
995 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 1003 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
@@ -1015,7 +1023,9 @@ void helper_sysret(int dflag)
1015 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK | 1023 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1016 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK); 1024 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1017 cpu_x86_set_cpl(env, 3); 1025 cpu_x86_set_cpl(env, 3);
1018 - } else { 1026 + } else
  1027 +#endif
  1028 + {
1019 cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1029 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1020 0, 0xffffffff, 1030 0, 0xffffffff,
1021 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1031 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
@@ -1030,8 +1040,15 @@ void helper_sysret(int dflag)
1030 env->eflags |= IF_MASK; 1040 env->eflags |= IF_MASK;
1031 cpu_x86_set_cpl(env, 3); 1041 cpu_x86_set_cpl(env, 3);
1032 } 1042 }
1033 -} 1043 +#ifdef USE_KQEMU
  1044 + if (kqemu_is_ok(env)) {
  1045 + if (env->hflags & HF_LMA_MASK)
  1046 + CC_OP = CC_OP_EFLAGS;
  1047 + env->exception_index = -1;
  1048 + cpu_loop_exit();
  1049 + }
1034 #endif 1050 #endif
  1051 +}
1035 1052
1036 /* real mode interrupt */ 1053 /* real mode interrupt */
1037 static void do_interrupt_real(int intno, int is_int, int error_code, 1054 static void do_interrupt_real(int intno, int is_int, int error_code,
@@ -1265,9 +1282,21 @@ void helper_cmpxchg8b(void)
1265 1282
1266 void helper_cpuid(void) 1283 void helper_cpuid(void)
1267 { 1284 {
1268 - switch((uint32_t)EAX) { 1285 + uint32_t index;
  1286 + index = (uint32_t)EAX;
  1287 +
  1288 + /* test if maximum index reached */
  1289 + if (index & 0x80000000) {
  1290 + if (index > env->cpuid_xlevel)
  1291 + index = env->cpuid_level;
  1292 + } else {
  1293 + if (index > env->cpuid_level)
  1294 + index = env->cpuid_level;
  1295 + }
  1296 +
  1297 + switch(index) {
1269 case 0: 1298 case 0:
1270 - EAX = 2; /* max EAX index supported */ 1299 + EAX = env->cpuid_level;
1271 EBX = env->cpuid_vendor1; 1300 EBX = env->cpuid_vendor1;
1272 EDX = env->cpuid_vendor2; 1301 EDX = env->cpuid_vendor2;
1273 ECX = env->cpuid_vendor3; 1302 ECX = env->cpuid_vendor3;
@@ -1278,16 +1307,15 @@ void helper_cpuid(void)
1278 ECX = env->cpuid_ext_features; 1307 ECX = env->cpuid_ext_features;
1279 EDX = env->cpuid_features; 1308 EDX = env->cpuid_features;
1280 break; 1309 break;
1281 - default: 1310 + case 2:
1282 /* cache info: needed for Pentium Pro compatibility */ 1311 /* cache info: needed for Pentium Pro compatibility */
1283 EAX = 0x410601; 1312 EAX = 0x410601;
1284 EBX = 0; 1313 EBX = 0;
1285 ECX = 0; 1314 ECX = 0;
1286 EDX = 0; 1315 EDX = 0;
1287 break; 1316 break;
1288 -#ifdef TARGET_X86_64  
1289 case 0x80000000: 1317 case 0x80000000:
1290 - EAX = 0x80000008; 1318 + EAX = env->cpuid_xlevel;
1291 EBX = env->cpuid_vendor1; 1319 EBX = env->cpuid_vendor1;
1292 EDX = env->cpuid_vendor2; 1320 EDX = env->cpuid_vendor2;
1293 ECX = env->cpuid_vendor3; 1321 ECX = env->cpuid_vendor3;
@@ -1296,8 +1324,15 @@ void helper_cpuid(void)
1296 EAX = env->cpuid_features; 1324 EAX = env->cpuid_features;
1297 EBX = 0; 1325 EBX = 0;
1298 ECX = 0; 1326 ECX = 0;
1299 - /* long mode + syscall/sysret features */  
1300 - EDX = (env->cpuid_features & 0x0183F3FF) | (1 << 29) | (1 << 11); 1327 + EDX = env->cpuid_ext2_features;
  1328 + break;
  1329 + case 0x80000002:
  1330 + case 0x80000003:
  1331 + case 0x80000004:
  1332 + EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
  1333 + EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
  1334 + ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
  1335 + EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1301 break; 1336 break;
1302 case 0x80000008: 1337 case 0x80000008:
1303 /* virtual & phys address size in low 2 bytes. */ 1338 /* virtual & phys address size in low 2 bytes. */
@@ -1306,7 +1341,13 @@ void helper_cpuid(void)
1306 ECX = 0; 1341 ECX = 0;
1307 EDX = 0; 1342 EDX = 0;
1308 break; 1343 break;
1309 -#endif 1344 + default:
  1345 + /* reserved values: zero */
  1346 + EAX = 0;
  1347 + EBX = 0;
  1348 + ECX = 0;
  1349 + EDX = 0;
  1350 + break;
1310 } 1351 }
1311 } 1352 }
1312 1353
@@ -1523,11 +1564,11 @@ void load_seg(int seg_reg, int selector)
1523 } 1564 }
1524 1565
1525 /* protected mode jump */ 1566 /* protected mode jump */
1526 -void helper_ljmp_protected_T0_T1(int next_eip) 1567 +void helper_ljmp_protected_T0_T1(int next_eip_addend)
1527 { 1568 {
1528 int new_cs, gate_cs, type; 1569 int new_cs, gate_cs, type;
1529 uint32_t e1, e2, cpl, dpl, rpl, limit; 1570 uint32_t e1, e2, cpl, dpl, rpl, limit;
1530 - target_ulong new_eip; 1571 + target_ulong new_eip, next_eip;
1531 1572
1532 new_cs = T0; 1573 new_cs = T0;
1533 new_eip = T1; 1574 new_eip = T1;
@@ -1573,6 +1614,7 @@ void helper_ljmp_protected_T0_T1(int next_eip)
1573 case 5: /* task gate */ 1614 case 5: /* task gate */
1574 if (dpl < cpl || dpl < rpl) 1615 if (dpl < cpl || dpl < rpl)
1575 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 1616 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
  1617 + next_eip = env->eip + next_eip_addend;
1576 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip); 1618 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
1577 break; 1619 break;
1578 case 4: /* 286 call gate */ 1620 case 4: /* 286 call gate */
@@ -1638,16 +1680,17 @@ void helper_lcall_real_T0_T1(int shift, int next_eip)
1638 } 1680 }
1639 1681
1640 /* protected mode call */ 1682 /* protected mode call */
1641 -void helper_lcall_protected_T0_T1(int shift, int next_eip) 1683 +void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
1642 { 1684 {
1643 int new_cs, new_eip, new_stack, i; 1685 int new_cs, new_eip, new_stack, i;
1644 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; 1686 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1645 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask; 1687 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
1646 uint32_t val, limit, old_sp_mask; 1688 uint32_t val, limit, old_sp_mask;
1647 - target_ulong ssp, old_ssp; 1689 + target_ulong ssp, old_ssp, next_eip;
1648 1690
1649 new_cs = T0; 1691 new_cs = T0;
1650 new_eip = T1; 1692 new_eip = T1;
  1693 + next_eip = env->eip + next_eip_addend;
1651 #ifdef DEBUG_PCALL 1694 #ifdef DEBUG_PCALL
1652 if (loglevel & CPU_LOG_PCALL) { 1695 if (loglevel & CPU_LOG_PCALL) {
1653 fprintf(logfile, "lcall %04x:%08x s=%d\n", 1696 fprintf(logfile, "lcall %04x:%08x s=%d\n",
@@ -1684,25 +1727,43 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
1684 if (!(e2 & DESC_P_MASK)) 1727 if (!(e2 & DESC_P_MASK))
1685 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); 1728 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1686 1729
1687 - sp = ESP;  
1688 - sp_mask = get_sp_mask(env->segs[R_SS].flags);  
1689 - ssp = env->segs[R_SS].base;  
1690 - if (shift) {  
1691 - PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);  
1692 - PUSHL(ssp, sp, sp_mask, next_eip);  
1693 - } else {  
1694 - PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);  
1695 - PUSHW(ssp, sp, sp_mask, next_eip); 1730 +#ifdef TARGET_X86_64
  1731 + /* XXX: check 16/32 bit cases in long mode */
  1732 + if (shift == 2) {
  1733 + target_ulong rsp;
  1734 + /* 64 bit case */
  1735 + rsp = ESP;
  1736 + PUSHQ(rsp, env->segs[R_CS].selector);
  1737 + PUSHQ(rsp, next_eip);
  1738 + /* from this point, not restartable */
  1739 + ESP = rsp;
  1740 + cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
  1741 + get_seg_base(e1, e2),
  1742 + get_seg_limit(e1, e2), e2);
  1743 + EIP = new_eip;
  1744 + } else
  1745 +#endif
  1746 + {
  1747 + sp = ESP;
  1748 + sp_mask = get_sp_mask(env->segs[R_SS].flags);
  1749 + ssp = env->segs[R_SS].base;
  1750 + if (shift) {
  1751 + PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
  1752 + PUSHL(ssp, sp, sp_mask, next_eip);
  1753 + } else {
  1754 + PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
  1755 + PUSHW(ssp, sp, sp_mask, next_eip);
  1756 + }
  1757 +
  1758 + limit = get_seg_limit(e1, e2);
  1759 + if (new_eip > limit)
  1760 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
  1761 + /* from this point, not restartable */
  1762 + ESP = (ESP & ~sp_mask) | (sp & sp_mask);
  1763 + cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
  1764 + get_seg_base(e1, e2), limit, e2);
  1765 + EIP = new_eip;
1696 } 1766 }
1697 -  
1698 - limit = get_seg_limit(e1, e2);  
1699 - if (new_eip > limit)  
1700 - raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);  
1701 - /* from this point, not restartable */  
1702 - ESP = (ESP & ~sp_mask) | (sp & sp_mask);  
1703 - cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,  
1704 - get_seg_base(e1, e2), limit, e2);  
1705 - EIP = new_eip;  
1706 } else { 1767 } else {
1707 /* check gate type */ 1768 /* check gate type */
1708 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1769 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
@@ -2245,16 +2306,26 @@ void helper_wrmsr(void)
2245 case MSR_IA32_APICBASE: 2306 case MSR_IA32_APICBASE:
2246 cpu_set_apic_base(env, val); 2307 cpu_set_apic_base(env, val);
2247 break; 2308 break;
2248 -#ifdef TARGET_X86_64  
2249 case MSR_EFER: 2309 case MSR_EFER:
2250 -#define MSR_EFER_UPDATE_MASK (MSR_EFER_SCE | MSR_EFER_LME | \  
2251 - MSR_EFER_NXE | MSR_EFER_FFXSR)  
2252 - env->efer = (env->efer & ~MSR_EFER_UPDATE_MASK) |  
2253 - (val & MSR_EFER_UPDATE_MASK); 2310 + {
  2311 + uint64_t update_mask;
  2312 + update_mask = 0;
  2313 + if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
  2314 + update_mask |= MSR_EFER_SCE;
  2315 + if (env->cpuid_ext2_features & CPUID_EXT2_LM)
  2316 + update_mask |= MSR_EFER_LME;
  2317 + if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
  2318 + update_mask |= MSR_EFER_FFXSR;
  2319 + if (env->cpuid_ext2_features & CPUID_EXT2_NX)
  2320 + update_mask |= MSR_EFER_NXE;
  2321 + env->efer = (env->efer & ~update_mask) |
  2322 + (val & update_mask);
  2323 + }
2254 break; 2324 break;
2255 case MSR_STAR: 2325 case MSR_STAR:
2256 env->star = val; 2326 env->star = val;
2257 break; 2327 break;
  2328 +#ifdef TARGET_X86_64
2258 case MSR_LSTAR: 2329 case MSR_LSTAR:
2259 env->lstar = val; 2330 env->lstar = val;
2260 break; 2331 break;
@@ -2296,13 +2367,13 @@ void helper_rdmsr(void)
2296 case MSR_IA32_APICBASE: 2367 case MSR_IA32_APICBASE:
2297 val = cpu_get_apic_base(env); 2368 val = cpu_get_apic_base(env);
2298 break; 2369 break;
2299 -#ifdef TARGET_X86_64  
2300 case MSR_EFER: 2370 case MSR_EFER:
2301 val = env->efer; 2371 val = env->efer;
2302 break; 2372 break;
2303 case MSR_STAR: 2373 case MSR_STAR:
2304 val = env->star; 2374 val = env->star;
2305 break; 2375 break;
  2376 +#ifdef TARGET_X86_64
2306 case MSR_LSTAR: 2377 case MSR_LSTAR:
2307 val = env->lstar; 2378 val = env->lstar;
2308 break; 2379 break;