Commit 8f091a59605092994c4b52c20b7173c514411e38

Authored by bellard
1 parent 2efbe911

x86_64 fixes (initial patch by Filip Navara)


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1517 c046a42c-6fe2-441c-8c8c-71466251a162

target-i386/cpu.h
@@ -214,6 +214,12 @@
 #define MSR_IA32_SYSENTER_ESP 0x175
 #define MSR_IA32_SYSENTER_EIP 0x176
 
+#define MSR_MCG_CAP 0x179
+#define MSR_MCG_STATUS 0x17a
+#define MSR_MCG_CTL 0x17b
+
+#define MSR_PAT 0x277
+
 #define MSR_EFER 0xc0000080
 
 #define MSR_EFER_SCE (1 << 0)
@@ -246,6 +252,8 @@
 #define CPUID_PGE (1 << 13)
 #define CPUID_MCA (1 << 14)
 #define CPUID_CMOV (1 << 15)
+#define CPUID_PAT (1 << 16)
+#define CPUID_CLFLUSH (1 << 19)
 /* ... */
 #define CPUID_MMX (1 << 23)
 #define CPUID_FXSR (1 << 24)
@@ -474,6 +482,8 @@ typedef struct CPUX86State {
     target_ulong kernelgsbase;
 #endif
 
+    uint64_t pat;
+
     /* temporary data for USE_CODE_COPY mode */
 #ifdef USE_CODE_COPY
     uint32_t tmp0;
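
The MSR_PAT index added above (0x277) is the value a guest loads into ECX for RDMSR/WRMSR to reach the new pat field in CPUX86State. A minimal guest-side sketch, not part of the patch (ring-0 only; the wrapper name is made up for illustration):

    static inline unsigned long long guest_rdmsr_pat(void)
    {
        unsigned int lo, hi;
        /* RDMSR: ECX selects the MSR, the 64-bit result comes back in EDX:EAX */
        __asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(0x277 /* MSR_PAT */));
        return ((unsigned long long)hi << 32) | lo;
    }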

target-i386/exec.h
@@ -157,11 +157,11 @@ void helper_lldt_T0(void);
 void helper_ltr_T0(void);
 void helper_movl_crN_T0(int reg);
 void helper_movl_drN_T0(int reg);
-void helper_invlpg(unsigned int addr);
+void helper_invlpg(target_ulong addr);
 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
-void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr);
+void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr);
 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                              int is_write, int is_user, int is_softmmu);
 void tlb_fill(target_ulong addr, int is_write, int is_user,
@@ -190,6 +190,7 @@ void helper_idivq_EAX_T0(void);
 void helper_cmpxchg8b(void);
 void helper_cpuid(void);
 void helper_enter_level(int level, int data32);
+void helper_enter64_level(int level, int data64);
 void helper_sysenter(void);
 void helper_sysexit(void);
 void helper_syscall(int next_eip_addend);
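
Widening the addr parameter of helper_invlpg() and cpu_x86_flush_tlb() from unsigned int to target_ulong matters on x86_64 because linear addresses no longer fit in 32 bits; with the old prototypes the upper half was silently dropped and the wrong page could be flushed. A standalone illustration of that truncation (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long addr64 = 0xffff800000001000ULL;  /* 64-bit kernel-space linear address */
        unsigned int truncated = (unsigned int)addr64;      /* what the old 32-bit prototype would receive */
        printf("full: %#llx  truncated: %#x\n", addr64, truncated);
        return 0;
    }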

target-i386/helper.c
@@ -1334,6 +1334,20 @@ void helper_cpuid(void)
         ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
         EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
         break;
+    case 0x80000005:
+        /* cache info (L1 cache) */
+        EAX = 0x01ff01ff;
+        EBX = 0x01ff01ff;
+        ECX = 0x40020140;
+        EDX = 0x40020140;
+        break;
+    case 0x80000006:
+        /* cache info (L2 cache) */
+        EAX = 0;
+        EBX = 0x42004200;
+        ECX = 0x02008140;
+        EDX = 0;
+        break;
     case 0x80000008:
         /* virtual & phys address size in low 2 bytes. */
         EAX = 0x00003028;
@@ -1383,6 +1397,37 @@ void helper_enter_level(int level, int data32)
     }
 }
 
+#ifdef TARGET_X86_64
+void helper_enter64_level(int level, int data64)
+{
+    target_ulong esp, ebp;
+    ebp = EBP;
+    esp = ESP;
+
+    if (data64) {
+        /* 64 bit */
+        esp -= 8;
+        while (--level) {
+            esp -= 8;
+            ebp -= 8;
+            stq(esp, ldq(ebp));
+        }
+        esp -= 8;
+        stq(esp, T1);
+    } else {
+        /* 16 bit */
+        esp -= 2;
+        while (--level) {
+            esp -= 2;
+            ebp -= 2;
+            stw(esp, lduw(ebp));
+        }
+        esp -= 2;
+        stw(esp, T1);
+    }
+}
+#endif
+
 void helper_lldt_T0(void)
 {
     int selector;
@@ -1963,6 +2008,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
 #endif
     sp_mask = get_sp_mask(env->segs[R_SS].flags);
     sp = ESP;
+    /* XXX: ssp is zero in 64 bit ? */
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
 #ifdef TARGET_X86_64
@@ -2271,7 +2317,7 @@ void helper_movl_drN_T0(int reg)
     env->dr[reg] = T0;
 }
 
-void helper_invlpg(unsigned int addr)
+void helper_invlpg(target_ulong addr)
 {
     cpu_x86_flush_tlb(env, addr);
 }
@@ -2332,6 +2378,9 @@ void helper_wrmsr(void)
     case MSR_STAR:
         env->star = val;
         break;
+    case MSR_PAT:
+        env->pat = val;
+        break;
 #ifdef TARGET_X86_64
     case MSR_LSTAR:
         env->lstar = val;
@@ -2380,6 +2429,9 @@ void helper_rdmsr(void)
     case MSR_STAR:
         val = env->star;
         break;
+    case MSR_PAT:
+        val = env->pat;
+        break;
 #ifdef TARGET_X86_64
     case MSR_LSTAR:
         val = env->lstar;
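
The constants loaded for the new CPUID leaves 0x80000005/0x80000006 are packed cache descriptors. Assuming the AMD-style extended-leaf layout these leaves conventionally follow (the field positions are background knowledge, not stated in the patch), the L2 descriptor returned in ECX decodes like this:

    #include <stdio.h>

    int main(void)
    {
        unsigned int ecx = 0x02008140;  /* value hard-coded for leaf 0x80000006 in helper_cpuid() */
        printf("L2: %u KB, associativity code %u, %u line(s) per tag, %u-byte lines\n",
               ecx >> 16, (ecx >> 12) & 0xf, (ecx >> 8) & 0xf, ecx & 0xff);
        return 0;
    }

which reports a 512 KB cache with 64-byte lines.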

target-i386/helper2.c
@@ -106,7 +106,9 @@ CPUX86State *cpu_x86_init(void)
         env->cpuid_version = (family << 8) | (model << 4) | stepping;
         env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                                CPUID_TSC | CPUID_MSR | CPUID_MCE |
-                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
+                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
+                               CPUID_PAT);
+        env->pat = 0x0007040600070406ULL;
         env->cpuid_ext_features = 0;
         env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
         env->cpuid_xlevel = 0;
@@ -128,6 +130,9 @@ CPUX86State *cpu_x86_init(void)
         env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
         env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL;
         env->cpuid_xlevel = 0x80000008;
+
+        /* these features are needed for Win64 and aren't fully implemented */
+        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
 #endif
     }
     cpu_single_env = env;
@@ -546,7 +551,7 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
 }
 
 /* XXX: also flush 4MB pages */
-void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
+void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
 {
     tlb_flush_page(env, addr);
 }
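
The reset value written to env->pat, 0x0007040600070406, is the architectural power-up default: eight one-byte entries PA0..PA7, each holding a memory-type code. A small decode sketch, not part of the patch (the type names are the usual PAT encodings):

    #include <stdio.h>

    int main(void)
    {
        /* PAT memory-type encodings: 0 = UC, 1 = WC, 4 = WT, 5 = WP, 6 = WB, 7 = UC- */
        static const char *type[8] = { "UC", "WC", "?", "?", "WT", "WP", "WB", "UC-" };
        unsigned long long pat = 0x0007040600070406ULL;   /* value from cpu_x86_init() above */
        int i;

        for (i = 0; i < 8; i++)
            printf("PA%d = %s\n", i, type[(pat >> (i * 8)) & 0x7]);
        return 0;
    }

This prints WB, WT, UC-, UC for PA0..PA3, and the pattern repeats for PA4..PA7.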

target-i386/op.c
@@ -898,6 +898,11 @@ void op_addw_ESP_im(void)
 }
 
 #ifdef TARGET_X86_64
+void op_subq_A0_2(void)
+{
+    A0 -= 2;
+}
+
 void op_subq_A0_8(void)
 {
     A0 -= 8;
@@ -929,6 +934,13 @@ void OPPROTO op_enter_level(void)
     helper_enter_level(PARAM1, PARAM2);
 }
 
+#ifdef TARGET_X86_64
+void OPPROTO op_enter64_level(void)
+{
+    helper_enter64_level(PARAM1, PARAM2);
+}
+#endif
+
 void OPPROTO op_sysenter(void)
 {
     helper_sysenter();

target-i386/translate.c
@@ -1627,7 +1627,14 @@ static void gen_add_A0_ds_seg(DisasContext *s)
         override = R_DS;
     }
     if (must_add_seg) {
-        gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+#ifdef TARGET_X86_64
+        if (CODE64(s)) {
+            gen_op_addq_A0_seg(offsetof(CPUX86State,segs[override].base));
+        } else
+#endif
+        {
+            gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+        }
     }
 }
 
@@ -1948,10 +1955,14 @@ static void gen_push_T0(DisasContext *s)
 {
 #ifdef TARGET_X86_64
     if (CODE64(s)) {
-        /* XXX: check 16 bit behaviour */
         gen_op_movq_A0_reg[R_ESP]();
-        gen_op_subq_A0_8();
-        gen_op_st_T0_A0[OT_QUAD + s->mem_index]();
+        if (s->dflag) {
+            gen_op_subq_A0_8();
+            gen_op_st_T0_A0[OT_QUAD + s->mem_index]();
+        } else {
+            gen_op_subq_A0_2();
+            gen_op_st_T0_A0[OT_WORD + s->mem_index]();
+        }
         gen_op_movq_ESP_A0();
     } else
 #endif
@@ -1985,10 +1996,14 @@ static void gen_push_T1(DisasContext *s)
 {
 #ifdef TARGET_X86_64
     if (CODE64(s)) {
-        /* XXX: check 16 bit behaviour */
         gen_op_movq_A0_reg[R_ESP]();
-        gen_op_subq_A0_8();
-        gen_op_st_T1_A0[OT_QUAD + s->mem_index]();
+        if (s->dflag) {
+            gen_op_subq_A0_8();
+            gen_op_st_T1_A0[OT_QUAD + s->mem_index]();
+        } else {
+            gen_op_subq_A0_2();
+            gen_op_st_T0_A0[OT_WORD + s->mem_index]();
+        }
         gen_op_movq_ESP_A0();
     } else
 #endif
@@ -2020,9 +2035,8 @@ static void gen_pop_T0(DisasContext *s)
 {
 #ifdef TARGET_X86_64
     if (CODE64(s)) {
-        /* XXX: check 16 bit behaviour */
         gen_op_movq_A0_reg[R_ESP]();
-        gen_op_ld_T0_A0[OT_QUAD + s->mem_index]();
+        gen_op_ld_T0_A0[(s->dflag ? OT_QUAD : OT_WORD) + s->mem_index]();
     } else
 #endif
     {
@@ -2041,7 +2055,7 @@ static void gen_pop_update(DisasContext *s)
 static void gen_pop_update(DisasContext *s)
 {
 #ifdef TARGET_X86_64
-    if (CODE64(s)) {
+    if (CODE64(s) && s->dflag) {
         gen_stack_update(s, 8);
     } else
 #endif
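
Taken together, the gen_push_T0/gen_push_T1/gen_pop_T0/gen_pop_update changes implement the long-mode stack rules: pushes and pops default to 8 bytes, a 0x66 operand-size prefix (which leaves s->dflag at 0 on these paths) selects 2 bytes, and 4-byte stack operations do not exist in 64-bit code. A tiny model of the size selection, mirroring the new branches (the helper name is illustrative, not from the patch):

    /* dflag convention in these hunks: 0 = 16-bit operand size (0x66 prefix),
       non-zero = the default 64-bit stack operation. */
    static int stack_op_bytes_code64(int dflag)
    {
        return dflag ? 8 : 2;   /* gen_op_subq_A0_8() vs gen_op_subq_A0_2() */
    }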
@@ -2105,26 +2119,48 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
 {
     int ot, opsize;
 
-    ot = s->dflag + OT_WORD;
     level &= 0x1f;
-    opsize = 2 << s->dflag;
-
-    gen_op_movl_A0_ESP();
-    gen_op_addl_A0_im(-opsize);
-    if (!s->ss32)
-        gen_op_andl_A0_ffff();
-    gen_op_movl_T1_A0();
-    if (s->addseg)
-        gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
-    /* push bp */
-    gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
-    gen_op_st_T0_A0[ot + s->mem_index]();
-    if (level) {
-        gen_op_enter_level(level, s->dflag);
+#ifdef TARGET_X86_64
+    if (CODE64(s)) {
+        ot = s->dflag ? OT_QUAD : OT_WORD;
+        opsize = 1 << ot;
+
+        gen_op_movl_A0_ESP();
+        gen_op_addq_A0_im(-opsize);
+        gen_op_movl_T1_A0();
+
+        /* push bp */
+        gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
+        gen_op_st_T0_A0[ot + s->mem_index]();
+        if (level) {
+            gen_op_enter64_level(level, (ot == OT_QUAD));
+        }
+        gen_op_mov_reg_T1[ot][R_EBP]();
+        gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
+        gen_op_mov_reg_T1[OT_QUAD][R_ESP]();
+    } else
+#endif
+    {
+        ot = s->dflag + OT_WORD;
+        opsize = 2 << s->dflag;
+
+        gen_op_movl_A0_ESP();
+        gen_op_addl_A0_im(-opsize);
+        if (!s->ss32)
+            gen_op_andl_A0_ffff();
+        gen_op_movl_T1_A0();
+        if (s->addseg)
+            gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
+        /* push bp */
+        gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
+        gen_op_st_T0_A0[ot + s->mem_index]();
+        if (level) {
+            gen_op_enter_level(level, s->dflag);
+        }
+        gen_op_mov_reg_T1[ot][R_EBP]();
+        gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
+        gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP]();
     }
-    gen_op_mov_reg_T1[ot][R_EBP]();
-    gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
-    gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP]();
 }
 
 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
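
In the new 64-bit branch of gen_enter(), opsize = 1 << ot relies on QEMU's operand-type numbering (OT_BYTE = 0, OT_WORD = 1, OT_LONG = 2, OT_QUAD = 3): OT_WORD yields a 2-byte and OT_QUAD an 8-byte frame slot, while the legacy branch keeps the old 2 << s->dflag form. A one-liner spelling that out (illustrative only, not from the patch):

    enum { OT_BYTE, OT_WORD, OT_LONG, OT_QUAD };   /* QEMU operand-size enum */

    static int enter_frame_slot_bytes(int ot)
    {
        return 1 << ot;   /* OT_WORD -> 2 bytes, OT_QUAD -> 8 bytes, as in gen_enter() */
    }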
@@ -2901,7 +2937,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
            if (mod != 3)
                goto illegal_op;
 #ifdef TARGET_X86_64
-            if (CODE64(s)) {
+            if (s->aflag == 2) {
                gen_op_movq_A0_reg[R_EDI]();
            } else
 #endif
@@ -3697,7 +3733,6 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
         break;
     case 0xc8: /* enter */
         {
-            /* XXX: long mode support */
            int level;
            val = lduw_code(s->pc);
            s->pc += 2;
@@ -3707,7 +3742,6 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
         break;
     case 0xc9: /* leave */
         /* XXX: exception not precise (ESP is updated before potential exception) */
-        /* XXX: may be invalid for 16 bit in long mode */
         if (CODE64(s)) {
             gen_op_mov_TN_reg[OT_QUAD][0][R_EBP]();
             gen_op_mov_reg_T0[OT_QUAD][R_ESP]();
@@ -3926,7 +3960,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
         else
             ot = dflag + OT_WORD;
 #ifdef TARGET_X86_64
-        if (CODE64(s)) {
+        if (s->aflag == 2) {
             offset_addr = ldq_code(s->pc);
             s->pc += 8;
             if (offset_addr == (int32_t)offset_addr)
@@ -3955,7 +3989,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
         break;
     case 0xd7: /* xlat */
 #ifdef TARGET_X86_64
-        if (CODE64(s)) {
+        if (s->aflag == 2) {
             gen_op_movq_A0_reg[R_EBX]();
             gen_op_addq_A0_AL();
         } else
@@ -4779,6 +4813,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
         val = ldsw_code(s->pc);
         s->pc += 2;
         gen_pop_T0(s);
+        if (CODE64(s) && s->dflag)
+            s->dflag = 2;
         gen_stack_update(s, val + (2 << s->dflag));
         if (s->dflag == 0)
             gen_op_andl_T0_ffff();
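
The two added lines sit in what appears to be the near 'ret imm16' path: in 64-bit mode (unless a 0x66 prefix forces 16-bit operands) the return address just read by gen_pop_T0() is 8 bytes, so s->dflag is bumped to 2 and the following gen_stack_update(s, val + (2 << s->dflag)) advances the stack pointer past an 8-byte return address rather than a 4-byte one. A small worked form of that adjustment (function name is illustrative only):

    /* Bytes to release after a RET imm16: return-address size plus the immediate. */
    static int ret_imm_stack_adjust(int imm16, int dflag)
    {
        return imm16 + (2 << dflag);   /* dflag = 2 in 64-bit mode -> imm16 + 8 */
    }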
@@ -5782,14 +5818,30 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
             break;
         case 5: /* lfence */
         case 6: /* mfence */
-        case 7: /* sfence */
             if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
                 goto illegal_op;
             break;
+        case 7: /* sfence / clflush */
+            if ((modrm & 0xc7) == 0xc0) {
+                /* sfence */
+                if (!(s->cpuid_features & CPUID_SSE))
+                    goto illegal_op;
+            } else {
+                /* clflush */
+                if (!(s->cpuid_features & CPUID_CLFLUSH))
+                    goto illegal_op;
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+            }
+            break;
         default:
             goto illegal_op;
         }
         break;
+    case 0x10d: /* prefetch */
+        modrm = ldub_code(s->pc++);
+        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+        /* ignore for now */
+        break;
     case 0x110 ... 0x117:
     case 0x128 ... 0x12f:
     case 0x150 ... 0x177: