Commit 2e255c6b9f05f78a7effc4d1246c8a420680b810

Authored by bellard
Parent: 3f337316

faster and more accurate segment handling


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@373 c046a42c-6fe2-441c-8c8c-71466251a162
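
Summary (from the diff below): segment loads now go through a single inline
helper, cpu_x86_load_seg_cache(), which fills the segment cache and recomputes
the hidden flags (HF_CS32, HF_SS32, HF_ADDSEG) once at load time, instead of
cpu_exec() rederiving them before every translated block. A minimal sketch of
a real-mode call site, assuming only the declarations from cpu-i386.h
(CPUX86State, R_DS, cpu_x86_load_seg_cache); the wrapper name is illustrative,
not part of the commit:

    /* sketch: loading DS in real/vm86 mode with the new helper
       (example_load_ds_realmode is hypothetical, for illustration only) */
    static void example_load_ds_realmode(CPUX86State *env, int selector)
    {
        selector &= 0xffff;
        /* base = selector << 4, 64 KB limit, no descriptor flags */
        cpu_x86_load_seg_cache(env, R_DS, selector,
                               (uint8_t *)(selector << 4), 0xffff, 0);
    }
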
cpu-exec.c
@@ -182,7 +182,7 @@ int cpu_exec(CPUState *env1)
             tmp_T0 = T0;
 #endif
             interrupt_request = env->interrupt_request;
-            if (interrupt_request) {
+            if (__builtin_expect(interrupt_request, 0)) {
 #if defined(TARGET_I386)
                 /* if hardware interrupt pending, we execute it */
                 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -238,15 +238,7 @@ int cpu_exec(CPUState *env1)
                always be the same before a given translated block
                is executed. */
 #if defined(TARGET_I386)
-            flags = (env->segs[R_CS].flags & DESC_B_MASK)
-                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
-            flags |= (env->segs[R_SS].flags & DESC_B_MASK)
-                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
-            flags |= (((unsigned long)env->segs[R_DS].base |
-                       (unsigned long)env->segs[R_ES].base |
-                       (unsigned long)env->segs[R_SS].base) != 0) <<
-                HF_ADDSEG_SHIFT;
-            flags |= env->hflags;
+            flags = env->hflags;
             flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
             cs_base = env->segs[R_CS].base;
             pc = cs_base + env->eip;
@@ -402,13 +394,9 @@ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
     saved_env = env;
     env = s;
     if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
-        SegmentCache *sc;
         selector &= 0xffff;
-        sc = &env->segs[seg_reg];
-        sc->base = (void *)(selector << 4);
-        sc->limit = 0xffff;
-        sc->flags = 0;
-        sc->selector = selector;
+        cpu_x86_load_seg_cache(env, seg_reg, selector,
+                               (uint8_t *)(selector << 4), 0xffff, 0);
     } else {
         load_seg(seg_reg, selector, 0);
     }
cpu-i386.h
@@ -109,7 +109,7 @@
 #define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
 #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
 #define HF_CS32_MASK (1 << HF_CS32_SHIFT)
-#define HF_SS32_MASK (1 << HF_CS32_SHIFT)
+#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
 #define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
 
 #define CR0_PE_MASK (1 << 0)
@@ -323,8 +323,43 @@ int cpu_x86_exec(CPUX86State *s);
 void cpu_x86_close(CPUX86State *s);
 int cpu_x86_get_pic_interrupt(CPUX86State *s);
 
-/* needed to load some predefinied segment registers */
-void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
+/* this function must always be used to load data in the segment
+   cache: it synchronizes the hflags with the segment cache values */
+static inline void cpu_x86_load_seg_cache(CPUX86State *env,
+                                          int seg_reg, unsigned int selector,
+                                          uint8_t *base, unsigned int limit,
+                                          unsigned int flags)
+{
+    SegmentCache *sc;
+    unsigned int new_hflags;
+
+    sc = &env->segs[seg_reg];
+    sc->selector = selector;
+    sc->base = base;
+    sc->limit = limit;
+    sc->flags = flags;
+
+    /* update the hidden flags */
+    new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
+        >> (DESC_B_SHIFT - HF_CS32_SHIFT);
+    new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
+        >> (DESC_B_SHIFT - HF_SS32_SHIFT);
+    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+        /* XXX: try to avoid this test. The problem comes from the
+           fact that is real mode or vm86 mode we only modify the
+           'base' and 'selector' fields of the segment cache to go
+           faster. A solution may be to force addseg to one in
+           translate-i386.c. */
+        new_hflags |= HF_ADDSEG_MASK;
+    } else {
+        new_hflags |= (((unsigned long)env->segs[R_DS].base |
+                        (unsigned long)env->segs[R_ES].base |
+                        (unsigned long)env->segs[R_SS].base) != 0) <<
+            HF_ADDSEG_SHIFT;
+    }
+    env->hflags = (env->hflags &
+                   ~(HF_CS32_MASK | HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
+}
 
 /* wrapper, just in case memory mappings must be changed */
 static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
@@ -336,7 +371,9 @@ static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
 #endif
 }
 
-/* simulate fsave/frstor */
+/* the following helpers are only usable in user mode simulation as
+   they can trigger unexpected exceptions */
+void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
 void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
 void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);
 
helper-i386.c
@@ -182,6 +182,34 @@ static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
     return 0;
 }
 
+static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
+{
+    unsigned int limit;
+    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
+    if (e2 & DESC_G_MASK)
+        limit = (limit << 12) | 0xfff;
+    return limit;
+}
+
+static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
+{
+    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
+}
+
+static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
+{
+    sc->base = get_seg_base(e1, e2);
+    sc->limit = get_seg_limit(e1, e2);
+    sc->flags = e2;
+}
+
+/* init the segment cache in vm86 mode. */
+static inline void load_seg_vm(int seg, int selector)
+{
+    selector &= 0xffff;
+    cpu_x86_load_seg_cache(env, seg, selector,
+                           (uint8_t *)(selector << 4), 0xffff, 0);
+}
 
 /* protected mode interrupt */
 static void do_interrupt_protected(int intno, int is_int, int error_code,
@@ -288,7 +316,11 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
     if (new_stack) {
         old_esp = ESP;
         old_ss = env->segs[R_SS].selector;
-        load_seg(R_SS, ss, env->eip);
+        ss = (ss & ~3) | dpl;
+        cpu_x86_load_seg_cache(env, R_SS, ss,
+                               get_seg_base(ss_e1, ss_e2),
+                               get_seg_limit(ss_e1, ss_e2),
+                               ss_e2);
     } else {
         old_esp = 0;
         old_ss = 0;
@@ -299,7 +331,12 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
     else
         old_eip = env->eip;
     old_cs = env->segs[R_CS].selector;
-    load_seg(R_CS, selector, env->eip);
+    selector = (selector & ~3) | dpl;
+    cpu_x86_load_seg_cache(env, R_CS, selector,
+                           get_seg_base(e1, e2),
+                           get_seg_limit(e1, e2),
+                           e2);
+    cpu_x86_set_cpl(env, dpl);
     env->eip = offset;
     ESP = esp - push_size;
     ssp = env->segs[R_SS].base + esp;
@@ -593,15 +630,6 @@ void helper_cpuid(void)
     }
 }
 
-static inline void load_seg_cache(SegmentCache *sc, uint32_t e1, uint32_t e2)
-{
-    sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
-    sc->limit = (e1 & 0xffff) | (e2 & 0x000f0000);
-    if (e2 & DESC_G_MASK)
-        sc->limit = (sc->limit << 12) | 0xfff;
-    sc->flags = e2;
-}
-
 void helper_lldt_T0(void)
 {
     int selector;
@@ -629,7 +657,7 @@ void helper_lldt_T0(void)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-        load_seg_cache(&env->ldt, e1, e2);
+        load_seg_cache_raw_dt(&env->ldt, e1, e2);
     }
     env->ldt.selector = selector;
 }
@@ -664,30 +692,26 @@ void helper_ltr_T0(void)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-        load_seg_cache(&env->tr, e1, e2);
+        load_seg_cache_raw_dt(&env->tr, e1, e2);
         e2 |= 0x00000200; /* set the busy bit */
         stl(ptr + 4, e2);
     }
     env->tr.selector = selector;
 }
 
-/* only works if protected mode and not VM86 */
+/* only works if protected mode and not VM86. Calling load_seg with
+   seg_reg == R_CS is discouraged */
 void load_seg(int seg_reg, int selector, unsigned int cur_eip)
 {
-    SegmentCache *sc;
     uint32_t e1, e2;
 
-    sc = &env->segs[seg_reg];
     if ((selector & 0xfffc) == 0) {
         /* null selector case */
         if (seg_reg == R_SS) {
             EIP = cur_eip;
             raise_exception_err(EXCP0D_GPF, 0);
         } else {
-            /* XXX: each access should trigger an exception */
-            sc->base = NULL;
-            sc->limit = 0;
-            sc->flags = 0;
+            cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
         }
     } else {
         if (load_segment(&e1, &e2, selector) != 0) {
@@ -719,24 +743,22 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip)
             else
                 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
         }
-        load_seg_cache(sc, e1, e2);
+        cpu_x86_load_seg_cache(env, seg_reg, selector,
+                               get_seg_base(e1, e2),
+                               get_seg_limit(e1, e2),
+                               e2);
 #if 0
         fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
 #endif
     }
-    if (seg_reg == R_CS) {
-        cpu_x86_set_cpl(env, selector & 3);
-    }
-    sc->selector = selector;
 }
 
 /* protected mode jump */
 void helper_ljmp_protected_T0_T1(void)
 {
     int new_cs, new_eip;
-    SegmentCache sc1;
-    uint32_t e1, e2, cpl, dpl, rpl;
+    uint32_t e1, e2, cpl, dpl, rpl, limit;
 
     new_cs = T0;
     new_eip = T1;
@@ -763,13 +785,11 @@ void helper_ljmp_protected_T0_T1(void)
         }
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
-        load_seg_cache(&sc1, e1, e2);
-        if (new_eip > sc1.limit)
+        limit = get_seg_limit(e1, e2);
+        if (new_eip > limit)
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-        env->segs[R_CS].base = sc1.base;
-        env->segs[R_CS].limit = sc1.limit;
-        env->segs[R_CS].flags = sc1.flags;
-        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
+        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+                               get_seg_base(e1, e2), limit, e2);
         EIP = new_eip;
     } else {
         cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
@@ -816,10 +836,9 @@ void helper_lcall_real_T0_T1(int shift, int next_eip)
 void helper_lcall_protected_T0_T1(int shift, int next_eip)
 {
     int new_cs, new_eip;
-    SegmentCache sc1;
     uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
     uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
-    uint32_t old_ss, old_esp, val, i;
+    uint32_t old_ss, old_esp, val, i, limit;
     uint8_t *ssp, *old_ssp;
 
     new_cs = T0;
@@ -865,18 +884,16 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         }
         sp -= (4 << shift);
 
-        load_seg_cache(&sc1, e1, e2);
-        if (new_eip > sc1.limit)
+        limit = get_seg_limit(e1, e2);
+        if (new_eip > limit)
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
         /* from this point, not restartable */
         if (!(env->segs[R_SS].flags & DESC_B_MASK))
            ESP = (ESP & 0xffff0000) | (sp & 0xffff);
        else
            ESP = sp;
-        env->segs[R_CS].base = sc1.base;
-        env->segs[R_CS].limit = sc1.limit;
-        env->segs[R_CS].flags = sc1.flags;
-        env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
+        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+                               get_seg_base(e1, e2), limit, e2);
         EIP = new_eip;
     } else {
         /* check gate type */
@@ -947,7 +964,11 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
             old_ssp = env->segs[R_SS].base + old_esp;
 
             /* XXX: from this point not restartable */
-            load_seg(R_SS, ss, env->eip);
+            ss = (ss & ~3) | dpl;
+            cpu_x86_load_seg_cache(env, R_SS, ss,
+                                   get_seg_base(ss_e1, ss_e2),
+                                   get_seg_limit(ss_e1, ss_e2),
+                                   ss_e2);
 
             if (!(env->segs[R_SS].flags & DESC_B_MASK))
                 sp &= 0xffff;
@@ -994,7 +1015,13 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         }
 
         sp -= push_size;
-        load_seg(R_CS, selector, env->eip);
+        selector = (selector & ~3) | dpl;
+        cpu_x86_load_seg_cache(env, R_CS, selector,
+                               get_seg_base(e1, e2),
+                               get_seg_limit(e1, e2),
+                               e2);
+        cpu_x86_set_cpl(env, dpl);
+
         /* from this point, not restartable if same priviledge */
         if (!(env->segs[R_SS].flags & DESC_B_MASK))
             ESP = (ESP & 0xffff0000) | (sp & 0xffff);
@@ -1004,17 +1031,6 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
     }
 }
 
-/* init the segment cache in vm86 mode */
-static inline void load_seg_vm(int seg, int selector)
-{
-    SegmentCache *sc = &env->segs[seg];
-    selector &= 0xffff;
-    sc->base = (uint8_t *)(selector << 4);
-    sc->selector = selector;
-    sc->flags = 0;
-    sc->limit = 0xffff;
-}
-
 /* real mode iret */
 void helper_iret_real(int shift)
 {
@@ -1051,7 +1067,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
 {
     uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
     uint32_t new_es, new_ds, new_fs, new_gs;
-    uint32_t e1, e2;
+    uint32_t e1, e2, ss_e1, ss_e2;
     int cpl, dpl, rpl, eflags_mask;
     uint8_t *ssp;
 
@@ -1098,7 +1114,10 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
 
     if (rpl == cpl) {
         /* return to same priledge level */
-        load_seg(R_CS, new_cs, env->eip);
+        cpu_x86_load_seg_cache(env, R_CS, new_cs,
+                               get_seg_base(e1, e2),
+                               get_seg_limit(e1, e2),
+                               e2);
         new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
     } else {
         /* return to different priviledge level */
@@ -1115,20 +1134,27 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
 
         if ((new_ss & 3) != rpl)
             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        if (load_segment(&e1, &e2, new_ss) != 0)
+        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        if (!(e2 & DESC_S_MASK) ||
-            (e2 & DESC_CS_MASK) ||
-            !(e2 & DESC_W_MASK))
+        if (!(ss_e2 & DESC_S_MASK) ||
+            (ss_e2 & DESC_CS_MASK) ||
+            !(ss_e2 & DESC_W_MASK))
             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
         if (dpl != rpl)
             raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
-        if (!(e2 & DESC_P_MASK))
+        if (!(ss_e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
 
-        load_seg(R_CS, new_cs, env->eip);
-        load_seg(R_SS, new_ss, env->eip);
+        cpu_x86_load_seg_cache(env, R_CS, new_cs,
+                               get_seg_base(e1, e2),
+                               get_seg_limit(e1, e2),
+                               e2);
+        cpu_x86_load_seg_cache(env, R_SS, new_ss,
+                               get_seg_base(ss_e1, ss_e2),
+                               get_seg_limit(ss_e1, ss_e2),
+                               ss_e2);
+        cpu_x86_set_cpl(env, rpl);
     }
     if (env->segs[R_SS].flags & DESC_B_MASK)
         ESP = new_esp;
@@ -1137,6 +1163,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
             (new_esp & 0xffff);
     env->eip = new_eip;
     if (is_iret) {
+        /* NOTE: 'cpl' can be different from the current CPL */
         if (cpl == 0)
             eflags_mask = FL_UPDATE_CPL0_MASK;
         else
linux-user/main.c
@@ -433,6 +433,8 @@ int main(int argc, char **argv)
     env->user_mode_only = 1;
 
 #if defined(TARGET_I386)
+    cpu_x86_set_cpl(env, 3);
+
     env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
 
     /* linux register setup */
op-i386.c
@@ -890,6 +890,7 @@ void OPPROTO op_das(void)
 
 /* segment handling */
 
+/* never use it with R_CS */
 void OPPROTO op_movl_seg_T0(void)
 {
     load_seg(PARAM1, T0 & 0xffff, PARAM2);
translate-i386.c
@@ -1544,7 +1544,8 @@ static void gen_setcc(DisasContext *s, int b)
     }
 }
 
-/* move T0 to seg_reg and compute if the CPU state may change */
+/* move T0 to seg_reg and compute if the CPU state may change. Never
+   call this function with seg_reg == R_CS */
 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, unsigned int cur_eip)
 {
     if (s->pe && !s->vm86)
@@ -3590,12 +3590,12 @@ int main(int argc, char **argv)
     env->gdt.base = (void *)params->gdt_table;
     env->gdt.limit = sizeof(params->gdt_table) - 1;
 
-    cpu_x86_load_seg(env, R_CS, KERNEL_CS);
-    cpu_x86_load_seg(env, R_DS, KERNEL_DS);
-    cpu_x86_load_seg(env, R_ES, KERNEL_DS);
-    cpu_x86_load_seg(env, R_SS, KERNEL_DS);
-    cpu_x86_load_seg(env, R_FS, KERNEL_DS);
-    cpu_x86_load_seg(env, R_GS, KERNEL_DS);
+    cpu_x86_load_seg_cache(env, R_CS, KERNEL_CS, NULL, 0xffffffff, 0x00cf9a00);
+    cpu_x86_load_seg_cache(env, R_DS, KERNEL_DS, NULL, 0xffffffff, 0x00cf9200);
+    cpu_x86_load_seg_cache(env, R_ES, KERNEL_DS, NULL, 0xffffffff, 0x00cf9200);
+    cpu_x86_load_seg_cache(env, R_SS, KERNEL_DS, NULL, 0xffffffff, 0x00cf9200);
+    cpu_x86_load_seg_cache(env, R_FS, KERNEL_DS, NULL, 0xffffffff, 0x00cf9200);
+    cpu_x86_load_seg_cache(env, R_GS, KERNEL_DS, NULL, 0xffffffff, 0x00cf9200);
 
     env->eip = KERNEL_LOAD_ADDR;
     env->regs[R_ESI] = KERNEL_PARAMS_ADDR;
@@ -3627,12 +3627,12 @@ int main(int argc, char **argv)
     env->ldt.limit = 0xffff;
 
     /* not correct (CS base=0xffff0000) */
-    cpu_x86_load_seg(env, R_CS, 0xf000);
-    cpu_x86_load_seg(env, R_DS, 0);
-    cpu_x86_load_seg(env, R_ES, 0);
-    cpu_x86_load_seg(env, R_SS, 0);
-    cpu_x86_load_seg(env, R_FS, 0);
-    cpu_x86_load_seg(env, R_GS, 0);
+    cpu_x86_load_seg_cache(env, R_CS, 0xf000, (uint8_t *)0x000f0000, 0xffff, 0);
+    cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0xffff, 0);
+    cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0xffff, 0);
+    cpu_x86_load_seg_cache(env, R_SS, 0, NULL, 0xffff, 0);
+    cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0xffff, 0);
+    cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0xffff, 0);
 
     env->eip = 0xfff0;
     env->regs[R_EDX] = 0x600; /* indicate P6 processor */