Commit b6d78bfa0dc11a7a23cb7ccc9b00b217e6fb68fe

Author: bellard
Parent: c33a346e

correct CPL support (should fix flat real mode support)


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@343 c046a42c-6fe2-441c-8c8c-71466251a162
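
In outline, the change stops re-deriving the current privilege level at each use (0 in real mode, 3 in vm86 mode, otherwise the RPL bits of the CS selector) and instead caches it in a new cpl field of CPUX86State, updated through cpu_x86_set_cpl() whenever CS is loaded. A minimal sketch of the two approaches, for illustration only (old_get_cpl/new_get_cpl are hypothetical helpers; the constants and fields are those used in the hunks below):

    /* Before this commit: CPL recomputed from the CPU mode and CS selector. */
    static int old_get_cpl(CPUX86State *env)
    {
        if (!(env->cr[0] & CR0_PE_MASK))
            return 0;                          /* real mode */
        if (env->eflags & VM_MASK)
            return 3;                          /* vm86 mode */
        return env->segs[R_CS].selector & 3;   /* protected mode: CS RPL */
    }

    /* After this commit: CPL is read from the cached field, which is
       kept up to date by cpu_x86_set_cpl() when CS changes. */
    static int new_get_cpl(CPUX86State *env)
    {
        return env->cpl;
    }
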
cpu-exec.c
@@ -244,13 +244,8 @@ int cpu_exec(CPUState *env1)
                      (unsigned long)env->segs[R_ES].base |
                      (unsigned long)env->segs[R_SS].base) != 0) <<
                 GEN_FLAG_ADDSEG_SHIFT;
-            if (env->cr[0] & CR0_PE_MASK) {
-                if (!(env->eflags & VM_MASK))
-                    flags |= (env->segs[R_CS].selector & 3) <<
-                        GEN_FLAG_CPL_SHIFT;
-                else
-                    flags |= (1 << GEN_FLAG_VM_SHIFT);
-            }
+            flags |= env->cpl << GEN_FLAG_CPL_SHIFT;
+            flags |= (env->eflags & VM_MASK) >> (17 - GEN_FLAG_VM_SHIFT);
             flags |= (env->eflags & (IOPL_MASK | TF_MASK));
             cs_base = env->segs[R_CS].base;
             pc = cs_base + env->eip;
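
The new VM_MASK line is a branch-free way of copying the EFLAGS VM bit (bit 17) into the VM position of the translation flags word. A small self-contained illustration; GEN_FLAG_VM_SHIFT is given an assumed value here purely for the example:

    #define VM_MASK           (1 << 17)   /* EFLAGS.VM */
    #define GEN_FLAG_VM_SHIFT 9           /* assumed value, for illustration only */

    /* If EFLAGS.VM is set, the result has exactly bit GEN_FLAG_VM_SHIFT set;
       otherwise it is 0, so OR-ing it into flags is a no-op. */
    unsigned int vm_flag_bits(unsigned int eflags)
    {
        return (eflags & VM_MASK) >> (17 - GEN_FLAG_VM_SHIFT);
    }
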
cpu-i386.h
@@ -256,6 +256,7 @@ typedef struct CPUX86State {
     SegmentCache tr;
     SegmentCache gdt; /* only base and limit are used */
     SegmentCache idt; /* only base and limit are used */
+    int cpl; /* current cpl */
 
     /* sysenter registers */
     uint32_t sysenter_cs;
@@ -276,6 +277,7 @@ typedef struct CPUX86State {
 
     uint32_t breakpoints[MAX_BREAKPOINTS];
     int nb_breakpoints;
+    int singlestep_enabled;
 
     /* user data */
     void *opaque;
@@ -298,6 +300,12 @@ int cpu_x86_get_pic_interrupt(CPUX86State *s);
 /* needed to load some predefinied segment registers */
 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
 
+/* wrapper, just in case memory mappings must be changed */
+static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
+{
+    s->cpl = cpl;
+}
+
 /* simulate fsave/frstor */
 void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
 void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);
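
The setter is introduced as an inline wrapper rather than a bare assignment so that, as its comment says, any later change to the memory mappings on a privilege switch only has to touch one place. A hypothetical extension along those lines (not part of this commit, shown only to illustrate the intent) might look like:

    static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
    {
        if (s->cpl != cpl) {
            /* hypothetical hook: remap or flush user/supervisor
               address mappings here if they ever diverge */
        }
        s->cpl = cpl;
    }
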
helper-i386.c
@@ -189,7 +189,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
 {
     SegmentCache *dt;
     uint8_t *ptr, *ssp;
-    int type, dpl, cpl, selector, ss_dpl;
+    int type, dpl, selector, ss_dpl;
     int has_error_code, new_stack, shift;
     uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
     uint32_t old_cs, old_ss, old_esp, old_eip;
@@ -216,12 +216,8 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         break;
     }
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
-    if (env->eflags & VM_MASK)
-        cpl = 3;
-    else
-        cpl = env->segs[R_CS].selector & 3;
     /* check privledge if software int */
-    if (is_int && dpl < cpl)
+    if (is_int && dpl < env->cpl)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
     /* check valid bit */
     if (!(e2 & DESC_P_MASK))
@@ -236,11 +232,11 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
-    if (dpl > cpl)
+    if (dpl > env->cpl)
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     if (!(e2 & DESC_P_MASK))
         raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+    if (!(e2 & DESC_C_MASK) && dpl < env->cpl) {
         /* to inner priviledge */
         get_ss_esp_from_tss(&ss, &esp, dpl);
         if ((ss & 0xfffc) == 0)
@@ -259,7 +255,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         if (!(ss_e2 & DESC_P_MASK))
             raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
         new_stack = 1;
-    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+    } else if ((e2 & DESC_C_MASK) || dpl == env->cpl) {
         /* to same priviledge */
         new_stack = 0;
     } else {
@@ -406,7 +402,7 @@ void do_interrupt_user(int intno, int is_int, int error_code,
 {
     SegmentCache *dt;
     uint8_t *ptr;
-    int dpl, cpl;
+    int dpl;
     uint32_t e2;
 
     dt = &env->idt;
@@ -414,9 +410,8 @@ void do_interrupt_user(int intno, int is_int, int error_code,
     e2 = ldl(ptr + 4);
 
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
-    cpl = 3;
     /* check privledge if software int */
-    if (is_int && dpl < cpl)
+    if (is_int && dpl < env->cpl)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
 
     /* Since we emulate only user space, we cannot do more than
@@ -728,6 +723,9 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip)
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
 #endif
     }
+    if (seg_reg == R_CS) {
+        cpu_x86_set_cpl(env, selector & 3);
+    }
     sc->selector = selector;
 }
 
@@ -744,7 +742,7 @@ void helper_ljmp_protected_T0_T1(void)
         raise_exception_err(EXCP0D_GPF, 0);
     if (load_segment(&e1, &e2, new_cs) != 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->segs[R_CS].selector & 3;
+    cpl = env->cpl;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -828,7 +826,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         raise_exception_err(EXCP0D_GPF, 0);
     if (load_segment(&e1, &e2, new_cs) != 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->segs[R_CS].selector & 3;
+    cpl = env->cpl;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -1081,7 +1079,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
     if (!(e2 & DESC_S_MASK) ||
         !(e2 & DESC_CS_MASK))
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->segs[R_CS].selector & 3;
+    cpl = env->cpl;
     rpl = new_cs & 3;
     if (rpl < cpl)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -1158,12 +1156,13 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
     /* modify processor state */
     load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
     load_seg_vm(R_CS, new_cs);
+    cpu_x86_set_cpl(env, 3);
     load_seg_vm(R_SS, new_ss);
     load_seg_vm(R_ES, new_es);
     load_seg_vm(R_DS, new_ds);
     load_seg_vm(R_FS, new_fs);
     load_seg_vm(R_GS, new_gs);
-
+
     env->eip = new_eip;
     env->regs[R_ESP] = new_esp;
 }
translate-i386.c
@@ -4113,13 +4113,7 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     dc->addseg = (flags >> GEN_FLAG_ADDSEG_SHIFT) & 1;
     dc->f_st = (flags >> GEN_FLAG_ST_SHIFT) & 7;
     dc->vm86 = (flags >> GEN_FLAG_VM_SHIFT) & 1;
-    /* CPL is implicit if real mode or vm86 mode */
-    if (!dc->pe)
-        dc->cpl = 0;
-    else if (dc->vm86)
-        dc->cpl = 3;
-    else
-        dc->cpl = (flags >> GEN_FLAG_CPL_SHIFT) & 3;
+    dc->cpl = (flags >> GEN_FLAG_CPL_SHIFT) & 3;
     dc->iopl = (flags >> GEN_FLAG_IOPL_SHIFT) & 3;
     dc->tf = (flags >> GEN_FLAG_TF_SHIFT) & 1;
     dc->cc_op = CC_OP_DYNAMIC;
@@ -4362,7 +4356,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
     int cpl, error_code, is_dirty, is_user, prot, page_size;
     void *map_addr;
 
-    cpl = env->segs[R_CS].selector & 3;
+    cpl = env->cpl;
     is_user = (cpl == 3);
 
 #ifdef DEBUG_MMU