Commit 90a9fdae1f1acc791abc2c20731eddf01ba73ae6 (1 parent: 3fb2ded1)

more ring 0 operations

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@261 c046a42c-6fe2-441c-8c8c-71466251a162

Showing 3 changed files with 478 additions and 87 deletions
exec-i386.h
... | ... | @@ -124,12 +124,22 @@ extern CCTable cc_table[]; |
124 | 124 | |
125 | 125 | void load_seg(int seg_reg, int selector, unsigned cur_eip); |
126 | 126 | void jmp_seg(int selector, unsigned int new_eip); |
127 | +void helper_iret_protected(int shift); | |
127 | 128 | void helper_lldt_T0(void); |
128 | 129 | void helper_ltr_T0(void); |
129 | 130 | void helper_movl_crN_T0(int reg); |
130 | 131 | void helper_movl_drN_T0(int reg); |
132 | +void helper_invlpg(unsigned int addr); | |
133 | +void cpu_x86_update_cr0(CPUX86State *env); | |
134 | +void cpu_x86_update_cr3(CPUX86State *env); | |
135 | +void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr); | |
136 | +int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write); | |
131 | 137 | void __hidden cpu_lock(void); |
132 | 138 | void __hidden cpu_unlock(void); |
139 | +void do_interrupt(int intno, int is_int, int error_code, | |
140 | + unsigned int next_eip); | |
141 | +void do_interrupt_user(int intno, int is_int, int error_code, | |
142 | + unsigned int next_eip); | |
133 | 143 | void raise_interrupt(int intno, int is_int, int error_code, |
134 | 144 | unsigned int next_eip); |
135 | 145 | void raise_exception_err(int exception_index, int error_code); |
... | ... | @@ -329,3 +339,22 @@ void helper_frstor(uint8_t *ptr, int data32); |
329 | 339 | const uint8_t parity_table[256]; |
330 | 340 | const uint8_t rclw_table[32]; |
331 | 341 | const uint8_t rclb_table[32]; |
342 | + | |
343 | +static inline uint32_t compute_eflags(void) | |
344 | +{ | |
345 | + return env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); | |
346 | +} | |
347 | + | |
348 | +#define FL_UPDATE_MASK32 (TF_MASK | AC_MASK | ID_MASK) | |
349 | + | |
350 | +#define FL_UPDATE_CPL0_MASK (TF_MASK | IF_MASK | IOPL_MASK | NT_MASK | \ | |
351 | + RF_MASK | AC_MASK | ID_MASK) | |
352 | + | |
353 | +/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */ | |
354 | +static inline void load_eflags(int eflags, int update_mask) | |
355 | +{ | |
356 | + CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); | |
357 | + DF = 1 - (2 * ((eflags >> 10) & 1)); | |
358 | + env->eflags = (env->eflags & ~update_mask) | | |
359 | + (eflags & update_mask); | |
360 | +} | ... | ... |
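The two inline helpers added above are the bridge between the lazily evaluated condition codes (cc_table[CC_OP]) and the architectural EFLAGS image: compute_eflags() folds the pending arithmetic flags and the direction flag back into one 32-bit value, and load_eflags() splits such a value apart again under an update mask. The standalone sketch below only illustrates that round trip; the bit positions are the documented x86 EFLAGS layout, and every other name is invented for the demo rather than taken from the QEMU headers.

/* Standalone illustration of the compute_eflags()/load_eflags() round trip.
   Not QEMU code: the state variables and helper names are invented for the demo. */
#include <stdint.h>
#include <stdio.h>

#define DF_BIT     10                /* direction flag bit position */
#define TF_MASK    (1 << 8)
#define AC_MASK    (1 << 18)
#define ID_MASK    (1 << 21)
#define ARITH_MASK 0x8d5             /* OF | SF | ZF | AF | PF | CF */

static uint32_t cc_flags;            /* arithmetic flags, normally computed lazily */
static int df;                       /* DF kept as +1 / -1 so string ops can add it */
static uint32_t sys_eflags;          /* everything else (IF, IOPL, VM, ...) */

/* merge the three pieces back into one EFLAGS image, like compute_eflags() */
static uint32_t merge_eflags(void)
{
    return sys_eflags | cc_flags | (df < 0 ? (1u << DF_BIT) : 0);
}

/* split an EFLAGS image apart again, like load_eflags(eflags, update_mask) */
static void split_eflags(uint32_t eflags, uint32_t update_mask)
{
    cc_flags = eflags & ARITH_MASK;
    df = 1 - (2 * ((eflags >> DF_BIT) & 1));   /* bit set -> -1, bit clear -> +1 */
    sys_eflags = (sys_eflags & ~update_mask) | (eflags & update_mask);
}

int main(void)
{
    /* DF, ZF and CF set; only TF/AC/ID may reach the system part */
    split_eflags((1 << DF_BIT) | 0x41, TF_MASK | AC_MASK | ID_MASK);
    printf("df=%d eflags=%08x\n", df, merge_eflags());   /* df=-1 eflags=00000441 */
    return 0;
}

The FL_UPDATE_MASK32 and FL_UPDATE_CPL0_MASK definitions in the header then decide which of the remaining system bits a given instruction path may rewrite.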
helper-i386.c
... | ... | @@ -126,17 +126,74 @@ void cpu_loop_exit(void) |
126 | 126 | longjmp(env->jmp_env, 1); |
127 | 127 | } |
128 | 128 | |
129 | +static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, | |
130 | + uint32_t *esp_ptr, int dpl) | |
131 | +{ | |
132 | + int type, index, shift; | |
133 | + | |
129 | 134 | #if 0 |
130 | -/* full interrupt support (only useful for real CPU emulation, not | |
131 | - finished) - I won't do it any time soon, finish it if you want ! */ | |
132 | -void raise_interrupt(int intno, int is_int, int error_code, | |
133 | - unsigned int next_eip) | |
135 | + { | |
136 | + int i; | |
137 | + printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit); | |
138 | + for(i=0;i<env->tr.limit;i++) { | |
139 | + printf("%02x ", env->tr.base[i]); | |
140 | + if ((i & 7) == 7) printf("\n"); | |
141 | + } | |
142 | + printf("\n"); | |
143 | + } | |
144 | +#endif | |
145 | + | |
146 | + if (!(env->tr.flags & DESC_P_MASK)) | |
147 | + cpu_abort(env, "invalid tss"); | |
148 | + type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; | |
149 | + if ((type & 7) != 1) | |
150 | + cpu_abort(env, "invalid tss type"); | |
151 | + shift = type >> 3; | |
152 | + index = (dpl * 4 + 2) << shift; | |
153 | + if (index + (4 << shift) - 1 > env->tr.limit) | |
154 | + raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc); | |
155 | + if (shift == 0) { | |
156 | + *esp_ptr = lduw(env->tr.base + index); | |
157 | + *ss_ptr = lduw(env->tr.base + index + 2); | |
158 | + } else { | |
159 | + *esp_ptr = ldl(env->tr.base + index); | |
160 | + *ss_ptr = lduw(env->tr.base + index + 4); | |
161 | + } | |
162 | +} | |
163 | + | |
164 | +/* return non-zero if error */ | |
165 | +static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, | |
166 | + int selector) | |
134 | 167 | { |
135 | - SegmentDescriptorTable *dt; | |
168 | + SegmentCache *dt; | |
169 | + int index; | |
136 | 170 | uint8_t *ptr; |
137 | - int type, dpl, cpl; | |
138 | - uint32_t e1, e2; | |
139 | - | |
171 | + | |
172 | + if (selector & 0x4) | |
173 | + dt = &env->ldt; | |
174 | + else | |
175 | + dt = &env->gdt; | |
176 | + index = selector & ~7; | |
177 | + if ((index + 7) > dt->limit) | |
178 | + return -1; | |
179 | + ptr = dt->base + index; | |
180 | + *e1_ptr = ldl(ptr); | |
181 | + *e2_ptr = ldl(ptr + 4); | |
182 | + return 0; | |
183 | +} | |
184 | + | |
185 | + | |
186 | +/* protected mode interrupt */ | |
187 | +static void do_interrupt_protected(int intno, int is_int, int error_code, | |
188 | + unsigned int next_eip) | |
189 | +{ | |
190 | + SegmentCache *dt; | |
191 | + uint8_t *ptr, *ssp; | |
192 | + int type, dpl, cpl, selector, ss_dpl; | |
193 | + int has_error_code, new_stack, shift; | |
194 | + uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size; | |
195 | + uint32_t old_cs, old_ss, old_esp, old_eip; | |
196 | + | |
140 | 197 | dt = &env->idt; |
141 | 198 | if (intno * 8 + 7 > dt->limit) |
142 | 199 | raise_exception_err(EXCP0D_GPF, intno * 8 + 2); |
... | ... | @@ -147,6 +204,8 @@ void raise_interrupt(int intno, int is_int, int error_code, |
147 | 204 | type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; |
148 | 205 | switch(type) { |
149 | 206 | case 5: /* task gate */ |
207 | + cpu_abort(env, "task gate not supported"); | |
208 | + break; | |
150 | 209 | case 6: /* 286 interrupt gate */ |
151 | 210 | case 7: /* 286 trap gate */ |
152 | 211 | case 14: /* 386 interrupt gate */ |
... | ... | @@ -164,17 +223,184 @@ void raise_interrupt(int intno, int is_int, int error_code, |
164 | 223 | /* check valid bit */ |
165 | 224 | if (!(e2 & DESC_P_MASK)) |
166 | 225 | raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2); |
226 | + selector = e1 >> 16; | |
227 | + offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); | |
228 | + if ((selector & 0xfffc) == 0) | |
229 | + raise_exception_err(EXCP0D_GPF, 0); | |
230 | + | |
231 | + if (load_segment(&e1, &e2, selector) != 0) | |
232 | + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | |
233 | + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) | |
234 | + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | |
235 | + dpl = (e2 >> DESC_DPL_SHIFT) & 3; | |
236 | + if (dpl > cpl) | |
237 | + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | |
238 | + if (!(e2 & DESC_P_MASK)) | |
239 | + raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); | |
240 | + if (!(e2 & DESC_C_MASK) && dpl < cpl) { | |
241 | + /* to inner privilege */ | |
242 | + get_ss_esp_from_tss(&ss, &esp, dpl); | |
243 | + if ((ss & 0xfffc) == 0) | |
244 | + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); | |
245 | + if ((ss & 3) != dpl) | |
246 | + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); | |
247 | + if (load_segment(&ss_e1, &ss_e2, ss) != 0) | |
248 | + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); | |
249 | + ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; | |
250 | + if (ss_dpl != dpl) | |
251 | + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); | |
252 | + if (!(ss_e2 & DESC_S_MASK) || | |
253 | + (ss_e2 & DESC_CS_MASK) || | |
254 | + !(ss_e2 & DESC_W_MASK)) | |
255 | + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); | |
256 | + if (!(ss_e2 & DESC_P_MASK)) | |
257 | + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); | |
258 | + new_stack = 1; | |
259 | + } else if ((e2 & DESC_C_MASK) || dpl == cpl) { | |
260 | + /* to same privilege */ | |
261 | + new_stack = 0; | |
262 | + } else { | |
263 | + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | |
264 | + new_stack = 0; /* avoid warning */ | |
265 | + } | |
266 | + | |
267 | + shift = type >> 3; | |
268 | + has_error_code = 0; | |
269 | + if (!is_int) { | |
270 | + switch(intno) { | |
271 | + case 8: | |
272 | + case 10: | |
273 | + case 11: | |
274 | + case 12: | |
275 | + case 13: | |
276 | + case 14: | |
277 | + case 17: | |
278 | + has_error_code = 1; | |
279 | + break; | |
280 | + } | |
281 | + } | |
282 | + push_size = 6 + (new_stack << 2) + (has_error_code << 1); | |
283 | + if (env->eflags & VM_MASK) | |
284 | + push_size += 8; | |
285 | + push_size <<= shift; | |
286 | + | |
287 | + /* XXX: check that enough room is available */ | |
288 | + if (new_stack) { | |
289 | + old_esp = env->regs[R_ESP]; | |
290 | + old_ss = env->segs[R_SS].selector; | |
291 | + load_seg(R_SS, ss, env->eip); | |
292 | + } else { | |
293 | + old_esp = 0; | |
294 | + old_ss = 0; | |
295 | + esp = env->regs[R_ESP]; | |
296 | + } | |
297 | + if (is_int) | |
298 | + old_eip = next_eip; | |
299 | + else | |
300 | + old_eip = env->eip; | |
301 | + old_cs = env->segs[R_CS].selector; | |
302 | + load_seg(R_CS, selector, env->eip); | |
303 | + env->eip = offset; | |
304 | + env->regs[R_ESP] = esp - push_size; | |
305 | + ssp = env->segs[R_SS].base + esp; | |
306 | + if (shift == 1) { | |
307 | + int old_eflags; | |
308 | + if (env->eflags & VM_MASK) { | |
309 | + ssp -= 4; | |
310 | + stl(ssp, env->segs[R_GS].selector); | |
311 | + ssp -= 4; | |
312 | + stl(ssp, env->segs[R_FS].selector); | |
313 | + ssp -= 4; | |
314 | + stl(ssp, env->segs[R_DS].selector); | |
315 | + ssp -= 4; | |
316 | + stl(ssp, env->segs[R_ES].selector); | |
317 | + } | |
318 | + if (new_stack) { | |
319 | + ssp -= 4; | |
320 | + stl(ssp, old_ss); | |
321 | + ssp -= 4; | |
322 | + stl(ssp, old_esp); | |
323 | + } | |
324 | + ssp -= 4; | |
325 | + old_eflags = compute_eflags(); | |
326 | + stl(ssp, old_eflags); | |
327 | + ssp -= 4; | |
328 | + stl(ssp, old_cs); | |
329 | + ssp -= 4; | |
330 | + stl(ssp, old_eip); | |
331 | + if (has_error_code) { | |
332 | + ssp -= 4; | |
333 | + stl(ssp, error_code); | |
334 | + } | |
335 | + } else { | |
336 | + if (new_stack) { | |
337 | + ssp -= 2; | |
338 | + stw(ssp, old_ss); | |
339 | + ssp -= 2; | |
340 | + stw(ssp, old_esp); | |
341 | + } | |
342 | + ssp -= 2; | |
343 | + stw(ssp, compute_eflags()); | |
344 | + ssp -= 2; | |
345 | + stw(ssp, old_cs); | |
346 | + ssp -= 2; | |
347 | + stw(ssp, old_eip); | |
348 | + if (has_error_code) { | |
349 | + ssp -= 2; | |
350 | + stw(ssp, error_code); | |
351 | + } | |
352 | + } | |
353 | + | |
354 | + /* an interrupt gate clears the IF flag */ | |
355 | + if ((type & 1) == 0) { | |
356 | + env->eflags &= ~IF_MASK; | |
357 | + } | |
358 | + env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); | |
167 | 359 | } |
168 | 360 | |
169 | -#else | |
361 | +/* real mode interrupt */ | |
362 | +static void do_interrupt_real(int intno, int is_int, int error_code, | |
363 | + unsigned int next_eip) | |
364 | +{ | |
365 | + SegmentCache *dt; | |
366 | + uint8_t *ptr, *ssp; | |
367 | + int selector; | |
368 | + uint32_t offset, esp; | |
369 | + uint32_t old_cs, old_eip; | |
170 | 370 | |
171 | -/* | |
172 | - * is_int is TRUE if coming from the int instruction. next_eip is the | |
173 | - * EIP value AFTER the interrupt instruction. It is only relevant if | |
174 | - * is_int is TRUE. | |
175 | - */ | |
176 | -void raise_interrupt(int intno, int is_int, int error_code, | |
177 | - unsigned int next_eip) | |
371 | + /* real mode (simpler!) */ | |
372 | + dt = &env->idt; | |
373 | + if (intno * 4 + 3 > dt->limit) | |
374 | + raise_exception_err(EXCP0D_GPF, intno * 8 + 2); | |
375 | + ptr = dt->base + intno * 4; | |
376 | + offset = lduw(ptr); | |
377 | + selector = lduw(ptr + 2); | |
378 | + esp = env->regs[R_ESP] & 0xffff; | |
379 | + ssp = env->segs[R_SS].base + esp; | |
380 | + if (is_int) | |
381 | + old_eip = next_eip; | |
382 | + else | |
383 | + old_eip = env->eip; | |
384 | + old_cs = env->segs[R_CS].selector; | |
385 | + ssp -= 2; | |
386 | + stw(ssp, compute_eflags()); | |
387 | + ssp -= 2; | |
388 | + stw(ssp, old_cs); | |
389 | + ssp -= 2; | |
390 | + stw(ssp, old_eip); | |
391 | + esp -= 6; | |
392 | + | |
393 | + /* update processor state */ | |
394 | + env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff); | |
395 | + env->eip = offset; | |
396 | + env->segs[R_CS].selector = selector; | |
397 | + env->segs[R_CS].base = (uint8_t *)(selector << 4); | |
398 | + env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); | |
399 | +} | |
400 | + | |
401 | +/* fake user mode interrupt */ | |
402 | +void do_interrupt_user(int intno, int is_int, int error_code, | |
403 | + unsigned int next_eip) | |
178 | 404 | { |
179 | 405 | SegmentCache *dt; |
180 | 406 | uint8_t *ptr; |
... | ... | @@ -196,14 +422,39 @@ void raise_interrupt(int intno, int is_int, int error_code, |
196 | 422 | code */ |
197 | 423 | if (is_int) |
198 | 424 | EIP = next_eip; |
425 | +} | |
426 | + | |
427 | +/* | |
428 | + * Begin execution of an interrupt. is_int is TRUE if coming from | |
429 | + * the int instruction. next_eip is the EIP value AFTER the interrupt | |
430 | + * instruction. It is only relevant if is_int is TRUE. | |
431 | + */ | |
432 | +void do_interrupt(int intno, int is_int, int error_code, | |
433 | + unsigned int next_eip) | |
434 | +{ | |
435 | + if (env->cr[0] & CR0_PE_MASK) { | |
436 | + do_interrupt_protected(intno, is_int, error_code, next_eip); | |
437 | + } else { | |
438 | + do_interrupt_real(intno, is_int, error_code, next_eip); | |
439 | + } | |
440 | +} | |
441 | + | |
442 | +/* | |
443 | + * Signal an interrupt. It is executed in the main CPU loop. | |
444 | + * is_int is TRUE if coming from the int instruction. next_eip is the | |
445 | + * EIP value AFTER the interrupt instruction. It is only relevant if | |
446 | + * is_int is TRUE. | |
447 | + */ | |
448 | +void raise_interrupt(int intno, int is_int, int error_code, | |
449 | + unsigned int next_eip) | |
450 | +{ | |
199 | 451 | env->exception_index = intno; |
200 | 452 | env->error_code = error_code; |
201 | - | |
453 | + env->exception_is_int = is_int; | |
454 | + env->exception_next_eip = next_eip; | |
202 | 455 | cpu_loop_exit(); |
203 | 456 | } |
204 | 457 | |
205 | -#endif | |
206 | - | |
207 | 458 | /* shortcuts to generate exceptions */ |
208 | 459 | void raise_exception_err(int exception_index, int error_code) |
209 | 460 | { |
... | ... | @@ -335,9 +586,9 @@ static inline void load_seg_cache(SegmentCache *sc, uint32_t e1, uint32_t e2) |
335 | 586 | { |
336 | 587 | sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); |
337 | 588 | sc->limit = (e1 & 0xffff) | (e2 & 0x000f0000); |
338 | - if (e2 & (1 << 23)) | |
589 | + if (e2 & DESC_G_MASK) | |
339 | 590 | sc->limit = (sc->limit << 12) | 0xfff; |
340 | - sc->seg_32bit = (e2 >> 22) & 1; | |
591 | + sc->flags = e2; | |
341 | 592 | } |
342 | 593 | |
343 | 594 | void helper_lldt_T0(void) |
... | ... | @@ -382,9 +633,10 @@ void helper_ltr_T0(void) |
382 | 633 | |
383 | 634 | selector = T0 & 0xffff; |
384 | 635 | if ((selector & 0xfffc) == 0) { |
385 | - /* XXX: NULL selector case: invalid LDT */ | |
636 | + /* NULL selector case: invalid LDT */ | |
386 | 637 | env->tr.base = NULL; |
387 | 638 | env->tr.limit = 0; |
639 | + env->tr.flags = 0; | |
388 | 640 | } else { |
389 | 641 | if (selector & 0x4) |
390 | 642 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
... | ... | @@ -412,10 +664,7 @@ void helper_ltr_T0(void) |
412 | 664 | void load_seg(int seg_reg, int selector, unsigned int cur_eip) |
413 | 665 | { |
414 | 666 | SegmentCache *sc; |
415 | - SegmentCache *dt; | |
416 | - int index; | |
417 | 667 | uint32_t e1, e2; |
418 | - uint8_t *ptr; | |
419 | 668 | |
420 | 669 | sc = &env->segs[seg_reg]; |
421 | 670 | if ((selector & 0xfffc) == 0) { |
... | ... | @@ -427,21 +676,13 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip) |
427 | 676 | /* XXX: each access should trigger an exception */ |
428 | 677 | sc->base = NULL; |
429 | 678 | sc->limit = 0; |
430 | - sc->seg_32bit = 1; | |
679 | + sc->flags = 0; | |
431 | 680 | } |
432 | 681 | } else { |
433 | - if (selector & 0x4) | |
434 | - dt = &env->ldt; | |
435 | - else | |
436 | - dt = &env->gdt; | |
437 | - index = selector & ~7; | |
438 | - if ((index + 7) > dt->limit) { | |
682 | + if (load_segment(&e1, &e2, selector) != 0) { | |
439 | 683 | EIP = cur_eip; |
440 | 684 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
441 | 685 | } |
442 | - ptr = dt->base + index; | |
443 | - e1 = ldl(ptr); | |
444 | - e2 = ldl(ptr + 4); | |
445 | 686 | if (!(e2 & DESC_S_MASK) || |
446 | 687 | (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { |
447 | 688 | EIP = cur_eip; |
... | ... | @@ -469,8 +710,8 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip) |
469 | 710 | } |
470 | 711 | load_seg_cache(sc, e1, e2); |
471 | 712 | #if 0 |
472 | - fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx seg_32bit=%d\n", | |
473 | - selector, (unsigned long)sc->base, sc->limit, sc->seg_32bit); | |
713 | + fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", | |
714 | + selector, (unsigned long)sc->base, sc->limit, sc->flags); | |
474 | 715 | #endif |
475 | 716 | } |
476 | 717 | sc->selector = selector; |
... | ... | @@ -480,25 +721,14 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip) |
480 | 721 | void jmp_seg(int selector, unsigned int new_eip) |
481 | 722 | { |
482 | 723 | SegmentCache sc1; |
483 | - SegmentCache *dt; | |
484 | - int index; | |
485 | 724 | uint32_t e1, e2, cpl, dpl, rpl; |
486 | - uint8_t *ptr; | |
487 | 725 | |
488 | 726 | if ((selector & 0xfffc) == 0) { |
489 | 727 | raise_exception_err(EXCP0D_GPF, 0); |
490 | 728 | } |
491 | 729 | |
492 | - if (selector & 0x4) | |
493 | - dt = &env->ldt; | |
494 | - else | |
495 | - dt = &env->gdt; | |
496 | - index = selector & ~7; | |
497 | - if ((index + 7) > dt->limit) | |
730 | + if (load_segment(&e1, &e2, selector) != 0) | |
498 | 731 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
499 | - ptr = dt->base + index; | |
500 | - e1 = ldl(ptr); | |
501 | - e2 = ldl(ptr + 4); | |
502 | 732 | cpl = env->segs[R_CS].selector & 3; |
503 | 733 | if (e2 & DESC_S_MASK) { |
504 | 734 | if (!(e2 & DESC_CS_MASK)) |
... | ... | @@ -530,22 +760,143 @@ void jmp_seg(int selector, unsigned int new_eip) |
530 | 760 | } |
531 | 761 | } |
532 | 762 | |
533 | -/* XXX: do more */ | |
763 | +/* init the segment cache in vm86 mode */ | |
764 | +static inline void load_seg_vm(int seg, int selector) | |
765 | +{ | |
766 | + SegmentCache *sc = &env->segs[seg]; | |
767 | + selector &= 0xffff; | |
768 | + sc->base = (uint8_t *)(selector << 4); | |
769 | + sc->selector = selector; | |
770 | + sc->flags = 0; | |
771 | + sc->limit = 0xffff; | |
772 | +} | |
773 | + | |
774 | +/* protected mode iret */ | |
775 | +void helper_iret_protected(int shift) | |
776 | +{ | |
777 | + uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss; | |
778 | + uint32_t new_es, new_ds, new_fs, new_gs; | |
779 | + uint32_t e1, e2; | |
780 | + int cpl, dpl, rpl, eflags_mask; | |
781 | + uint8_t *ssp; | |
782 | + | |
783 | + sp = env->regs[R_ESP]; | |
784 | + if (!(env->segs[R_SS].flags & DESC_B_MASK)) | |
785 | + sp &= 0xffff; | |
786 | + ssp = env->segs[R_SS].base + sp; | |
787 | + if (shift == 1) { | |
788 | + /* 32 bits */ | |
789 | + new_eflags = ldl(ssp + 8); | |
790 | + new_cs = ldl(ssp + 4) & 0xffff; | |
791 | + new_eip = ldl(ssp); | |
792 | + if (new_eflags & VM_MASK) | |
793 | + goto return_to_vm86; | |
794 | + } else { | |
795 | + /* 16 bits */ | |
796 | + new_eflags = lduw(ssp + 4); | |
797 | + new_cs = lduw(ssp + 2); | |
798 | + new_eip = lduw(ssp); | |
799 | + } | |
800 | + if ((new_cs & 0xfffc) == 0) | |
801 | + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); | |
802 | + if (load_segment(&e1, &e2, new_cs) != 0) | |
803 | + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); | |
804 | + if (!(e2 & DESC_S_MASK) || | |
805 | + !(e2 & DESC_CS_MASK)) | |
806 | + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); | |
807 | + cpl = env->segs[R_CS].selector & 3; | |
808 | + rpl = new_cs & 3; | |
809 | + if (rpl < cpl) | |
810 | + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); | |
811 | + dpl = (e2 >> DESC_DPL_SHIFT) & 3; | |
812 | + if (e2 & DESC_CS_MASK) { | |
813 | + if (dpl > rpl) | |
814 | + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); | |
815 | + } else { | |
816 | + if (dpl != rpl) | |
817 | + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); | |
818 | + } | |
819 | + if (!(e2 & DESC_P_MASK)) | |
820 | + raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); | |
821 | + | |
822 | + if (rpl == cpl) { | |
823 | + /* return to same privilege level */ | |
824 | + load_seg(R_CS, new_cs, env->eip); | |
825 | + new_esp = sp + (6 << shift); | |
826 | + } else { | |
827 | + /* return to different privilege level */ | |
828 | + if (shift == 1) { | |
829 | + /* 32 bits */ | |
830 | + new_esp = ldl(ssp + 12); | |
831 | + new_ss = ldl(ssp + 16) & 0xffff; | |
832 | + } else { | |
833 | + /* 16 bits */ | |
834 | + new_esp = lduw(ssp + 6); | |
835 | + new_ss = lduw(ssp + 8); | |
836 | + } | |
837 | + | |
838 | + if ((new_ss & 3) != rpl) | |
839 | + raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | |
840 | + if (load_segment(&e1, &e2, new_ss) != 0) | |
841 | + raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | |
842 | + if (!(e2 & DESC_S_MASK) || | |
843 | + (e2 & DESC_CS_MASK) || | |
844 | + !(e2 & DESC_W_MASK)) | |
845 | + raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | |
846 | + dpl = (e2 >> DESC_DPL_SHIFT) & 3; | |
847 | + if (dpl != rpl) | |
848 | + raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | |
849 | + if (!(e2 & DESC_P_MASK)) | |
850 | + raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc); | |
851 | + | |
852 | + load_seg(R_CS, new_cs, env->eip); | |
853 | + load_seg(R_SS, new_ss, env->eip); | |
854 | + } | |
855 | + if (env->segs[R_SS].flags & DESC_B_MASK) | |
856 | + env->regs[R_ESP] = new_esp; | |
857 | + else | |
858 | + env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | | |
859 | + (new_esp & 0xffff); | |
860 | + env->eip = new_eip; | |
861 | + if (cpl == 0) | |
862 | + eflags_mask = FL_UPDATE_CPL0_MASK; | |
863 | + else | |
864 | + eflags_mask = FL_UPDATE_MASK32; | |
865 | + if (shift == 0) | |
866 | + eflags_mask &= 0xffff; | |
867 | + load_eflags(new_eflags, eflags_mask); | |
868 | + return; | |
869 | + | |
870 | + return_to_vm86: | |
871 | + new_esp = ldl(ssp + 12); | |
872 | + new_ss = ldl(ssp + 16); | |
873 | + new_es = ldl(ssp + 20); | |
874 | + new_ds = ldl(ssp + 24); | |
875 | + new_fs = ldl(ssp + 28); | |
876 | + new_gs = ldl(ssp + 32); | |
877 | + | |
878 | + /* modify processor state */ | |
879 | + load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK); | |
880 | + load_seg_vm(R_CS, new_cs); | |
881 | + load_seg_vm(R_SS, new_ss); | |
882 | + load_seg_vm(R_ES, new_es); | |
883 | + load_seg_vm(R_DS, new_ds); | |
884 | + load_seg_vm(R_FS, new_fs); | |
885 | + load_seg_vm(R_GS, new_gs); | |
886 | + | |
887 | + env->eip = new_eip; | |
888 | + env->regs[R_ESP] = new_esp; | |
889 | +} | |
890 | + | |
534 | 891 | void helper_movl_crN_T0(int reg) |
535 | 892 | { |
893 | + env->cr[reg] = T0; | |
536 | 894 | switch(reg) { |
537 | 895 | case 0: |
538 | - default: | |
539 | - env->cr[0] = reg; | |
540 | - break; | |
541 | - case 2: | |
542 | - env->cr[2] = reg; | |
896 | + cpu_x86_update_cr0(env); | |
543 | 897 | break; |
544 | 898 | case 3: |
545 | - env->cr[3] = reg; | |
546 | - break; | |
547 | - case 4: | |
548 | - env->cr[4] = reg; | |
899 | + cpu_x86_update_cr3(env); | |
549 | 900 | break; |
550 | 901 | } |
551 | 902 | } |
... | ... | @@ -556,6 +907,11 @@ void helper_movl_drN_T0(int reg) |
556 | 907 | env->dr[reg] = T0; |
557 | 908 | } |
558 | 909 | |
910 | +void helper_invlpg(unsigned int addr) | |
911 | +{ | |
912 | + cpu_x86_flush_tlb(env, addr); | |
913 | +} | |
914 | + | |
559 | 915 | /* rdtsc */ |
560 | 916 | #ifndef __i386__ |
561 | 917 | uint64_t emu_time; |
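The cpu_x86_update_cr0()/cpu_x86_update_cr3() hooks in helper_movl_crN_T0(), together with helper_invlpg() calling cpu_x86_flush_tlb(), keep the emulator's cached translations coherent with the guest's paging state. As a rough sketch of why those hooks are needed (a toy direct-mapped software TLB with invented names, not QEMU's actual structures), a CR3 write has to drop every cached translation while invlpg only drops the entry for one page:

/* Toy software TLB, for illustration only (names and layout are invented). */
#include <stdint.h>
#include <string.h>

#define TLB_SIZE  256
#define PAGE_MASK 0xfffff000u

typedef struct {
    uint32_t vaddr;   /* guest virtual page, or 0xffffffff when the slot is empty */
    uint32_t paddr;   /* cached physical page for that slot */
} TLBEntry;

static TLBEntry soft_tlb[TLB_SIZE];
static uint32_t cur_cr3;

static void tlb_flush_all(void)
{
    memset(soft_tlb, 0xff, sizeof(soft_tlb));   /* every vaddr becomes 0xffffffff */
}

/* a cpu_x86_update_cr3()-style hook: new page tables invalidate everything */
static void write_cr3(uint32_t new_cr3)
{
    cur_cr3 = new_cr3;
    tlb_flush_all();
}

/* a cpu_x86_flush_tlb()-style hook used by invlpg: drop a single page */
static void tlb_flush_page(uint32_t addr)
{
    TLBEntry *e = &soft_tlb[(addr >> 12) & (TLB_SIZE - 1)];
    if ((e->vaddr & PAGE_MASK) == (addr & PAGE_MASK))
        e->vaddr = 0xffffffffu;
}

int main(void)
{
    write_cr3(0x00002000);          /* mov cr3, ... -> full flush */
    tlb_flush_page(0x00400000);     /* invlpg 0x400000 -> single-entry flush */
    return 0;
}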
... | ... | @@ -577,23 +933,12 @@ void helper_rdtsc(void) |
577 | 933 | void helper_lsl(void) |
578 | 934 | { |
579 | 935 | unsigned int selector, limit; |
580 | - SegmentCache *dt; | |
581 | - int index; | |
582 | 936 | uint32_t e1, e2; |
583 | - uint8_t *ptr; | |
584 | 937 | |
585 | 938 | CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z; |
586 | 939 | selector = T0 & 0xffff; |
587 | - if (selector & 0x4) | |
588 | - dt = &env->ldt; | |
589 | - else | |
590 | - dt = &env->gdt; | |
591 | - index = selector & ~7; | |
592 | - if ((index + 7) > dt->limit) | |
940 | + if (load_segment(&e1, &e2, selector) != 0) | |
593 | 941 | return; |
594 | - ptr = dt->base + index; | |
595 | - e1 = ldl(ptr); | |
596 | - e2 = ldl(ptr + 4); | |
597 | 942 | limit = (e1 & 0xffff) | (e2 & 0x000f0000); |
598 | 943 | if (e2 & (1 << 23)) |
599 | 944 | limit = (limit << 12) | 0xfff; |
... | ... | @@ -604,22 +949,12 @@ void helper_lsl(void) |
604 | 949 | void helper_lar(void) |
605 | 950 | { |
606 | 951 | unsigned int selector; |
607 | - SegmentCache *dt; | |
608 | - int index; | |
609 | - uint32_t e2; | |
610 | - uint8_t *ptr; | |
952 | + uint32_t e1, e2; | |
611 | 953 | |
612 | 954 | CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z; |
613 | 955 | selector = T0 & 0xffff; |
614 | - if (selector & 0x4) | |
615 | - dt = &env->ldt; | |
616 | - else | |
617 | - dt = &env->gdt; | |
618 | - index = selector & ~7; | |
619 | - if ((index + 7) > dt->limit) | |
956 | + if (load_segment(&e1, &e2, selector) != 0) | |
620 | 957 | return; |
621 | - ptr = dt->base + index; | |
622 | - e2 = ldl(ptr + 4); | |
623 | 958 | T1 = e2 & 0x00f0ff00; |
624 | 959 | CC_SRC |= CC_Z; |
625 | 960 | } | ... | ... |
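Most of the mechanical churn in helper-i386.c comes from the new load_segment() helper replacing the open-coded GDT/LDT walks in load_seg(), jmp_seg(), helper_lsl() and helper_lar(), plus load_seg_cache() now keeping the raw descriptor flags instead of just seg_32bit. The lookup and decode are pure bit-slicing; the standalone sketch below mimics them on a fake in-memory table (helper names and the example descriptor are invented, only the bit layout is architectural): bit 2 of a selector picks GDT or LDT, the upper 13 bits index the table, and the G bit scales the limit by 4 KiB.

/* Selector -> descriptor decoding in the style of load_segment()/load_seg_cache().
   Standalone demo with an invented table; little-endian host assumed. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef struct {
    uint8_t *base;     /* linear base of the descriptor table */
    unsigned limit;    /* last valid byte offset inside the table */
} DescTable;

/* fetch the two descriptor words; non-zero return means the selector is past the limit */
static int fetch_descriptor(const DescTable *gdt, const DescTable *ldt,
                            unsigned selector, uint32_t *e1, uint32_t *e2)
{
    const DescTable *dt = (selector & 0x4) ? ldt : gdt;  /* TI bit: 0 = GDT, 1 = LDT */
    unsigned index = selector & ~7u;                     /* byte offset of the 8-byte entry */
    if (index + 7 > dt->limit)
        return -1;
    memcpy(e1, dt->base + index, 4);
    memcpy(e2, dt->base + index + 4, 4);
    return 0;
}

/* recover base and limit the way load_seg_cache() does */
static void decode_base_limit(uint32_t e1, uint32_t e2, uint32_t *base, uint32_t *limit)
{
    *base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & (1 << 23))                 /* G bit: limit counted in 4 KiB pages */
        *limit = (*limit << 12) | 0xfff;
}

int main(void)
{
    uint8_t gdt_bytes[24] = {0};
    uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;   /* flat 4 GiB code segment */
    memcpy(gdt_bytes + 8, &e1, 4);
    memcpy(gdt_bytes + 12, &e2, 4);
    DescTable gdt = { gdt_bytes, sizeof(gdt_bytes) - 1 };

    uint32_t d1, d2, base, limit;
    if (fetch_descriptor(&gdt, &gdt, 0x08, &d1, &d2) == 0) {   /* index 1, GDT, RPL 0 */
        decode_base_limit(d1, d2, &base, &limit);
        printf("base=%08x limit=%08x\n", base, limit);         /* base=00000000 limit=ffffffff */
    }
    return 0;
}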
op-i386.c
... | ... | @@ -493,6 +493,12 @@ void OPPROTO op_jmp_im(void) |
493 | 493 | EIP = PARAM1; |
494 | 494 | } |
495 | 495 | |
496 | +void OPPROTO op_hlt(void) | |
497 | +{ | |
498 | + env->exception_index = EXCP_HLT; | |
499 | + cpu_loop_exit(); | |
500 | +} | |
501 | + | |
496 | 502 | void OPPROTO op_raise_interrupt(void) |
497 | 503 | { |
498 | 504 | int intno; |
... | ... | @@ -954,6 +960,11 @@ void OPPROTO op_ljmp_T0_T1(void) |
954 | 960 | jmp_seg(T0 & 0xffff, T1); |
955 | 961 | } |
956 | 962 | |
963 | +void OPPROTO op_iret_protected(void) | |
964 | +{ | |
965 | + helper_iret_protected(PARAM1); | |
966 | +} | |
967 | + | |
957 | 968 | void OPPROTO op_lldt_T0(void) |
958 | 969 | { |
959 | 970 | helper_lldt_T0(); |
... | ... | @@ -983,6 +994,11 @@ void OPPROTO op_lmsw_T0(void) |
983 | 994 | helper_movl_crN_T0(0); |
984 | 995 | } |
985 | 996 | |
997 | +void OPPROTO op_invlpg_A0(void) | |
998 | +{ | |
999 | + helper_invlpg(A0); | |
1000 | +} | |
1001 | + | |
986 | 1002 | void OPPROTO op_movl_T0_env(void) |
987 | 1003 | { |
988 | 1004 | T0 = *(uint32_t *)((char *)env + PARAM1); |
... | ... | @@ -1082,8 +1098,7 @@ void OPPROTO op_set_cc_op(void) |
1082 | 1098 | CC_OP = PARAM1; |
1083 | 1099 | } |
1084 | 1100 | |
1085 | -#define FL_UPDATE_MASK32 (TF_MASK | AC_MASK | ID_MASK) | |
1086 | -#define FL_UPDATE_MASK16 (TF_MASK) | |
1101 | +#define FL_UPDATE_MASK16 (FL_UPDATE_MASK32 & 0xffff) | |
1087 | 1102 | |
1088 | 1103 | void OPPROTO op_movl_eflags_T0(void) |
1089 | 1104 | { |
... | ... | @@ -1092,7 +1107,8 @@ void OPPROTO op_movl_eflags_T0(void) |
1092 | 1107 | CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); |
1093 | 1108 | DF = 1 - (2 * ((eflags >> 10) & 1)); |
1094 | 1109 | /* we also update some system flags as in user mode */ |
1095 | - env->eflags = (env->eflags & ~FL_UPDATE_MASK32) | (eflags & FL_UPDATE_MASK32); | |
1110 | + env->eflags = (env->eflags & ~FL_UPDATE_MASK32) | | |
1111 | + (eflags & FL_UPDATE_MASK32); | |
1096 | 1112 | } |
1097 | 1113 | |
1098 | 1114 | void OPPROTO op_movw_eflags_T0(void) |
... | ... | @@ -1102,7 +1118,18 @@ void OPPROTO op_movw_eflags_T0(void) |
1102 | 1118 | CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); |
1103 | 1119 | DF = 1 - (2 * ((eflags >> 10) & 1)); |
1104 | 1120 | /* we also update some system flags as in user mode */ |
1105 | - env->eflags = (env->eflags & ~FL_UPDATE_MASK16) | (eflags & FL_UPDATE_MASK16); | |
1121 | + env->eflags = (env->eflags & ~FL_UPDATE_MASK16) | | |
1122 | + (eflags & FL_UPDATE_MASK16); | |
1123 | +} | |
1124 | + | |
1125 | +void OPPROTO op_movl_eflags_T0_cpl0(void) | |
1126 | +{ | |
1127 | + load_eflags(T0, FL_UPDATE_CPL0_MASK); | |
1128 | +} | |
1129 | + | |
1130 | +void OPPROTO op_movw_eflags_T0_cpl0(void) | |
1131 | +{ | |
1132 | + load_eflags(T0, FL_UPDATE_CPL0_MASK & 0xffff); | |
1106 | 1133 | } |
1107 | 1134 | |
1108 | 1135 | #if 0 | ... | ... |
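The new *_cpl0 variants of the EFLAGS move ops exist because the ring-0 paths used by IRET and privileged POPF handling are allowed to rewrite IF, IOPL, NT and RF, while the plain ops keep using FL_UPDATE_MASK32 and only let TF, AC and ID through. A small standalone check of the masks (values copied from this commit, bit positions from the documented EFLAGS layout) shows exactly which bits the privileged path gains:

/* Which EFLAGS bits each update mask exposes (mask values copied from the diff,
   bit positions from the x86 EFLAGS layout; standalone demo only). */
#include <stdio.h>

#define TF_MASK   (1 << 8)
#define IF_MASK   (1 << 9)
#define IOPL_MASK (3 << 12)
#define NT_MASK   (1 << 14)
#define RF_MASK   (1 << 16)
#define AC_MASK   (1 << 18)
#define ID_MASK   (1 << 21)

#define FL_UPDATE_MASK32    (TF_MASK | AC_MASK | ID_MASK)
#define FL_UPDATE_CPL0_MASK (TF_MASK | IF_MASK | IOPL_MASK | NT_MASK | \
                             RF_MASK | AC_MASK | ID_MASK)
#define FL_UPDATE_MASK16    (FL_UPDATE_MASK32 & 0xffff)

int main(void)
{
    /* bits only the ring-0 ops may change: IF, IOPL, NT, RF -> 0x00017200 */
    printf("cpl0-only bits: %08x\n", FL_UPDATE_CPL0_MASK & ~FL_UPDATE_MASK32);
    /* the 16-bit variants simply truncate the masks: 0x0100 and 0x7300 */
    printf("16-bit masks:   %04x %04x\n", FL_UPDATE_MASK16,
           FL_UPDATE_CPL0_MASK & 0xffff);
    return 0;
}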