Commit 7e84c2498f0ff3999937d18d1e9abaa030400000
1 parent e670b89e
full TSS support - IO map check support - conforming segment check fixes - iret in vm86 mode fix
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@450 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 465 additions and 60 deletions
target-i386/helper.c
... | ... | @@ -126,6 +126,56 @@ void cpu_loop_exit(void) |
126 | 126 | longjmp(env->jmp_env, 1); |
127 | 127 | } |
128 | 128 | |
129 | +/* return non-zero on error */ | 
130 | +static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, | |
131 | + int selector) | |
132 | +{ | |
133 | + SegmentCache *dt; | |
134 | + int index; | |
135 | + uint8_t *ptr; | |
136 | + | |
137 | + if (selector & 0x4) | |
138 | + dt = &env->ldt; | |
139 | + else | |
140 | + dt = &env->gdt; | |
141 | + index = selector & ~7; | |
142 | + if ((index + 7) > dt->limit) | |
143 | + return -1; | |
144 | + ptr = dt->base + index; | |
145 | + *e1_ptr = ldl_kernel(ptr); | |
146 | + *e2_ptr = ldl_kernel(ptr + 4); | |
147 | + return 0; | |
148 | +} | |
149 | + | |
150 | +static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) | |
151 | +{ | |
152 | + unsigned int limit; | |
153 | + limit = (e1 & 0xffff) | (e2 & 0x000f0000); | |
154 | + if (e2 & DESC_G_MASK) | |
155 | + limit = (limit << 12) | 0xfff; | |
156 | + return limit; | |
157 | +} | |
158 | + | |
159 | +static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2) | |
160 | +{ | |
161 | + return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); | |
162 | +} | |
163 | + | |
164 | +static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2) | |
165 | +{ | |
166 | + sc->base = get_seg_base(e1, e2); | |
167 | + sc->limit = get_seg_limit(e1, e2); | |
168 | + sc->flags = e2; | |
169 | +} | |
170 | + | |
171 | +/* init the segment cache in vm86 mode. */ | |
172 | +static inline void load_seg_vm(int seg, int selector) | |
173 | +{ | |
174 | + selector &= 0xffff; | |
175 | + cpu_x86_load_seg_cache(env, seg, selector, | |
176 | + (uint8_t *)(selector << 4), 0xffff, 0); | |
177 | +} | |
178 | + | |
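
Note: the two helpers above just reassemble the base and limit fields that a descriptor scatters across its two 32-bit words. A minimal standalone sketch of the same decoding (values are illustrative, not from this patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* a flat 4 GB code descriptor: base 0, limit 0xfffff, G=1 */
        uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;
        uint32_t base = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
        uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
        if (e2 & (1 << 23))             /* DESC_G_MASK: 4 KB granularity */
            limit = (limit << 12) | 0xfff;
        /* prints base=0x00000000 limit=0xffffffff */
        printf("base=0x%08x limit=0x%08x\n", base, limit);
        return 0;
    }
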
129 | 179 | static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, |
130 | 180 | uint32_t *esp_ptr, int dpl) |
131 | 181 | { |
... | ... | @@ -161,54 +211,322 @@ static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, |
161 | 211 | } |
162 | 212 | } |
163 | 213 | |
164 | -/* return non zero if error */ | |
165 | -static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, | |
166 | - int selector) | |
214 | +/* XXX: merge with load_seg() */ | |
215 | +static void tss_load_seg(int seg_reg, int selector) | |
167 | 216 | { |
217 | + uint32_t e1, e2; | |
218 | + int rpl, dpl, cpl; | |
219 | + | |
220 | + if ((selector & 0xfffc) != 0) { | |
221 | + if (load_segment(&e1, &e2, selector) != 0) | |
222 | + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); | |
223 | + if (!(e2 & DESC_S_MASK)) | |
224 | + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); | |
225 | + rpl = selector & 3; | |
226 | + dpl = (e2 >> DESC_DPL_SHIFT) & 3; | |
227 | + cpl = env->hflags & HF_CPL_MASK; | |
228 | + if (seg_reg == R_CS) { | |
229 | + if (!(e2 & DESC_CS_MASK)) | |
230 | + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); | |
231 | + if (dpl != rpl) | |
232 | + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); | |
233 | + if ((e2 & DESC_C_MASK) && dpl > rpl) | |
234 | + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); | |
235 | + | |
236 | + } else if (seg_reg == R_SS) { | |
237 | + /* SS must be writable data */ | |
238 | + if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) | |
239 | + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); | |
240 | + if (dpl != cpl || dpl != rpl) | |
241 | + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); | |
242 | + } else { | |
243 | + /* reject non-readable code segments */ | 
244 | + if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) | |
245 | + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); | |
246 | + /* if data or non-conforming code, check the rights */ | 
247 | + if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { | |
248 | + if (dpl < cpl || dpl < rpl) | |
249 | + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); | |
250 | + } | |
251 | + } | |
252 | + if (!(e2 & DESC_P_MASK)) | |
253 | + raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); | |
254 | + cpu_x86_load_seg_cache(env, seg_reg, selector, | |
255 | + get_seg_base(e1, e2), | |
256 | + get_seg_limit(e1, e2), | |
257 | + e2); | |
258 | + } else { | |
259 | + if (seg_reg == R_SS || seg_reg == R_CS) | |
260 | + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); | |
261 | + } | |
262 | +} | |
263 | + | |
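
Note: for data and non-conforming code segments (type < 12), tss_load_seg applies the usual protection rule: the descriptor's DPL must be numerically greater than or equal to both CPL and RPL. The predicate in isolation (a hypothetical helper, not part of the patch):

    /* returns non-zero if a data or non-conforming code segment with
       privilege level dpl may be loaded when running at cpl with a
       selector of privilege rpl */
    static int data_seg_ok(int dpl, int cpl, int rpl)
    {
        return dpl >= cpl && dpl >= rpl;
    }
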
264 | +#define SWITCH_TSS_JMP 0 | |
265 | +#define SWITCH_TSS_IRET 1 | |
266 | +#define SWITCH_TSS_CALL 2 | |
267 | + | |
268 | +/* XXX: restore CPU state in registers (PowerPC case) */ | |
269 | +static void switch_tss(int tss_selector, | |
270 | + uint32_t e1, uint32_t e2, int source) | |
271 | +{ | |
272 | + int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; | |
273 | + uint8_t *tss_base; | |
274 | + uint32_t new_regs[8], new_segs[6]; | |
275 | + uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; | |
276 | + uint32_t old_eflags, eflags_mask; | |
168 | 277 | SegmentCache *dt; |
169 | 278 | int index; |
170 | 279 | uint8_t *ptr; |
171 | 280 | |
172 | - if (selector & 0x4) | |
173 | - dt = &env->ldt; | |
281 | + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; | |
282 | + | |
283 | + /* if it is a task gate, read the TSS segment and load it */ | 
284 | + if (type == 5) { | |
285 | + if (!(e2 & DESC_P_MASK)) | |
286 | + raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc); | |
287 | + tss_selector = e1 >> 16; | |
288 | + if (tss_selector & 4) | |
289 | + raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); | |
290 | + if (load_segment(&e1, &e2, tss_selector) != 0) | |
291 | + raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); | |
292 | + if (e2 & DESC_S_MASK) | |
293 | + raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); | |
294 | + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; | |
295 | + if ((type & 7) != 1) | |
296 | + raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); | |
297 | + } | |
298 | + | |
299 | + if (!(e2 & DESC_P_MASK)) | |
300 | + raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc); | |
301 | + | |
302 | + if (type & 8) | |
303 | + tss_limit_max = 103; | |
174 | 304 | else |
175 | - dt = &env->gdt; | |
176 | - index = selector & ~7; | |
305 | + tss_limit_max = 43; | |
306 | + tss_limit = get_seg_limit(e1, e2); | |
307 | + tss_base = get_seg_base(e1, e2); | |
308 | + if ((tss_selector & 4) != 0 || | |
309 | + tss_limit < tss_limit_max) | |
310 | + raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); | |
311 | + old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; | |
312 | + if (old_type & 8) | |
313 | + old_tss_limit_max = 103; | |
314 | + else | |
315 | + old_tss_limit_max = 43; | |
316 | + | |
317 | + /* read all the registers from the new TSS */ | |
318 | + if (type & 8) { | |
319 | + /* 32 bit */ | |
320 | + new_cr3 = ldl_kernel(tss_base + 0x1c); | |
321 | + new_eip = ldl_kernel(tss_base + 0x20); | |
322 | + new_eflags = ldl_kernel(tss_base + 0x24); | |
323 | + for(i = 0; i < 8; i++) | |
324 | + new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4)); | |
325 | + for(i = 0; i < 6; i++) | |
326 | + new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4)); | |
327 | + new_ldt = lduw_kernel(tss_base + 0x60); | |
328 | + new_trap = ldl_kernel(tss_base + 0x64); | |
329 | + } else { | |
330 | + /* 16 bit */ | |
331 | + new_cr3 = 0; | |
332 | + new_eip = lduw_kernel(tss_base + 0x0e); | |
333 | + new_eflags = lduw_kernel(tss_base + 0x10); | |
334 | + for(i = 0; i < 8; i++) | |
335 | + new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000; | |
336 | + for(i = 0; i < 4; i++) | |
337 | + new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2)); | 
338 | + new_ldt = lduw_kernel(tss_base + 0x2a); | |
339 | + new_segs[R_FS] = 0; | |
340 | + new_segs[R_GS] = 0; | |
341 | + new_trap = 0; | |
342 | + } | |
343 | + | |
344 | + /* NOTE: we must avoid memory exceptions during the task switch, | 
345 | + so we make dummy accesses beforehand */ | 
346 | + /* XXX: this can still fail in some cases, so a bigger hack is | 
347 | + necessary to validate the TLB after the accesses are done */ | 
348 | + | |
349 | + v1 = ldub_kernel(env->tr.base); | |
350 | + v2 = ldub_kernel(env->tr.base + old_tss_limit_max); | 
351 | + stb_kernel(env->tr.base, v1); | |
352 | + stb_kernel(env->tr.base + old_tss_limit_max, v2); | |
353 | + | |
354 | + /* clear busy bit (the task switch is restartable) */ | 
355 | + if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { | |
356 | + uint8_t *ptr; | |
357 | + uint32_t e2; | |
358 | + ptr = env->gdt.base + (env->tr.selector & ~7); | 
359 | + e2 = ldl_kernel(ptr + 4); | |
360 | + e2 &= ~DESC_TSS_BUSY_MASK; | |
361 | + stl_kernel(ptr + 4, e2); | |
362 | + } | |
363 | + old_eflags = compute_eflags(); | |
364 | + if (source == SWITCH_TSS_IRET) | |
365 | + old_eflags &= ~NT_MASK; | |
366 | + | |
367 | + /* save the current state in the old TSS */ | |
368 | + if (type & 8) { | |
369 | + /* 32 bit */ | |
370 | + stl_kernel(env->tr.base + 0x20, env->eip); | |
371 | + stl_kernel(env->tr.base + 0x24, old_eflags); | |
372 | + for(i = 0; i < 8; i++) | |
373 | + stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]); | |
374 | + for(i = 0; i < 6; i++) | |
375 | + stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector); | |
376 | + } else { | |
377 | + /* 16 bit */ | |
378 | + stw_kernel(env->tr.base + 0x0e, env->eip); | 
379 | + stw_kernel(env->tr.base + 0x10, old_eflags); | |
380 | + for(i = 0; i < 8; i++) | |
381 | + stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]); | |
382 | + for(i = 0; i < 4; i++) | |
383 | + stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector); | 
384 | + } | |
385 | + | |
386 | + /* from now on, if an exception occurs, it will occur in the new | 
387 | + task's context */ | 
388 | + | |
389 | + if (source == SWITCH_TSS_CALL) { | |
390 | + stw_kernel(tss_base, env->tr.selector); | |
391 | + new_eflags |= NT_MASK; | |
392 | + } | |
393 | + | |
394 | + /* set busy bit */ | |
395 | + if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { | |
396 | + uint8_t *ptr; | |
397 | + uint32_t e2; | |
398 | + ptr = env->gdt.base + (tss_selector & ~7); | 
399 | + e2 = ldl_kernel(ptr + 4); | |
400 | + e2 |= DESC_TSS_BUSY_MASK; | |
401 | + stl_kernel(ptr + 4, e2); | |
402 | + } | |
403 | + | |
404 | + /* set the new CPU state */ | |
405 | + /* from this point on, any exception that occurs can cause problems */ | 
406 | + env->cr[0] |= CR0_TS_MASK; | |
407 | + env->tr.selector = tss_selector; | |
408 | + env->tr.base = tss_base; | |
409 | + env->tr.limit = tss_limit; | |
410 | + env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; | |
411 | + | |
412 | + if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { | |
413 | + env->cr[3] = new_cr3; | |
414 | + cpu_x86_update_cr3(env); | |
415 | + } | |
416 | + | |
417 | + /* first load all registers without raising exceptions, then | 
418 | + reload the ones that may fault */ | 
419 | + env->eip = new_eip; | |
420 | + eflags_mask = FL_UPDATE_CPL0_MASK; | |
421 | + if (!(type & 8)) | |
422 | + eflags_mask &= 0xffff; | |
423 | + load_eflags(new_eflags, eflags_mask); | |
424 | + for(i = 0; i < 8; i++) | |
425 | + env->regs[i] = new_regs[i]; | |
426 | + if (new_eflags & VM_MASK) { | |
427 | + for(i = 0; i < 6; i++) | |
428 | + load_seg_vm(i, new_segs[i]); | |
429 | + /* in vm86, CPL is always 3 */ | |
430 | + cpu_x86_set_cpl(env, 3); | |
431 | + } else { | |
432 | + /* CPL is set to the RPL of CS */ | 
433 | + cpu_x86_set_cpl(env, new_segs[R_CS] & 3); | |
434 | + /* load just the selectors first, as the rest may trigger exceptions */ | 
435 | + for(i = 0; i < 6; i++) | |
436 | + cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0); | |
437 | + } | |
438 | + | |
439 | + env->ldt.selector = new_ldt & ~4; | |
440 | + env->ldt.base = NULL; | |
441 | + env->ldt.limit = 0; | |
442 | + env->ldt.flags = 0; | |
443 | + | |
444 | + /* load the LDT */ | |
445 | + if (new_ldt & 4) | |
446 | + raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); | |
447 | + | |
448 | + dt = &env->gdt; | |
449 | + index = new_ldt & ~7; | |
177 | 450 | if ((index + 7) > dt->limit) |
178 | - return -1; | |
451 | + raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); | |
179 | 452 | ptr = dt->base + index; |
180 | - *e1_ptr = ldl_kernel(ptr); | |
181 | - *e2_ptr = ldl_kernel(ptr + 4); | |
182 | - return 0; | |
453 | + e1 = ldl_kernel(ptr); | |
454 | + e2 = ldl_kernel(ptr + 4); | |
455 | + if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) | |
456 | + raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); | |
457 | + if (!(e2 & DESC_P_MASK)) | |
458 | + raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); | |
459 | + load_seg_cache_raw_dt(&env->ldt, e1, e2); | |
460 | + | |
461 | + /* load the segments */ | |
462 | + if (!(new_eflags & VM_MASK)) { | |
463 | + tss_load_seg(R_CS, new_segs[R_CS]); | |
464 | + tss_load_seg(R_SS, new_segs[R_SS]); | |
465 | + tss_load_seg(R_ES, new_segs[R_ES]); | |
466 | + tss_load_seg(R_DS, new_segs[R_DS]); | |
467 | + tss_load_seg(R_FS, new_segs[R_FS]); | |
468 | + tss_load_seg(R_GS, new_segs[R_GS]); | |
469 | + } | |
470 | + | |
471 | + /* check that EIP is within the CS segment limit */ | 
472 | + if (new_eip > env->segs[R_CS].limit) { | |
473 | + raise_exception_err(EXCP0D_GPF, 0); | |
474 | + } | |
183 | 475 | } |
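
Note: the fixed offsets used by switch_tss follow the hardware 32-bit TSS layout. As a reference sketch only (the field names are mine; this struct is not in the patch):

    #include <stdint.h>

    /* 32-bit TSS layout implied by the offsets above */
    struct tss32 {
        uint16_t back_link, res0;          /* 0x00: previous task link */
        uint32_t esp0; uint16_t ss0, res1; /* 0x04 */
        uint32_t esp1; uint16_t ss1, res2; /* 0x0c */
        uint32_t esp2; uint16_t ss2, res3; /* 0x14 */
        uint32_t cr3;                      /* 0x1c */
        uint32_t eip;                      /* 0x20 */
        uint32_t eflags;                   /* 0x24 */
        uint32_t regs[8];                  /* 0x28: EAX..EDI */
        uint16_t segs[6][2];               /* 0x48: ES,CS,SS,DS,FS,GS + pad */
        uint16_t ldt, res4;                /* 0x60 */
        uint16_t trap;                     /* 0x64: debug trap flag */
        uint16_t iomap_base;               /* 0x66: used by check_io below */
    };
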
184 | - | |
185 | -static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) | |
476 | + | |
477 | +/* check whether port I/O is allowed by the TSS I/O bitmap */ | 
478 | +static inline void check_io(int addr, int size) | |
186 | 479 | { |
187 | - unsigned int limit; | |
188 | - limit = (e1 & 0xffff) | (e2 & 0x000f0000); | |
189 | - if (e2 & DESC_G_MASK) | |
190 | - limit = (limit << 12) | 0xfff; | |
191 | - return limit; | |
480 | + int io_offset, val, mask; | |
481 | + | |
482 | + /* the TSS must be a valid 32-bit one */ | 
483 | + if (!(env->tr.flags & DESC_P_MASK) || | |
484 | + ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || | |
485 | + env->tr.limit < 103) | |
486 | + goto fail; | |
487 | + io_offset = lduw_kernel(env->tr.base + 0x66); | |
488 | + io_offset += (addr >> 3); | |
489 | + /* NOTE: the check reads a 16-bit word, hence the two bytes */ | 
490 | + if ((io_offset + 1) > env->tr.limit) | |
491 | + goto fail; | |
492 | + val = lduw_kernel(env->tr.base + io_offset); | |
493 | + val >>= (addr & 7); | |
494 | + mask = (1 << size) - 1; | |
495 | + /* all bits must be zero to allow the I/O */ | |
496 | + if ((val & mask) != 0) { | |
497 | + fail: | |
498 | + raise_exception_err(EXCP0D_GPF, 0); | |
499 | + } | |
192 | 500 | } |
193 | 501 | |
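
Note: check_io implements the TSS I/O permission bitmap test. A self-contained sketch of the same test, with the bitmap passed in directly (in the patch the 16-bit word is read from the TSS at tr.base + io_offset):

    #include <stdint.h>

    /* one bit per port; an access of `size` bytes starting at port `addr`
       is allowed only if all `size` corresponding bits are zero */
    static int io_allowed(const uint8_t *bitmap, int addr, int size)
    {
        /* read two bytes so a check crossing a byte boundary still
           works in a single test */
        uint16_t val = bitmap[addr >> 3] | (bitmap[(addr >> 3) + 1] << 8);
        uint16_t mask = (1 << size) - 1;
        return ((val >> (addr & 7)) & mask) == 0;
    }
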
194 | -static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2) | |
502 | +void check_iob_T0(void) | |
195 | 503 | { |
196 | - return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); | |
504 | + check_io(T0, 1); | |
197 | 505 | } |
198 | 506 | |
199 | -static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2) | |
507 | +void check_iow_T0(void) | |
200 | 508 | { |
201 | - sc->base = get_seg_base(e1, e2); | |
202 | - sc->limit = get_seg_limit(e1, e2); | |
203 | - sc->flags = e2; | |
509 | + check_io(T0, 2); | |
204 | 510 | } |
205 | 511 | |
206 | -/* init the segment cache in vm86 mode. */ | |
207 | -static inline void load_seg_vm(int seg, int selector) | |
512 | +void check_iol_T0(void) | |
208 | 513 | { |
209 | - selector &= 0xffff; | |
210 | - cpu_x86_load_seg_cache(env, seg, selector, | |
211 | - (uint8_t *)(selector << 4), 0xffff, 0); | |
514 | + check_io(T0, 4); | |
515 | +} | |
516 | + | |
517 | +void check_iob_DX(void) | |
518 | +{ | |
519 | + check_io(EDX & 0xffff, 1); | |
520 | +} | |
521 | + | |
522 | +void check_iow_DX(void) | |
523 | +{ | |
524 | + check_io(EDX & 0xffff, 2); | |
525 | +} | |
526 | + | |
527 | +void check_iol_DX(void) | |
528 | +{ | |
529 | + check_io(EDX & 0xffff, 4); | |
212 | 530 | } |
213 | 531 | |
214 | 532 | /* protected mode interrupt */ |
... | ... | @@ -222,6 +540,21 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, |
222 | 540 | uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size; |
223 | 541 | uint32_t old_cs, old_ss, old_esp, old_eip; |
224 | 542 | |
543 | + has_error_code = 0; | |
544 | + if (!is_int && !is_hw) { | |
545 | + switch(intno) { | |
546 | + case 8: /* #DF */ | 
547 | + case 10: /* #TS */ | 
548 | + case 11: /* #NP */ | 
549 | + case 12: /* #SS */ | 
550 | + case 13: /* #GP */ | 
551 | + case 14: /* #PF */ | 
552 | + case 17: /* #AC */ | 
553 | + has_error_code = 1; | |
554 | + break; | |
555 | + } | |
556 | + } | |
557 | + | |
225 | 558 | dt = &env->idt; |
226 | 559 | if (intno * 8 + 7 > dt->limit) |
227 | 560 | raise_exception_err(EXCP0D_GPF, intno * 8 + 2); |
... | ... | @@ -232,8 +565,27 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, |
232 | 565 | type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; |
233 | 566 | switch(type) { |
234 | 567 | case 5: /* task gate */ |
235 | - cpu_abort(env, "task gate not supported"); | |
236 | - break; | |
568 | + /* this check must be done here to return the correct error code */ | 
569 | + if (!(e2 & DESC_P_MASK)) | |
570 | + raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2); | |
571 | + switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL); | |
572 | + if (has_error_code) { | |
573 | + int mask; | |
574 | + /* push the error code */ | |
575 | + shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1; | |
576 | + if (env->segs[R_SS].flags & DESC_B_MASK) | |
577 | + mask = 0xffffffff; | |
578 | + else | |
579 | + mask = 0xffff; | |
580 | + esp = (env->regs[R_ESP] - (2 << shift)) & mask; | |
581 | + ssp = env->segs[R_SS].base + esp; | |
582 | + if (shift) | |
583 | + stl_kernel(ssp, error_code); | |
584 | + else | |
585 | + stw_kernel(ssp, error_code); | |
586 | + env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask); | |
587 | + } | |
588 | + return; | |
237 | 589 | case 6: /* 286 interrupt gate */ |
238 | 590 | case 7: /* 286 trap gate */ |
239 | 591 | case 14: /* 386 interrupt gate */ |
... | ... | @@ -293,20 +645,6 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, |
293 | 645 | } |
294 | 646 | |
295 | 647 | shift = type >> 3; |
296 | - has_error_code = 0; | |
297 | - if (!is_int && !is_hw) { | |
298 | - switch(intno) { | |
299 | - case 8: | |
300 | - case 10: | |
301 | - case 11: | |
302 | - case 12: | |
303 | - case 13: | |
304 | - case 14: | |
305 | - case 17: | |
306 | - has_error_code = 1; | |
307 | - break; | |
308 | - } | |
309 | - } | |
310 | 648 | push_size = 6 + (new_stack << 2) + (has_error_code << 1); |
311 | 649 | if (env->eflags & VM_MASK) |
312 | 650 | push_size += 8; |
... | ... | @@ -688,7 +1026,7 @@ void helper_ltr_T0(void) |
688 | 1026 | e2 = ldl_kernel(ptr + 4); |
689 | 1027 | type = (e2 >> DESC_TYPE_SHIFT) & 0xf; |
690 | 1028 | if ((e2 & DESC_S_MASK) || |
691 | - (type != 2 && type != 9)) | |
1029 | + (type != 1 && type != 9)) | |
692 | 1030 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
693 | 1031 | if (!(e2 & DESC_P_MASK)) |
694 | 1032 | raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); |
... | ... | @@ -701,6 +1039,7 @@ void helper_ltr_T0(void) |
701 | 1039 | |
702 | 1040 | /* only works if protected mode and not VM86. Calling load_seg with |
703 | 1041 | seg_reg == R_CS is discouraged */ |
1042 | +/* XXX: add ring level checks */ | |
704 | 1043 | void load_seg(int seg_reg, int selector, unsigned int cur_eip) |
705 | 1044 | { |
706 | 1045 | uint32_t e1, e2; |
... | ... | @@ -725,7 +1064,7 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip) |
725 | 1064 | } |
726 | 1065 | |
727 | 1066 | if (seg_reg == R_SS) { |
728 | - if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) { | |
1067 | + if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { | |
729 | 1068 | EIP = cur_eip; |
730 | 1069 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
731 | 1070 | } |
... | ... | @@ -757,7 +1096,7 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip) |
757 | 1096 | /* protected mode jump */ |
758 | 1097 | void helper_ljmp_protected_T0_T1(void) |
759 | 1098 | { |
760 | - int new_cs, new_eip; | |
1099 | + int new_cs, new_eip, gate_cs, type; | |
761 | 1100 | uint32_t e1, e2, cpl, dpl, rpl, limit; |
762 | 1101 | |
763 | 1102 | new_cs = T0; |
... | ... | @@ -771,7 +1110,7 @@ void helper_ljmp_protected_T0_T1(void) |
771 | 1110 | if (!(e2 & DESC_CS_MASK)) |
772 | 1111 | raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); |
773 | 1112 | dpl = (e2 >> DESC_DPL_SHIFT) & 3; |
774 | - if (e2 & DESC_CS_MASK) { | |
1113 | + if (e2 & DESC_C_MASK) { | |
775 | 1114 | /* conforming code segment */ |
776 | 1115 | if (dpl > cpl) |
777 | 1116 | raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); |
... | ... | @@ -792,8 +1131,52 @@ void helper_ljmp_protected_T0_T1(void) |
792 | 1131 | get_seg_base(e1, e2), limit, e2); |
793 | 1132 | EIP = new_eip; |
794 | 1133 | } else { |
795 | - cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x", | |
796 | - new_cs, new_eip); | |
1134 | + /* jump to call or task gate */ | |
1135 | + dpl = (e2 >> DESC_DPL_SHIFT) & 3; | |
1136 | + rpl = new_cs & 3; | |
1137 | + cpl = env->hflags & HF_CPL_MASK; | |
1138 | + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; | |
1139 | + switch(type) { | |
1140 | + case 1: /* 286 TSS */ | |
1141 | + case 9: /* 386 TSS */ | |
1142 | + case 5: /* task gate */ | |
1143 | + if (dpl < cpl || dpl < rpl) | |
1144 | + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); | |
1145 | + switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP); | |
1146 | + break; | |
1147 | + case 4: /* 286 call gate */ | |
1148 | + case 12: /* 386 call gate */ | |
1149 | + if ((dpl < cpl) || (dpl < rpl)) | |
1150 | + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); | |
1151 | + if (!(e2 & DESC_P_MASK)) | |
1152 | + raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); | |
1153 | + gate_cs = e1 >> 16; | |
1154 | + if (load_segment(&e1, &e2, gate_cs) != 0) | |
1155 | + raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); | |
1156 | + dpl = (e2 >> DESC_DPL_SHIFT) & 3; | |
1157 | + /* must be code segment */ | |
1158 | + if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != | |
1159 | + (DESC_S_MASK | DESC_CS_MASK))) | |
1160 | + raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); | |
1161 | + if (((e2 & DESC_C_MASK) && (dpl > cpl)) || | |
1162 | + (!(e2 & DESC_C_MASK) && (dpl != cpl))) | |
1163 | + raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); | |
1164 | + if (!(e2 & DESC_P_MASK)) | |
1165 | + raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); | |
1166 | + new_eip = (e1 & 0xffff); | |
1167 | + if (type == 12) | |
1168 | + new_eip |= (e2 & 0xffff0000); | |
1169 | + limit = get_seg_limit(e1, e2); | |
1170 | + if (new_eip > limit) | |
1171 | + raise_exception_err(EXCP0D_GPF, 0); | |
1172 | + cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, | |
1173 | + get_seg_base(e1, e2), limit, e2); | |
1174 | + EIP = new_eip; | |
1175 | + break; | |
1176 | + default: | |
1177 | + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); | |
1178 | + break; | |
1179 | + } | |
797 | 1180 | } |
798 | 1181 | } |
799 | 1182 | |
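
Note: in the call-gate path above, the gate descriptor packs the target selector and entry offset across its two words. A decoding sketch mirroring the code (hypothetical helper, not part of the patch):

    #include <stdint.h>

    /* e1/e2 are the gate's two descriptor words; type 12 = 386 call gate,
       type 4 = 286 call gate (which has no high offset word) */
    static void decode_call_gate(uint32_t e1, uint32_t e2, int type,
                                 uint16_t *sel, uint32_t *offset)
    {
        *sel = e1 >> 16;
        *offset = e1 & 0xffff;
        if (type == 12)
            *offset |= e2 & 0xffff0000;
    }
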
... | ... | @@ -852,7 +1235,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) |
852 | 1235 | if (!(e2 & DESC_CS_MASK)) |
853 | 1236 | raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); |
854 | 1237 | dpl = (e2 >> DESC_DPL_SHIFT) & 3; |
855 | - if (e2 & DESC_CS_MASK) { | |
1238 | + if (e2 & DESC_C_MASK) { | |
856 | 1239 | /* conforming code segment */ |
857 | 1240 | if (dpl > cpl) |
858 | 1241 | raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); |
... | ... | @@ -898,11 +1281,15 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) |
898 | 1281 | } else { |
899 | 1282 | /* check gate type */ |
900 | 1283 | type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; |
1284 | + dpl = (e2 >> DESC_DPL_SHIFT) & 3; | |
1285 | + rpl = new_cs & 3; | |
901 | 1286 | switch(type) { |
902 | 1287 | case 1: /* available 286 TSS */ |
903 | 1288 | case 9: /* available 386 TSS */ |
904 | 1289 | case 5: /* task gate */ |
905 | - cpu_abort(env, "task gate not supported"); | |
1290 | + if (dpl < cpl || dpl < rpl) | |
1291 | + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); | |
1292 | + switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL); | |
906 | 1293 | break; |
907 | 1294 | case 4: /* 286 call gate */ |
908 | 1295 | case 12: /* 386 call gate */ |
... | ... | @@ -913,8 +1300,6 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) |
913 | 1300 | } |
914 | 1301 | shift = type >> 3; |
915 | 1302 | |
916 | - dpl = (e2 >> DESC_DPL_SHIFT) & 3; | |
917 | - rpl = new_cs & 3; | |
918 | 1303 | if (dpl < cpl || dpl < rpl) |
919 | 1304 | raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); |
920 | 1305 | /* check valid bit */ |
... | ... | @@ -1031,13 +1416,13 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) |
1031 | 1416 | } |
1032 | 1417 | } |
1033 | 1418 | |
1034 | -/* real mode iret */ | |
1419 | +/* real and vm86 mode iret */ | |
1035 | 1420 | void helper_iret_real(int shift) |
1036 | 1421 | { |
1037 | 1422 | uint32_t sp, new_cs, new_eip, new_eflags, new_esp; |
1038 | 1423 | uint8_t *ssp; |
1039 | 1424 | int eflags_mask; |
1040 | - | |
1425 | + | |
1041 | 1426 | sp = ESP & 0xffff; |
1042 | 1427 | ssp = env->segs[R_SS].base + sp; |
1043 | 1428 | if (shift == 1) { |
... | ... | @@ -1056,7 +1441,10 @@ void helper_iret_real(int shift) |
1056 | 1441 | (new_esp & 0xffff); |
1057 | 1442 | load_seg_vm(R_CS, new_cs); |
1058 | 1443 | env->eip = new_eip; |
1059 | - eflags_mask = FL_UPDATE_CPL0_MASK; | |
1444 | + if (env->eflags & VM_MASK) | |
1445 | + eflags_mask = FL_UPDATE_MASK32 | IF_MASK | RF_MASK; | |
1446 | + else | |
1447 | + eflags_mask = FL_UPDATE_CPL0_MASK; | |
1060 | 1448 | if (shift == 0) |
1061 | 1449 | eflags_mask &= 0xffff; |
1062 | 1450 | load_eflags(new_eflags, eflags_mask); |
... | ... | @@ -1102,7 +1490,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) |
1102 | 1490 | if (rpl < cpl) |
1103 | 1491 | raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); |
1104 | 1492 | dpl = (e2 >> DESC_DPL_SHIFT) & 3; |
1105 | - if (e2 & DESC_CS_MASK) { | |
1493 | + if (e2 & DESC_C_MASK) { | |
1106 | 1494 | if (dpl > rpl) |
1107 | 1495 | raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); |
1108 | 1496 | } else { |
... | ... | @@ -1198,7 +1586,24 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) |
1198 | 1586 | |
1199 | 1587 | void helper_iret_protected(int shift) |
1200 | 1588 | { |
1201 | - helper_ret_protected(shift, 1, 0); | |
1589 | + int tss_selector, type; | |
1590 | + uint32_t e1, e2; | |
1591 | + | |
1592 | + /* special case: return from a nested task (NT flag set) */ | 
1593 | + if (env->eflags & NT_MASK) { | |
1594 | + tss_selector = lduw_kernel(env->tr.base + 0); | |
1595 | + if (tss_selector & 4) | |
1596 | + raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); | |
1597 | + if (load_segment(&e1, &e2, tss_selector) != 0) | |
1598 | + raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); | |
1599 | + type = (e2 >> DESC_TYPE_SHIFT) & 0x17; | |
1600 | + /* NOTE: the mask checks both the S bit and the busy TSS type */ | 
1601 | + if (type != 3) | |
1602 | + raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); | |
1603 | + switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET); | |
1604 | + } else { | |
1605 | + helper_ret_protected(shift, 1, 0); | |
1606 | + } | |
1202 | 1607 | } |
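
Note: the `& 0x17` mask above folds the busy 16-bit and 32-bit TSS types onto the same value while keeping the S bit in the comparison, so `type != 3` rejects everything except a busy TSS. Worked out (a sketch, not from the patch):

    /* 5-bit field (S bit + 4 type bits) -> field & 0x17:
       00011 busy 16-bit TSS   -> 3      accepted
       01011 busy 32-bit TSS   -> 3      accepted
       x0001 available TSS     -> 1      rejected (not busy)
       1xxxx code/data (S = 1) -> >= 16  rejected
       only bit 3, the 16/32-bit size bit, is masked out */
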
1203 | 1608 | |
1204 | 1609 | void helper_lret_protected(int shift, int addend) | ... | ... |