Commit 46ddf5511d31101d83e38db17056f4178ac14bc9
1 parent 89e957e7
vm86 emulation closer to Linux kernel code - added correct IRQ emulation for dosemu
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@136 c046a42c-6fe2-441c-8c8c-71466251a162
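For context, the do_vm86() function added below emulates the i386 vm86(2) interface that dosemu relies on. The following is an illustrative sketch of a guest-side caller, assuming a 32-bit x86 Linux build where <sys/syscall.h> defines SYS_vm86 and <asm/vm86.h> provides struct vm86plus_struct and the VM86_* subfunction codes; the enter_v86 helper name is hypothetical and not part of this commit.

/* Illustrative sketch only -- not part of this commit. */
#include <asm/vm86.h>      /* struct vm86plus_struct, VM86_* subfunctions */
#include <sys/syscall.h>   /* SYS_vm86 (i386) */
#include <unistd.h>        /* syscall() */

/* Hypothetical helper: enter v86 mode with the register image in *v86.
 * do_vm86() below answers VM86_PLUS_INSTALL_CHECK with 0 and rejects the
 * IRQ subfunctions with -EINVAL; VM86_ENTER runs the real-mode code until
 * the monitor has to intervene. */
static long enter_v86(struct vm86plus_struct *v86)
{
    if (syscall(SYS_vm86, VM86_PLUS_INSTALL_CHECK, 0) != 0)
        return -1;                      /* vm86plus API not available */
    return syscall(SYS_vm86, VM86_ENTER, v86);
}

When the call returns, EAX carries the reason code packed by return_to_32bit(); a decoding sketch follows the diff below.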
Showing 1 changed file with 407 additions and 0 deletions
linux-user/vm86.c  0 → 100644 (new file)
+/*
+ * vm86 linux syscall support
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include "qemu.h"
+
+//#define DEBUG_VM86
+
+#define set_flags(X,new,mask) \
+((X) = ((X) & ~(mask)) | ((new) & (mask)))
+
+#define SAFE_MASK (0xDD5)
+#define RETURN_MASK (0xDFF)
+
+static inline int is_revectored(int nr, struct target_revectored_struct *bitmap)
+{
+    return (tswap32(bitmap->__map[nr >> 5]) >> (nr & 0x1f)) & 1;
+}
+
+static inline void vm_putw(uint8_t *segptr, unsigned int reg16, unsigned int val)
+{
+    *(uint16_t *)(segptr + (reg16 & 0xffff)) = tswap16(val);
+}
+
+static inline void vm_putl(uint8_t *segptr, unsigned int reg16, unsigned int val)
+{
+    *(uint32_t *)(segptr + (reg16 & 0xffff)) = tswap32(val);
+}
+
+static inline unsigned int vm_getw(uint8_t *segptr, unsigned int reg16)
+{
+    return tswap16(*(uint16_t *)(segptr + (reg16 & 0xffff)));
+}
+
+static inline unsigned int vm_getl(uint8_t *segptr, unsigned int reg16)
+{
+    return tswap32(*(uint32_t *)(segptr + (reg16 & 0xffff)));
+}
+
+void save_v86_state(CPUX86State *env)
+{
+    TaskState *ts = env->opaque;
+
+    /* put the VM86 registers in the userspace register structure */
+    ts->target_v86->regs.eax = tswap32(env->regs[R_EAX]);
+    ts->target_v86->regs.ebx = tswap32(env->regs[R_EBX]);
+    ts->target_v86->regs.ecx = tswap32(env->regs[R_ECX]);
+    ts->target_v86->regs.edx = tswap32(env->regs[R_EDX]);
+    ts->target_v86->regs.esi = tswap32(env->regs[R_ESI]);
+    ts->target_v86->regs.edi = tswap32(env->regs[R_EDI]);
+    ts->target_v86->regs.ebp = tswap32(env->regs[R_EBP]);
+    ts->target_v86->regs.esp = tswap32(env->regs[R_ESP]);
+    ts->target_v86->regs.eip = tswap32(env->eip);
+    ts->target_v86->regs.cs = tswap16(env->segs[R_CS]);
+    ts->target_v86->regs.ss = tswap16(env->segs[R_SS]);
+    ts->target_v86->regs.ds = tswap16(env->segs[R_DS]);
+    ts->target_v86->regs.es = tswap16(env->segs[R_ES]);
+    ts->target_v86->regs.fs = tswap16(env->segs[R_FS]);
+    ts->target_v86->regs.gs = tswap16(env->segs[R_GS]);
+    set_flags(env->eflags, ts->v86flags, VIF_MASK | ts->v86mask);
+    ts->target_v86->regs.eflags = tswap32(env->eflags);
+#ifdef DEBUG_VM86
+    fprintf(logfile, "save_v86_state: eflags=%08x cs:ip=%04x:%04x\n",
+            env->eflags, env->segs[R_CS], env->eip);
+#endif
+
+    /* restore 32 bit registers */
+    env->regs[R_EAX] = ts->vm86_saved_regs.eax;
+    env->regs[R_EBX] = ts->vm86_saved_regs.ebx;
+    env->regs[R_ECX] = ts->vm86_saved_regs.ecx;
+    env->regs[R_EDX] = ts->vm86_saved_regs.edx;
+    env->regs[R_ESI] = ts->vm86_saved_regs.esi;
+    env->regs[R_EDI] = ts->vm86_saved_regs.edi;
+    env->regs[R_EBP] = ts->vm86_saved_regs.ebp;
+    env->regs[R_ESP] = ts->vm86_saved_regs.esp;
+    env->eflags = ts->vm86_saved_regs.eflags;
+    env->eip = ts->vm86_saved_regs.eip;
+
+    cpu_x86_load_seg(env, R_CS, ts->vm86_saved_regs.cs);
+    cpu_x86_load_seg(env, R_SS, ts->vm86_saved_regs.ss);
+    cpu_x86_load_seg(env, R_DS, ts->vm86_saved_regs.ds);
+    cpu_x86_load_seg(env, R_ES, ts->vm86_saved_regs.es);
+    cpu_x86_load_seg(env, R_FS, ts->vm86_saved_regs.fs);
+    cpu_x86_load_seg(env, R_GS, ts->vm86_saved_regs.gs);
+}
+
+/* return from vm86 mode to 32 bit. The vm86() syscall will return
+   'retval' */
+static inline void return_to_32bit(CPUX86State *env, int retval)
+{
+#ifdef DEBUG_VM86
+    fprintf(logfile, "return_to_32bit: ret=0x%x\n", retval);
+#endif
+    save_v86_state(env);
+    env->regs[R_EAX] = retval;
+}
+
+static inline int set_IF(CPUX86State *env)
+{
+    TaskState *ts = env->opaque;
+
+    ts->v86flags |= VIF_MASK;
+    if (ts->v86flags & VIP_MASK) {
+        return_to_32bit(env, TARGET_VM86_STI);
+        return 1;
+    }
+    return 0;
+}
+
+static inline void clear_IF(CPUX86State *env)
+{
+    TaskState *ts = env->opaque;
+
+    ts->v86flags &= ~VIF_MASK;
+}
+
+static inline void clear_TF(CPUX86State *env)
+{
+    env->eflags &= ~TF_MASK;
+}
+
+static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
+{
+    TaskState *ts = env->opaque;
+
+    set_flags(ts->v86flags, eflags, ts->v86mask);
+    set_flags(env->eflags, eflags, SAFE_MASK);
+    if (eflags & IF_MASK)
+        return set_IF(env);
+    return 0;
+}
+
+static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
+{
+    TaskState *ts = env->opaque;
+
+    set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
+    set_flags(env->eflags, flags, SAFE_MASK);
+    if (flags & IF_MASK)
+        return set_IF(env);
+    return 0;
+}
+
+static inline unsigned int get_vflags(CPUX86State *env)
+{
+    TaskState *ts = env->opaque;
+    unsigned int flags;
+
+    flags = env->eflags & RETURN_MASK;
+    if (ts->v86flags & VIF_MASK)
+        flags |= IF_MASK;
+    return flags | (ts->v86flags & ts->v86mask);
+}
+
+#define ADD16(reg, val) reg = (reg & ~0xffff) | ((reg + (val)) & 0xffff)
+
+/* handle VM86 interrupt (NOTE: the CPU core currently does not
+   support TSS interrupt revectoring, so this code is always executed) */
+void do_int(CPUX86State *env, int intno)
+{
+    TaskState *ts = env->opaque;
+    uint32_t *int_ptr, segoffs;
+    uint8_t *ssp;
+    unsigned int sp;
+
+#if 1
+    if (intno == 0xe6 && (env->regs[R_EAX] & 0xffff) == 0x00c0)
+        loglevel = 1;
+#endif
+
+    if (env->segs[R_CS] == TARGET_BIOSSEG)
+        goto cannot_handle;
+    if (is_revectored(intno, &ts->target_v86->int_revectored))
+        goto cannot_handle;
+    if (intno == 0x21 && is_revectored((env->regs[R_EAX] >> 8) & 0xff,
+                                       &ts->target_v86->int21_revectored))
+        goto cannot_handle;
+    int_ptr = (uint32_t *)(intno << 2);
+    segoffs = tswap32(*int_ptr);
+    if ((segoffs >> 16) == TARGET_BIOSSEG)
+        goto cannot_handle;
+#if defined(DEBUG_VM86)
+    fprintf(logfile, "VM86: emulating int 0x%x. CS:IP=%04x:%04x\n",
+            intno, segoffs >> 16, segoffs & 0xffff);
+#endif
+    /* save old state */
+    ssp = (uint8_t *)(env->segs[R_SS] << 4);
+    sp = env->regs[R_ESP] & 0xffff;
+    vm_putw(ssp, sp - 2, get_vflags(env));
+    vm_putw(ssp, sp - 4, env->segs[R_CS]);
+    vm_putw(ssp, sp - 6, env->eip);
+    ADD16(env->regs[R_ESP], -6);
+    /* goto interrupt handler */
+    env->eip = segoffs & 0xffff;
+    cpu_x86_load_seg(env, R_CS, segoffs >> 16);
+    clear_TF(env);
+    clear_IF(env);
+    return;
+ cannot_handle:
+#if defined(DEBUG_VM86)
+    fprintf(logfile, "VM86: return to 32 bits int 0x%x\n", intno);
+#endif
+    return_to_32bit(env, TARGET_VM86_INTx | (intno << 8));
+}
+
+#define CHECK_IF_IN_TRAP(disp) \
+    if ((tswap32(ts->target_v86->vm86plus.flags) & TARGET_vm86dbg_active) && \
+        (tswap32(ts->target_v86->vm86plus.flags) & TARGET_vm86dbg_TFpendig)) \
+        vm_putw(ssp, sp + disp, vm_getw(ssp, sp + disp) | TF_MASK)
+
+#define VM86_FAULT_RETURN \
+    if ((tswap32(ts->target_v86->vm86plus.flags) & TARGET_force_return_for_pic) && \
+        (ts->v86flags & (IF_MASK | VIF_MASK))) \
+        return_to_32bit(env, TARGET_VM86_PICRETURN); \
+    return
+
+void handle_vm86_fault(CPUX86State *env)
+{
+    TaskState *ts = env->opaque;
+    uint8_t *csp, *pc, *ssp;
+    unsigned int ip, sp;
+
+    csp = (uint8_t *)(env->segs[R_CS] << 4);
+    ip = env->eip & 0xffff;
+    pc = csp + ip;
+
+    ssp = (uint8_t *)(env->segs[R_SS] << 4);
+    sp = env->regs[R_ESP] & 0xffff;
+
+#if defined(DEBUG_VM86)
+    fprintf(logfile, "VM86 exception %04x:%08x %02x %02x\n",
+            env->segs[R_CS], env->eip, pc[0], pc[1]);
+#endif
+
+    /* VM86 mode */
+    switch(pc[0]) {
+    case 0x66:
+        switch(pc[1]) {
+        case 0x9c: /* pushfd */
+            ADD16(env->eip, 2);
+            ADD16(env->regs[R_ESP], -4);
+            vm_putl(ssp, sp - 4, get_vflags(env));
+            VM86_FAULT_RETURN;
+
+        case 0x9d: /* popfd */
+            ADD16(env->eip, 2);
+            ADD16(env->regs[R_ESP], 4);
+            CHECK_IF_IN_TRAP(0);
+            if (set_vflags_long(vm_getl(ssp, sp), env))
+                return;
+            VM86_FAULT_RETURN;
+
+        case 0xcf: /* iretd */
+            ADD16(env->regs[R_ESP], 12);
+            env->eip = vm_getl(ssp, sp) & 0xffff;
+            cpu_x86_load_seg(env, R_CS, vm_getl(ssp, sp + 4) & 0xffff);
+            CHECK_IF_IN_TRAP(8);
+            if (set_vflags_long(vm_getl(ssp, sp + 8), env))
+                return;
+            VM86_FAULT_RETURN;
+
+        default:
+            goto vm86_gpf;
+        }
+        break;
+    case 0x9c: /* pushf */
+        ADD16(env->eip, 1);
+        ADD16(env->regs[R_ESP], -2);
+        vm_putw(ssp, sp - 2, get_vflags(env));
+        VM86_FAULT_RETURN;
+
+    case 0x9d: /* popf */
+        ADD16(env->eip, 1);
+        ADD16(env->regs[R_ESP], 2);
+        CHECK_IF_IN_TRAP(0);
+        if (set_vflags_short(vm_getw(ssp, sp), env))
+            return;
+        VM86_FAULT_RETURN;
+
+    case 0xcd: /* int */
+        ADD16(env->eip, 2);
+        do_int(env, pc[1]);
+        break;
+
+    case 0xcf: /* iret */
+        ADD16(env->regs[R_ESP], 6);
+        env->eip = vm_getw(ssp, sp);
+        cpu_x86_load_seg(env, R_CS, vm_getw(ssp, sp + 2));
+        CHECK_IF_IN_TRAP(4);
+        if (set_vflags_short(vm_getw(ssp, sp + 4), env))
+            return;
+        VM86_FAULT_RETURN;
+
+    case 0xfa: /* cli */
+        ADD16(env->eip, 1);
+        clear_IF(env);
+        VM86_FAULT_RETURN;
+
+    case 0xfb: /* sti */
+        ADD16(env->eip, 1);
+        if (set_IF(env))
+            return;
+        VM86_FAULT_RETURN;
+
+    default:
+    vm86_gpf:
+        /* real VM86 GPF exception */
+        return_to_32bit(env, TARGET_VM86_UNKNOWN);
+        break;
+    }
+}
+
+int do_vm86(CPUX86State *env, long subfunction,
+            struct target_vm86plus_struct *target_v86)
+{
+    TaskState *ts = env->opaque;
+    int ret;
+
+    switch (subfunction) {
+    case TARGET_VM86_REQUEST_IRQ:
+    case TARGET_VM86_FREE_IRQ:
+    case TARGET_VM86_GET_IRQ_BITS:
+    case TARGET_VM86_GET_AND_RESET_IRQ:
+        gemu_log("qemu: unsupported vm86 subfunction (%ld)\n", subfunction);
+        ret = -EINVAL;
+        goto out;
+    case TARGET_VM86_PLUS_INSTALL_CHECK:
+        /* NOTE: on old vm86 stuff this will return the error
+           from verify_area(), because the subfunction is
+           interpreted as (invalid) address to vm86_struct.
+           So the installation check works.
+         */
+        ret = 0;
+        goto out;
+    }
+
+    ts->target_v86 = target_v86;
+    /* save current CPU regs */
+    ts->vm86_saved_regs.eax = 0; /* default vm86 syscall return code */
+    ts->vm86_saved_regs.ebx = env->regs[R_EBX];
+    ts->vm86_saved_regs.ecx = env->regs[R_ECX];
+    ts->vm86_saved_regs.edx = env->regs[R_EDX];
+    ts->vm86_saved_regs.esi = env->regs[R_ESI];
+    ts->vm86_saved_regs.edi = env->regs[R_EDI];
+    ts->vm86_saved_regs.ebp = env->regs[R_EBP];
+    ts->vm86_saved_regs.esp = env->regs[R_ESP];
+    ts->vm86_saved_regs.eflags = env->eflags;
+    ts->vm86_saved_regs.eip = env->eip;
+    ts->vm86_saved_regs.cs = env->segs[R_CS];
+    ts->vm86_saved_regs.ss = env->segs[R_SS];
+    ts->vm86_saved_regs.ds = env->segs[R_DS];
+    ts->vm86_saved_regs.es = env->segs[R_ES];
+    ts->vm86_saved_regs.fs = env->segs[R_FS];
+    ts->vm86_saved_regs.gs = env->segs[R_GS];
+
+    /* build vm86 CPU state */
+    ts->v86flags = tswap32(target_v86->regs.eflags);
+    env->eflags = (env->eflags & ~SAFE_MASK) |
+        (tswap32(target_v86->regs.eflags) & SAFE_MASK) | VM_MASK;
+    ts->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
+
+    env->regs[R_EBX] = tswap32(target_v86->regs.ebx);
+    env->regs[R_ECX] = tswap32(target_v86->regs.ecx);
+    env->regs[R_EDX] = tswap32(target_v86->regs.edx);
+    env->regs[R_ESI] = tswap32(target_v86->regs.esi);
+    env->regs[R_EDI] = tswap32(target_v86->regs.edi);
+    env->regs[R_EBP] = tswap32(target_v86->regs.ebp);
+    env->regs[R_ESP] = tswap32(target_v86->regs.esp);
+    env->eip = tswap32(target_v86->regs.eip);
+    cpu_x86_load_seg(env, R_CS, tswap16(target_v86->regs.cs));
+    cpu_x86_load_seg(env, R_SS, tswap16(target_v86->regs.ss));
+    cpu_x86_load_seg(env, R_DS, tswap16(target_v86->regs.ds));
+    cpu_x86_load_seg(env, R_ES, tswap16(target_v86->regs.es));
+    cpu_x86_load_seg(env, R_FS, tswap16(target_v86->regs.fs));
+    cpu_x86_load_seg(env, R_GS, tswap16(target_v86->regs.gs));
+    ret = tswap32(target_v86->regs.eax); /* eax will be restored at
+                                            the end of the syscall */
+#ifdef DEBUG_VM86
+    fprintf(logfile, "do_vm86: cs:ip=%04x:%04x\n", env->segs[R_CS], env->eip);
+#endif
+    /* now the virtual CPU is ready for vm86 execution ! */
+ out:
+    return ret;
+}
+
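For reference, the value packed by return_to_32bit() above (for example TARGET_VM86_INTx | (intno << 8) in do_int()) is what the guest's vm86() call ultimately returns. The following is a hedged sketch of how a dosemu-style monitor might decode it, assuming the standard VM86_TYPE/VM86_ARG macros and VM86_* reason codes from <asm/vm86.h>; the handle_vm86_return name is illustrative only.

/* Illustrative sketch only -- not part of this commit. */
#include <asm/vm86.h>   /* VM86_TYPE, VM86_ARG, VM86_INTx, VM86_STI, ... */
#include <stdio.h>

/* Hypothetical dispatcher for the reason code returned by vm86(). */
static void handle_vm86_return(int retval)
{
    switch (VM86_TYPE(retval)) {
    case VM86_INTx:       /* do_int() could not emulate this vector */
        printf("revectored int 0x%02x\n", VM86_ARG(retval));
        break;
    case VM86_STI:        /* set_IF() saw VIP pending: deliver the IRQ now */
        printf("interrupts re-enabled with an IRQ pending\n");
        break;
    case VM86_PICRETURN:  /* force_return_for_pic path in VM86_FAULT_RETURN */
        printf("return forced for PIC emulation\n");
        break;
    case VM86_SIGNAL:     /* interrupted by a signal */
        break;
    default:              /* e.g. VM86_UNKNOWN from handle_vm86_fault() */
        printf("unhandled vm86 fault (type %d)\n", VM86_TYPE(retval));
        break;
    }
}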
407 | + |