Commit 3fb2ded1d501287c10be6cad76bd0adc109ba2b0

Authored by bellard
1 parent f76af4b3

hardware interrupt support - support for full ring 0 exception simulation


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@260 c046a42c-6fe2-441c-8c8c-71466251a162
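The heart of the change is the restructured cpu_exec() loop in the diff below: an outer for(;;) re-arms setjmp(), services any pending CPU exception (or, on i386, a pending hardware interrupt when EFLAGS.IF is set), and only then re-enters the inner translation-block loop, which escapes via longjmp() whenever an exception is raised. The following stand-alone C sketch mirrors that control flow under simplified assumptions; FakeCPU, fake_do_interrupt() and fake_run_some_code() are illustrative stand-ins, not QEMU APIs.

/* Simplified, self-contained sketch (not the actual QEMU code) of the
   control flow this commit gives cpu_exec(): re-arm setjmp(), dispatch a
   pending exception or hardware interrupt, then run guest code until it
   longjmp()s back out. */
#include <setjmp.h>
#include <stdio.h>

enum { EXCP_NONE = -1, EXCP_DIVZ = 0, EXCP_INTERRUPT = 256, EXCP_EXIT = 257 };

typedef struct {
    jmp_buf jmp_env;
    int exception_index;        /* pending exception, or EXCP_NONE */
    int hard_interrupt_request; /* set asynchronously by the "PIC" */
    int if_flag;                /* stands in for EFLAGS.IF */
} FakeCPU;

static void fake_do_interrupt(int intno)
{
    printf("dispatching vector %d\n", intno);
}

/* The inner loop would normally run generated code; here we just raise
   one exception and then ask the loop to exit. */
static void fake_run_some_code(FakeCPU *cpu, int step)
{
    cpu->exception_index = (step == 0) ? EXCP_DIVZ : EXCP_EXIT;
    longjmp(cpu->jmp_env, 1);   /* same escape path as cpu_loop_exit() */
}

static int fake_cpu_exec(FakeCPU *cpu)
{
    volatile int step = 0;      /* volatile: survives the longjmp */
    for (;;) {
        if (setjmp(cpu->jmp_env) == 0) {
            /* pending exception? */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT)
                    return cpu->exception_index;   /* exit request */
                fake_do_interrupt(cpu->exception_index);
                cpu->exception_index = EXCP_NONE;
            }
            /* pending hardware interrupt and interrupts enabled? */
            if (cpu->hard_interrupt_request && cpu->if_flag) {
                fake_do_interrupt(32);             /* fake PIC vector */
                cpu->hard_interrupt_request = 0;
            }
            /* inner execution loop */
            for (;;)
                fake_run_some_code(cpu, step++);
        }
        /* longjmp lands here; loop around and service the exception */
    }
}

int main(void)
{
    FakeCPU cpu = { .exception_index = EXCP_NONE,
                    .hard_interrupt_request = 1, .if_flag = 1 };
    printf("exit code %d\n", fake_cpu_exec(&cpu));
    return 0;
}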
Showing 1 changed file with 195 additions and 131 deletions
cpu-exec.c
@@ -29,6 +29,8 @@
 
 //#define DEBUG_EXEC
 //#define DEBUG_SIGNAL
+/* enable it to have a fully working x86 emulator for ring 0 */
+//#define RING0_HACKS
 
 #if defined(TARGET_ARM)
 /* XXX: unify with i386 target */
@@ -140,146 +142,195 @@ int cpu_exec(CPUState *env1)
 #error unsupported target CPU
 #endif
     env->interrupt_request = 0;
+    env->exception_index = -1;
 
     /* prepare setjmp context for exception handling */
-    if (setjmp(env->jmp_env) == 0) {
-        T0 = 0; /* force lookup of first TB */
-        for(;;) {
+    for(;;) {
+        if (setjmp(env->jmp_env) == 0) {
+            /* if an exception is pending, we execute it here */
+            if (env->exception_index >= 0) {
+                if (env->exception_index >= EXCP_INTERRUPT) {
+                    /* exit request from the cpu execution loop */
+                    ret = env->exception_index;
+                    break;
+                } else if (env->user_mode_only) {
+                    /* if user mode only, we simulate a fake exception
+                       which will be handled outside the cpu execution
+                       loop */
+                    do_interrupt_user(env->exception_index,
+                                      env->exception_is_int,
+                                      env->error_code,
+                                      env->exception_next_eip);
+                    ret = env->exception_index;
+                    break;
+                } else {
+                    /* simulate a real cpu exception. On i386, it can
+                       trigger new exceptions, but we do not handle
+                       double or triple faults yet. */
+                    do_interrupt(env->exception_index,
+                                 env->exception_is_int,
+                                 env->error_code,
+                                 env->exception_next_eip);
+                }
+                env->exception_index = -1;
+            }
+#if defined(TARGET_I386)
+            /* if hardware interrupt pending, we execute it */
+            if (env->hard_interrupt_request &&
+                (env->eflags & IF_MASK)) {
+                int intno;
+                intno = cpu_x86_get_pic_interrupt(env);
+                if (loglevel) {
+                    fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
+                }
+                do_interrupt(intno, 0, 0, 0);
+                env->hard_interrupt_request = 0;
+            }
+#endif
+            T0 = 0; /* force lookup of first TB */
+            for(;;) {
 #ifdef __sparc__
-            /* g1 can be modified by some libc? functions */
-            tmp_T0 = T0;
+                /* g1 can be modified by some libc? functions */
+                tmp_T0 = T0;
 #endif
-            if (env->interrupt_request) {
-                env->exception_index = EXCP_INTERRUPT;
-                cpu_loop_exit();
-            }
+                if (env->interrupt_request) {
+                    env->exception_index = EXCP_INTERRUPT;
+                    cpu_loop_exit();
+                }
 #ifdef DEBUG_EXEC
-            if (loglevel) {
+                if (loglevel) {
 #if defined(TARGET_I386)
-                /* restore flags in standard format */
-                env->regs[R_EAX] = EAX;
-                env->regs[R_EBX] = EBX;
-                env->regs[R_ECX] = ECX;
-                env->regs[R_EDX] = EDX;
-                env->regs[R_ESI] = ESI;
-                env->regs[R_EDI] = EDI;
-                env->regs[R_EBP] = EBP;
-                env->regs[R_ESP] = ESP;
-                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
-                cpu_x86_dump_state(env, logfile, 0);
-                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+                    /* restore flags in standard format */
+                    env->regs[R_EAX] = EAX;
+                    env->regs[R_EBX] = EBX;
+                    env->regs[R_ECX] = ECX;
+                    env->regs[R_EDX] = EDX;
+                    env->regs[R_ESI] = ESI;
+                    env->regs[R_EDI] = EDI;
+                    env->regs[R_EBP] = EBP;
+                    env->regs[R_ESP] = ESP;
+                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
+                    cpu_x86_dump_state(env, logfile, 0);
+                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
 #elif defined(TARGET_ARM)
-                cpu_arm_dump_state(env, logfile, 0);
+                    cpu_arm_dump_state(env, logfile, 0);
 #else
 #error unsupported target CPU
 #endif
-            }
+                }
 #endif
-            /* we compute the CPU state. We assume it will not
-               change during the whole generated block. */
+                /* we compute the CPU state. We assume it will not
+                   change during the whole generated block. */
 #if defined(TARGET_I386)
-            flags = env->segs[R_CS].seg_32bit << GEN_FLAG_CODE32_SHIFT;
-            flags |= env->segs[R_SS].seg_32bit << GEN_FLAG_SS32_SHIFT;
-            flags |= (((unsigned long)env->segs[R_DS].base |
-                       (unsigned long)env->segs[R_ES].base |
-                       (unsigned long)env->segs[R_SS].base) != 0) <<
-                GEN_FLAG_ADDSEG_SHIFT;
-            if (!(env->eflags & VM_MASK)) {
-                flags |= (env->segs[R_CS].selector & 3) << GEN_FLAG_CPL_SHIFT;
-            } else {
-                /* NOTE: a dummy CPL is kept */
-                flags |= (1 << GEN_FLAG_VM_SHIFT);
-                flags |= (3 << GEN_FLAG_CPL_SHIFT);
-            }
-            flags |= (env->eflags & (IOPL_MASK | TF_MASK));
-            cs_base = env->segs[R_CS].base;
-            pc = cs_base + env->eip;
+                flags = (env->segs[R_CS].flags & DESC_B_MASK)
+                    >> (DESC_B_SHIFT - GEN_FLAG_CODE32_SHIFT);
+                flags |= (env->segs[R_SS].flags & DESC_B_MASK)
+                    >> (DESC_B_SHIFT - GEN_FLAG_SS32_SHIFT);
+                flags |= (((unsigned long)env->segs[R_DS].base |
+                           (unsigned long)env->segs[R_ES].base |
+                           (unsigned long)env->segs[R_SS].base) != 0) <<
+                    GEN_FLAG_ADDSEG_SHIFT;
+                if (!(env->eflags & VM_MASK)) {
+                    flags |= (env->segs[R_CS].selector & 3) << GEN_FLAG_CPL_SHIFT;
+                } else {
+                    /* NOTE: a dummy CPL is kept */
+                    flags |= (1 << GEN_FLAG_VM_SHIFT);
+                    flags |= (3 << GEN_FLAG_CPL_SHIFT);
+                }
+                flags |= (env->eflags & (IOPL_MASK | TF_MASK));
+                cs_base = env->segs[R_CS].base;
+                pc = cs_base + env->eip;
 #elif defined(TARGET_ARM)
-            flags = 0;
-            cs_base = 0;
-            pc = (uint8_t *)env->regs[15];
+                flags = 0;
+                cs_base = 0;
+                pc = (uint8_t *)env->regs[15];
 #else
 #error unsupported CPU
 #endif
-            tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
-                         flags);
-            if (!tb) {
-                spin_lock(&tb_lock);
-                /* if no translated code available, then translate it now */
-                tb = tb_alloc((unsigned long)pc);
+                tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
+                             flags);
                 if (!tb) {
-                    /* flush must be done */
-                    tb_flush();
-                    /* cannot fail at this point */
+                    spin_lock(&tb_lock);
+                    /* if no translated code available, then translate it now */
                     tb = tb_alloc((unsigned long)pc);
-                    /* don't forget to invalidate previous TB info */
-                    ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
-                    T0 = 0;
-                }
-                tc_ptr = code_gen_ptr;
-                tb->tc_ptr = tc_ptr;
-                tb->cs_base = (unsigned long)cs_base;
-                tb->flags = flags;
-                ret = cpu_gen_code(tb, CODE_GEN_MAX_SIZE, &code_gen_size);
+                    if (!tb) {
+                        /* flush must be done */
+                        tb_flush();
+                        /* cannot fail at this point */
+                        tb = tb_alloc((unsigned long)pc);
+                        /* don't forget to invalidate previous TB info */
+                        ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
+                        T0 = 0;
+                    }
+                    tc_ptr = code_gen_ptr;
+                    tb->tc_ptr = tc_ptr;
+                    tb->cs_base = (unsigned long)cs_base;
+                    tb->flags = flags;
+                    ret = cpu_gen_code(tb, CODE_GEN_MAX_SIZE, &code_gen_size);
 #if defined(TARGET_I386)
-                /* XXX: suppress that, this is incorrect */
-                /* if invalid instruction, signal it */
-                if (ret != 0) {
-                    /* NOTE: the tb is allocated but not linked, so we
-                       can leave it */
+                    /* XXX: suppress that, this is incorrect */
+                    /* if invalid instruction, signal it */
+                    if (ret != 0) {
+                        /* NOTE: the tb is allocated but not linked, so we
+                           can leave it */
+                        spin_unlock(&tb_lock);
+                        raise_exception(EXCP06_ILLOP);
+                    }
+#endif
+                    *ptb = tb;
+                    tb->hash_next = NULL;
+                    tb_link(tb);
+                    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
                     spin_unlock(&tb_lock);
-                    raise_exception(EXCP06_ILLOP);
                 }
-#endif
-                *ptb = tb;
-                tb->hash_next = NULL;
-                tb_link(tb);
-                code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
-                spin_unlock(&tb_lock);
-            }
 #ifdef DEBUG_EXEC
-            if (loglevel) {
-                fprintf(logfile, "Trace 0x%08lx [0x%08lx] %s\n",
-                        (long)tb->tc_ptr, (long)tb->pc,
-                        lookup_symbol((void *)tb->pc));
-            }
+                if (loglevel) {
+                    fprintf(logfile, "Trace 0x%08lx [0x%08lx] %s\n",
+                            (long)tb->tc_ptr, (long)tb->pc,
+                            lookup_symbol((void *)tb->pc));
+                }
 #endif
 #ifdef __sparc__
-            T0 = tmp_T0;
+                T0 = tmp_T0;
 #endif
-            /* see if we can patch the calling TB. XXX: remove TF test */
-            if (T0 != 0
+                /* see if we can patch the calling TB. XXX: remove TF test */
+#ifndef RING0_HACKS
+
+                if (T0 != 0
 #if defined(TARGET_I386)
-                && !(env->eflags & TF_MASK)
+                    && !(env->eflags & TF_MASK)
 #endif
-                ) {
-                spin_lock(&tb_lock);
-                tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
-                spin_unlock(&tb_lock);
-            }
-            tc_ptr = tb->tc_ptr;
-
-            /* execute the generated code */
-            gen_func = (void *)tc_ptr;
+                    ) {
+                    spin_lock(&tb_lock);
+                    tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
+                    spin_unlock(&tb_lock);
+                }
+#endif
+                tc_ptr = tb->tc_ptr;
+
+                /* execute the generated code */
+                gen_func = (void *)tc_ptr;
 #if defined(__sparc__)
-            __asm__ __volatile__("call %0\n\t"
-                                 "mov %%o7,%%i0"
-                                 : /* no outputs */
-                                 : "r" (gen_func)
-                                 : "i0", "i1", "i2", "i3", "i4", "i5");
+                __asm__ __volatile__("call %0\n\t"
+                                     "mov %%o7,%%i0"
+                                     : /* no outputs */
+                                     : "r" (gen_func)
+                                     : "i0", "i1", "i2", "i3", "i4", "i5");
 #elif defined(__arm__)
-            asm volatile ("mov pc, %0\n\t"
-                          ".global exec_loop\n\t"
-                          "exec_loop:\n\t"
-                          : /* no outputs */
-                          : "r" (gen_func)
-                          : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
+                asm volatile ("mov pc, %0\n\t"
+                              ".global exec_loop\n\t"
+                              "exec_loop:\n\t"
+                              : /* no outputs */
+                              : "r" (gen_func)
+                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
 #else
-            gen_func();
+                gen_func();
 #endif
+            }
+        } else {
         }
-    }
-    ret = env->exception_index;
+    } /* for(;;) */
+
 
 #if defined(TARGET_I386)
     /* restore flags in standard format */
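A side note on the new flags computation above: instead of a stored seg_32bit field, the code-generation flags are now derived straight from the cached descriptor flags, shifting the descriptor's default-size (B) bit down into the translator's CODE32/SS32 positions. Below is a minimal, self-contained illustration of that bit-relocation idiom; the bit positions are made up, not the real DESC_B_SHIFT/GEN_FLAG_* values.

/* Demonstration (with invented bit positions) of masking out one flag bit
   and shifting it from its descriptor position directly into the position
   the code generator expects, avoiding an if/else per segment. */
#include <stdio.h>

#define FAKE_DESC_B_SHIFT      22   /* where the bit lives in the source word */
#define FAKE_DESC_B_MASK       (1u << FAKE_DESC_B_SHIFT)
#define FAKE_GEN_CODE32_SHIFT  0    /* where the translator wants it */

int main(void)
{
    unsigned int seg_flags = FAKE_DESC_B_MASK;   /* a 32-bit code segment */
    unsigned int gen_flags =
        (seg_flags & FAKE_DESC_B_MASK) >> (FAKE_DESC_B_SHIFT - FAKE_GEN_CODE32_SHIFT);
    printf("code32 flag = %u\n", gen_flags & 1); /* prints 1 */
    return 0;
}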
@@ -348,11 +399,11 @@ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
         SegmentCache *sc;
         selector &= 0xffff;
         sc = &env->segs[seg_reg];
-        /* NOTE: in VM86 mode, limit and seg_32bit are never reloaded,
+        /* NOTE: in VM86 mode, limit and flags are never reloaded,
            so we must load them here */
         sc->base = (void *)(selector << 4);
         sc->limit = 0xffff;
-        sc->seg_32bit = 0;
+        sc->flags = 0;
         sc->selector = selector;
     } else {
         load_seg(seg_reg, selector, 0);
@@ -398,6 +449,8 @@ void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
 #include <signal.h>
 #include <sys/ucontext.h>
 
+#if defined(TARGET_I386)
+
 /* 'pc' is the host PC at which the exception was raised. 'address' is
    the effective address of the memory exception. 'is_write' is 1 if a
    write caused the exception and otherwise 0'. 'old_set' is the
@@ -407,42 +460,53 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
 {
     TranslationBlock *tb;
     int ret;
-    uint32_t found_pc;
 
+#ifdef RING0_HACKS
+    env = global_env; /* XXX: find a better solution */
+#endif
 #if defined(DEBUG_SIGNAL)
-    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx wr=%d oldset=0x%08lx\n",
+    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
            pc, address, is_write, *(unsigned long *)old_set);
 #endif
     /* XXX: locking issue */
     if (is_write && page_unprotect(address)) {
         return 1;
     }
+    /* see if it is an MMU fault */
+    ret = cpu_x86_handle_mmu_fault(env, address, is_write);
+    if (ret < 0)
+        return 0; /* not an MMU fault */
+    if (ret == 0)
+        return 1; /* the MMU fault was handled without causing real CPU fault */
+    /* now we have a real cpu fault */
     tb = tb_find_pc(pc);
     if (tb) {
         /* the PC is inside the translated code. It means that we have
            a virtual CPU fault */
-        ret = cpu_search_pc(tb, &found_pc, pc);
-        if (ret < 0)
-            return 0;
-#if defined(TARGET_I386)
-        env->eip = found_pc - tb->cs_base;
-        env->cr[2] = address;
-        /* we restore the process signal mask as the sigreturn should
-           do it (XXX: use sigsetjmp) */
-        sigprocmask(SIG_SETMASK, old_set, NULL);
-        raise_exception_err(EXCP0E_PAGE, 4 | (is_write << 1));
+        cpu_restore_state(tb, env, pc);
+    }
+#if 0
+    printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
+           env->eip, env->cr[2], env->error_code);
+#endif
+    /* we restore the process signal mask as the sigreturn should
+       do it (XXX: use sigsetjmp) */
+    sigprocmask(SIG_SETMASK, old_set, NULL);
+    raise_exception_err(EXCP0E_PAGE, env->error_code);
+    /* never comes here */
+    return 1;
+}
+
 #elif defined(TARGET_ARM)
-        env->regs[15] = found_pc;
-        /* XXX: do more */
+static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
+                                    int is_write, sigset_t *old_set)
+{
+    /* XXX: do more */
+    return 0;
+}
 #else
 #error unsupported target CPU
 #endif
-        /* never comes here */
-        return 1;
-    } else {
-        return 0;
-    }
-}
 
 #if defined(__i386__)
 
@@ -570,6 +634,6 @@ int cpu_signal_handler(int host_signum, struct siginfo *info,
 
 #else
 
-#error CPU specific signal handler needed
+#error host CPU specific signal handler needed
 
 #endif
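For reference, the reworked handle_cpu_signal() above routes a host SIGSEGV through three outcomes: cpu_x86_handle_mmu_fault() returning a negative value means the address is not a guest MMU fault at all, zero means the fault was fixed up and execution can simply resume, and a positive value means a real guest page fault must be raised via raise_exception_err(). The stand-alone sketch below illustrates that return-value convention with invented stand-ins (fake_mmu_fault, fake_raise_page_fault), not the real QEMU functions.

/* Illustration of the fault-routing convention described in the diff's
   comments; the policy inside fake_mmu_fault() is arbitrary and only there
   to exercise all three return paths. */
#include <stdio.h>

/* < 0 : the address is not handled by the guest MMU at all
   = 0 : the fault was resolved (page table walk succeeded), just retry
   > 0 : a real guest page fault must be delivered */
static int fake_mmu_fault(unsigned long address, int is_write)
{
    if (address < 0x1000)
        return -1;               /* not an MMU fault: genuine host bug */
    if (!is_write)
        return 0;                /* read fault resolved silently */
    return 1;                    /* write fault: deliver a guest #PF */
}

static void fake_raise_page_fault(unsigned long address)
{
    printf("guest #PF at 0x%lx\n", address);
}

/* returns 1 if the signal was consumed by the emulator, 0 otherwise */
static int fake_handle_cpu_signal(unsigned long address, int is_write)
{
    int ret = fake_mmu_fault(address, is_write);
    if (ret < 0)
        return 0;                /* let the host's default handling run */
    if (ret == 0)
        return 1;                /* fixed up, re-execute the faulting code */
    fake_raise_page_fault(address);   /* real guest fault */
    return 1;
}

int main(void)
{
    printf("%d\n", fake_handle_cpu_signal(0x10, 0));      /* 0: host bug */
    printf("%d\n", fake_handle_cpu_signal(0x2000, 0));    /* 1: fixed up */
    printf("%d\n", fake_handle_cpu_signal(0x2000, 1));    /* 1: guest #PF */
    return 0;
}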