Commit 68a7931591fca65ac5dc2e1b23688e08d1c328a6 (parent c9159e53)

    reduced irq latency

    git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@296 c046a42c-6fe2-441c-8c8c-71466251a162

Showing 6 changed files with 35 additions and 29 deletions.
cpu-all.h
@@ -309,6 +309,10 @@ void page_unprotect_range(uint8_t *data, unsigned long data_size);
 void cpu_abort(CPUState *env, const char *fmt, ...);
 extern CPUState *cpu_single_env;
 
+#define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */
+#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
+void cpu_interrupt(CPUX86State *s, int mask);
+
 /* gdb stub API */
 extern int gdbstub_fd;
 CPUState *cpu_gdbstub_get_env(void *opaque);
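The two defines replace a pair of per-CPU flag fields with a single request bitmask, and cpu_interrupt() (declared here, implemented in exec.c below) becomes the one entry point for raising requests. A minimal sketch of how a caller might use the new interface; pic_raise_irq() and request_cpu_exit() are hypothetical helpers for illustration, not part of this commit:

    /* Hypothetical callers, sketched against the new API above. */
    static void pic_raise_irq(CPUState *env)
    {
        /* ask the execution loop to fetch an INT number from the PIC and
           deliver it as soon as EFLAGS.IF allows */
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    }

    static void request_cpu_exit(CPUState *env)
    {
        /* ask cpu_exec() to stop and return EXCP_INTERRUPT to its caller */
        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
    }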
cpu-exec.c
@@ -71,7 +71,7 @@ int cpu_exec(CPUState *env1)
 #ifdef __sparc__
     int saved_i7, tmp_T0;
 #endif
-    int code_gen_size, ret;
+    int code_gen_size, ret, interrupt_request;
     void (*gen_func)(void);
     TranslationBlock *tb, **ptb;
     uint8_t *tc_ptr, *cs_base, *pc;
@@ -139,7 +139,6 @@ int cpu_exec(CPUState *env1)
 #else
 #error unsupported target CPU
 #endif
-    env->interrupt_request = 0;
     env->exception_index = -1;
 
     /* prepare setjmp context for exception handling */
@@ -176,28 +175,32 @@ int cpu_exec(CPUState *env1)
             }
             env->exception_index = -1;
         }
-#if defined(TARGET_I386)
-        /* if hardware interrupt pending, we execute it */
-        if (env->hard_interrupt_request &&
-            (env->eflags & IF_MASK)) {
-            int intno;
-            intno = cpu_x86_get_pic_interrupt(env);
-            if (loglevel) {
-                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
-            }
-            do_interrupt(intno, 0, 0, 0);
-            env->hard_interrupt_request = 0;
-        }
-#endif
        T0 = 0; /* force lookup of first TB */
        for(;;) {
 #ifdef __sparc__
            /* g1 can be modified by some libc? functions */
            tmp_T0 = T0;
 #endif
-            if (env->interrupt_request) {
-                env->exception_index = EXCP_INTERRUPT;
-                cpu_loop_exit();
+            interrupt_request = env->interrupt_request;
+            if (interrupt_request) {
+#if defined(TARGET_I386)
+                /* if hardware interrupt pending, we execute it */
+                if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+                    (env->eflags & IF_MASK)) {
+                    int intno;
+                    intno = cpu_x86_get_pic_interrupt(env);
+                    if (loglevel) {
+                        fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
+                    }
+                    do_interrupt(intno, 0, 0, 0);
+                    env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+                }
+#endif
+                if (interrupt_request & CPU_INTERRUPT_EXIT) {
+                    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
+                    env->exception_index = EXCP_INTERRUPT;
+                    cpu_loop_exit();
+                }
            }
 #ifdef DEBUG_EXEC
            if (loglevel) {
@@ -212,7 +215,7 @@ int cpu_exec(CPUState *env1)
                env->regs[R_EBP] = EBP;
                env->regs[R_ESP] = ESP;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
-                cpu_x86_dump_state(env, logfile, 0);
+                cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
 #elif defined(TARGET_ARM)
                cpu_arm_dump_state(env, logfile, 0);
@@ -454,7 +457,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
 {
     TranslationBlock *tb;
     int ret;
-    
+
     if (cpu_single_env)
         env = cpu_single_env; /* XXX: find a correct solution for multithread */
 #if defined(DEBUG_SIGNAL)
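This restructuring is where the latency reduction comes from: previously a pending hardware interrupt was only serviced once, before entering the TB dispatch loop, and any interrupt_request forced a full exit from cpu_exec() via EXCP_INTERRUPT. Now the check runs on every iteration of the inner for(;;) loop, so a pending IRQ is delivered just before the next translation block, and only an explicit CPU_INTERRUPT_EXIT leaves the loop. A distilled sketch of the new control flow (names taken from the hunk above, loop structure simplified):

    /* Simplified restatement of the hunk above, not the literal code. */
    for (;;) {
        int interrupt_request = env->interrupt_request;
        if (interrupt_request) {
            if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) {
                /* deliver the pending hardware interrupt here, before the next TB */
                do_interrupt(cpu_x86_get_pic_interrupt(env), 0, 0, 0);
                env->interrupt_request &= ~CPU_INTERRUPT_HARD;
            }
            if (interrupt_request & CPU_INTERRUPT_EXIT) {
                /* only an explicit exit request leaves cpu_exec() */
                env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                env->exception_index = EXCP_INTERRUPT;
                cpu_loop_exit();
            }
        }
        /* ... look up or translate the next TB and execute it ... */
    }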
cpu-i386.h
@@ -254,10 +254,7 @@ typedef struct CPUX86State {
     struct TranslationBlock *current_tb; /* currently executing TB */
     uint32_t cr[5]; /* NOTE: cr1 is unused */
     uint32_t dr[8]; /* debug registers */
-    int interrupt_request; /* if true, will exit from cpu_exec() ASAP */
-    /* if true, will call cpu_x86_get_pic_interrupt() ASAP to get the
-       request interrupt number */
-    int hard_interrupt_request;
+    int interrupt_request;
     int user_mode_only; /* user mode only simulation */
 
     /* user data */
@@ -275,7 +272,6 @@ int cpu_x86_inl(CPUX86State *env, int addr);
 
 CPUX86State *cpu_x86_init(void);
 int cpu_x86_exec(CPUX86State *s);
-void cpu_x86_interrupt(CPUX86State *s);
 void cpu_x86_close(CPUX86State *s);
 int cpu_x86_get_pic_interrupt(CPUX86State *s);
 
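With this change CPUX86State keeps a single request bitmask instead of two independent flags (the old "exit ASAP" interrupt_request and hard_interrupt_request), so the hot loop needs only one test to notice any kind of request. A self-contained illustration of the resulting set/test/clear pattern (a plain int stands in for the env field):

    #include <stdio.h>

    #define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */
    #define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */

    int main(void)
    {
        int interrupt_request = 0;                /* stands in for env->interrupt_request */

        interrupt_request |= CPU_INTERRUPT_HARD;  /* a device raises an IRQ         */
        interrupt_request |= CPU_INTERRUPT_EXIT;  /* a signal handler wants to exit */

        if (interrupt_request & CPU_INTERRUPT_HARD) {
            /* ... service the interrupt ... */
            interrupt_request &= ~CPU_INTERRUPT_HARD;  /* clear only the serviced bit */
        }
        printf("still pending: 0x%02x\n", (unsigned)interrupt_request);  /* prints 0x01 */
        return 0;
    }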
exec.c
@@ -617,11 +617,12 @@ static void tb_reset_jump_recursive(TranslationBlock *tb)
     tb_reset_jump_recursive2(tb, 1);
 }
 
-void cpu_interrupt(CPUState *env)
+/* mask must never be zero */
+void cpu_interrupt(CPUState *env, int mask)
 {
     TranslationBlock *tb;
-    
-    env->interrupt_request = 1;
+
+    env->interrupt_request |= mask;
     /* if the cpu is currently executing code, we must unlink it and
        all the potentially executing TB */
     tb = env->current_tb;
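Because the implementation now ORs the mask into interrupt_request instead of overwriting a flag, requests from different sources accumulate, and the execution loop clears each bit only once it has been serviced. The new "mask must never be zero" comment follows from this: a zero mask would still unlink the current TB but leave no request bit for the loop to act on. A short usage sketch:

    cpu_interrupt(env, CPU_INTERRUPT_HARD);  /* e.g. from a device model        */
    cpu_interrupt(env, CPU_INTERRUPT_EXIT);  /* e.g. from a host signal handler */
    /* env->interrupt_request is now CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT;
       cpu_exec() services the IRQ and then exits, clearing one bit each time. */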
linux-user/signal.c
@@ -333,7 +333,7 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
     host_to_target_siginfo_noswap(&tinfo, info);
     if (queue_signal(sig, &tinfo) == 1) {
         /* interrupt the virtual CPU as soon as possible */
-        cpu_interrupt(global_env);
+        cpu_interrupt(global_env, CPU_INTERRUPT_EXIT);
     }
 }
 
translate-i386.c
@@ -3331,12 +3331,14 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
                gen_op_sti();
+                s->is_jmp = 2; /* give a chance to handle pending irqs */
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                gen_op_sti();
+                s->is_jmp = 2; /* give a chance to handle pending irqs */
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
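Setting s->is_jmp = 2 right after gen_op_sti() terminates the translation block at the STI instruction, so control returns to the cpu_exec() loop as soon as interrupts have been re-enabled; with the per-iteration check added above, a pending CPU_INTERRUPT_HARD is then delivered immediately instead of after the rest of the block. A simplified restatement of the protected-mode case (the vm86 branch is analogous):

    case 0xfb: /* sti */
        if (s->cpl <= s->iopl) {
            gen_op_sti();   /* generated code will set EFLAGS.IF              */
            s->is_jmp = 2;  /* stop translating: the TB ends after STI, so
                               cpu_exec() re-checks interrupt_request with IF
                               now set and can deliver a pending IRQ          */
        } else {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        }
        break;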