Commit be214e6c0557139ffa5551f77e339c07495bfec3 (parent: 28a76be8)
Fix race condition on access to env->interrupt_request
env->interrupt_request is accessed at the bit level from both the main code and the signal handler, making a race condition possible even on a CISC CPU. This causes QEMU to freeze under high load when running the dyntick clock.

The patch below moves the bit corresponding to CPU_INTERRUPT_EXIT into a separate variable, declared as volatile sig_atomic_t, so it should work even on a RISC CPU.

We may want to move the cpu_interrupt(env, CPU_INTERRUPT_EXIT) case into its own function and get rid of CPU_INTERRUPT_EXIT; that can be done later, as I wanted to keep the patch short for easier review.

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6728 c046a42c-6fe2-441c-8c8c-71466251a162
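For context, a minimal, self-contained sketch of the pattern the patch adopts (this is not QEMU code; the SIGALRM/alarm() setup and the alarm_handler name are illustrative stand-ins for QEMU's timer signal): a lone volatile sig_atomic_t flag is stored from the signal handler and polled by the main loop, instead of OR-ing a bit into a shared mask, which is a non-atomic read-modify-write that can lose updates.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t exit_request;   /* only data shared with the handler */

static void alarm_handler(int signum)
{
    (void)signum;
    /* A single store to a volatile sig_atomic_t is async-signal-safe.
       By contrast, "mask |= bit" on a shared word read-modify-writes it and
       can clobber a concurrent update made by the interrupted code. */
    exit_request = 1;
}

int main(void)
{
    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = alarm_handler;
    sigemptyset(&sa.sa_mask);
    if (sigaction(SIGALRM, &sa, NULL) != 0) {
        perror("sigaction");
        return 1;
    }

    alarm(1);                        /* stand-in for the dyntick timer signal */

    /* Main loop: the equivalent of executing translation blocks until the
       handler asks us to break out. */
    while (!exit_request) {
        /* ... do one unit of work ... */
    }

    exit_request = 0;                /* acknowledge, as cpu_exec()/kvm_cpu_exec() do */
    printf("exit request seen, leaving the execution loop\n");
    return 0;
}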
Showing 4 changed files with 19 additions and 16 deletions.
cpu-defs.h
@@ -27,6 +27,7 @@
 #include "config.h"
 #include <setjmp.h>
 #include <inttypes.h>
+#include <signal.h>
 #include "osdep.h"
 #include "sys-queue.h"
 
@@ -170,6 +171,7 @@ typedef struct CPUWatchpoint {
                          memory was accessed */                          \
     uint32_t halted; /* Nonzero if the CPU is in suspend state */        \
     uint32_t interrupt_request;                                          \
+    volatile sig_atomic_t exit_request;                                  \
     /* The meaning of the MMU modes is defined in the target code. */    \
     CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                   \
     target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                \
cpu-exec.c
@@ -311,7 +311,7 @@ int cpu_exec(CPUState *env1)
         env->exception_index = -1;
     }
 #ifdef USE_KQEMU
-    if (kqemu_is_ok(env) && env->interrupt_request == 0) {
+    if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
         int ret;
         env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
         ret = kqemu_cpu_exec(env);
@@ -326,7 +326,7 @@ int cpu_exec(CPUState *env1)
         } else if (ret == 2) {
             /* softmmu execution needed */
         } else {
-            if (env->interrupt_request != 0) {
+            if (env->interrupt_request != 0 || env->exit_request != 0) {
                 /* hardware interrupt will be executed just after */
             } else {
                 /* otherwise, we restart */
@@ -525,11 +525,11 @@ int cpu_exec(CPUState *env1)
                            the program flow was changed */
                         next_tb = 0;
                     }
-                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
-                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
-                        env->exception_index = EXCP_INTERRUPT;
-                        cpu_loop_exit();
-                    }
+                }
+                if (unlikely(env->exit_request)) {
+                    env->exit_request = 0;
+                    env->exception_index = EXCP_INTERRUPT;
+                    cpu_loop_exit();
                 }
 #ifdef DEBUG_EXEC
                 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
@@ -599,7 +599,7 @@ int cpu_exec(CPUState *env1)
                    TB, but before it is linked into a potentially
                    infinite loop and becomes env->current_tb. Avoid
                    starting execution if there is a pending interrupt. */
-                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
+                if (unlikely (env->exit_request))
                     env->current_tb = NULL;
 
                 while (env->current_tb) {
exec.c
@@ -1501,9 +1501,12 @@ void cpu_interrupt(CPUState *env, int mask)
 #endif
     int old_mask;
 
+    if (mask & CPU_INTERRUPT_EXIT) {
+        env->exit_request = 1;
+        mask &= ~CPU_INTERRUPT_EXIT;
+    }
+
     old_mask = env->interrupt_request;
-    /* FIXME: This is probably not threadsafe.  A different thread could
-       be in the middle of a read-modify-write operation.  */
     env->interrupt_request |= mask;
 #if defined(USE_NPTL)
     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
@@ -1514,10 +1517,8 @@ void cpu_interrupt(CPUState *env, int mask)
     if (use_icount) {
         env->icount_decr.u16.high = 0xffff;
 #ifndef CONFIG_USER_ONLY
-        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
-           an async event happened and we need to process it.  */
         if (!can_do_io(env)
-            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
+            && (mask & ~old_mask) != 0) {
             cpu_abort(env, "Raised interrupt while not in I/O function");
         }
 #endif
kvm-all.c
@@ -445,7 +445,7 @@ int kvm_cpu_exec(CPUState *env)
     do {
         kvm_arch_pre_run(env, run);
 
-        if ((env->interrupt_request & CPU_INTERRUPT_EXIT)) {
+        if (env->exit_request) {
             dprintf("interrupt exit requested\n");
             ret = 0;
             break;
@@ -512,8 +512,8 @@ int kvm_cpu_exec(CPUState *env)
         }
     } while (ret > 0);
 
-    if ((env->interrupt_request & CPU_INTERRUPT_EXIT)) {
-        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
+    if (env->exit_request) {
+        env->exit_request = 0;
         env->exception_index = EXCP_INTERRUPT;
     }
 