Commit 15a5115690558ad65de02d9b9bb4ec89bc4cf8ac
1 parent f9e7bcfe
Use spinlock_t for interrupt_lock, lock support for HPPA (Stuart Brady)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4118 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 2 changed files with 54 additions and 7 deletions
exec-all.h
@@ -297,6 +297,30 @@ extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
 
+#if defined(__hppa__)
+
+typedef int spinlock_t[4];
+
+#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }
+
+static inline void resetlock (spinlock_t *p)
+{
+    (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
+}
+
+#else
+
+typedef int spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED 0
+
+static inline void resetlock (spinlock_t *p)
+{
+    *p = SPIN_LOCK_UNLOCKED;
+}
+
+#endif
+
 #if defined(__powerpc__)
 static inline int testandset (int *p)
 {
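Why the HPPA variant initialises the lock words to { 1, 1, 1, 1 }: PA-RISC's only atomic primitive, ldcw, loads a word and atomically stores zero into it, so the convention is inverted relative to the other ports — nonzero means unlocked, zero means held. A minimal usage sketch (hypothetical caller, not part of the patch; assumes this header is included) showing how code must stay behind the SPIN_LOCK_UNLOCKED / testandset() / resetlock() API so the same source builds whether spinlock_t is a plain int or an int[4]:

static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;   /* hypothetical lock */

static void locked_work(void)
{
    while (testandset(&example_lock))
        ;                          /* spin until the lock is acquired */
    /* ... critical section ... */
    resetlock(&example_lock);      /* never write *lock = 0 directly:
                                      zero means "held" in the int[4] case */
}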
@@ -396,6 +420,33 @@ static inline int testandset (int *p)
                         : "cc","memory");
     return ret;
 }
+#elif defined(__hppa__)
+
+/* Because malloc only guarantees 8-byte alignment for malloc'd data,
+   and GCC only guarantees 8-byte alignment for stack locals, we can't
+   be assured of 16-byte alignment for atomic lock data even if we
+   specify "__attribute ((aligned(16)))" in the type declaration.  So,
+   we use a struct containing an array of four ints for the atomic lock
+   type and dynamically select the 16-byte aligned int from the array
+   for the semaphore.  */
+#define __PA_LDCW_ALIGNMENT 16
+static inline void *ldcw_align (void *p) {
+    unsigned long a = (unsigned long)p;
+    a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
+    return (void *)a;
+}
+
+static inline int testandset (spinlock_t *p)
+{
+    unsigned int ret;
+    p = ldcw_align(p);
+    __asm__ __volatile__("ldcw 0(%1),%0"
+                         : "=r" (ret)
+                         : "r" (p)
+                         : "memory" );
+    return !ret;
+}
+
 #elif defined(__ia64)
 
 #include <ia64intrin.h>
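The alignment dance exists because ldcw requires a 16-byte-aligned operand, and as the patch comment notes, neither malloc nor GCC stack allocation guarantees more than 8 bytes here; a 16-byte int[4] is therefore guaranteed to contain exactly one suitably aligned word, which ldcw_align() selects. A standalone, host-independent check of the round-up idiom (hypothetical test harness, not part of the patch):

#include <assert.h>
#include <stdio.h>

#define __PA_LDCW_ALIGNMENT 16

static unsigned long align_up(unsigned long a)
{
    /* same expression as ldcw_align(): round up to the next multiple of 16 */
    return (a + __PA_LDCW_ALIGNMENT - 1) & ~(unsigned long)(__PA_LDCW_ALIGNMENT - 1);
}

int main(void)
{
    assert(align_up(0x1000) == 0x1000);   /* already aligned: unchanged */
    assert(align_up(0x1004) == 0x1010);   /* worst case: +12, still inside int[4] */
    assert(align_up(0x100f) == 0x1010);   /* rounds up, never down */
    printf("align-up idiom OK\n");
    return 0;
}

Note also the return value: ldcw leaves the old lock word in ret, so ret is nonzero when the lock was free; testandset() negates it so that, as on the other ports, 0 means "acquired".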
@@ -428,10 +479,6 @@ static inline int testandset (int *p)
 #error unimplemented CPU support
 #endif
 
-typedef int spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED 0
-
 #if defined(CONFIG_USER_ONLY)
 static inline void spin_lock(spinlock_t *lock)
 {
@@ -440,7 +487,7 @@ static inline void spin_lock(spinlock_t *lock)
 
 static inline void spin_unlock(spinlock_t *lock)
 {
-    *lock = 0;
+    resetlock(lock);
 }
 
 static inline int spin_trylock(spinlock_t *lock)
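For context, the spin_lock() body that both hunks elide presumably just busy-waits on testandset() — a sketch consistent with the hunk headers above, not shown by this diff, so treat it as an assumption:

static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock))
        ;   /* testandset() returns nonzero while the lock is held */
}

With spin_unlock() now routed through resetlock(), every release path restores the representation-appropriate unlocked pattern: 0 for the scalar lock, { 1, 1, 1, 1 } for the HPPA array.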
exec.c
@@ -1215,7 +1215,7 @@ void cpu_set_log_filename(const char *filename)
 void cpu_interrupt(CPUState *env, int mask)
 {
     TranslationBlock *tb;
-    static int interrupt_lock;
+    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
 
     env->interrupt_request |= mask;
     /* if the cpu is currently executing code, we must unlink it and
@@ -1224,7 +1224,7 @@ void cpu_interrupt(CPUState *env, int mask)
     if (tb && !testandset(&interrupt_lock)) {
         env->current_tb = NULL;
         tb_reset_jump_recursive(tb);
-        interrupt_lock = 0;
+        resetlock(&interrupt_lock);
     }
 }
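The exec.c change closes the loop: interrupt_lock must become a spinlock_t with an explicit SPIN_LOCK_UNLOCKED initialiser, because on HPPA a zero-initialised static lock would read as permanently held. Note that testandset() serves here as a try-lock rather than a blocking acquire: if another CPU already holds interrupt_lock, it is already unlinking the TB chain, so this path skips the work instead of spinning. The pattern in isolation (hypothetical sketch, assuming the header above):

static spinlock_t once_lock = SPIN_LOCK_UNLOCKED;   /* hypothetical name */

static void unlink_once(TranslationBlock *tb)
{
    if (!testandset(&once_lock)) {       /* 0 means we won the race */
        tb_reset_jump_recursive(tb);     /* do the work exactly once */
        resetlock(&once_lock);
    }
    /* nonzero: someone else holds the lock; fall through, don't block */
}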