Commit 43b968580736ebd564a5d20b8a9a78b72c86fe57
1 parent: 50317c7f

qemu: refactor main_loop (Marcelo Tosatti)

Break main loop into 3 main functions.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@7241 c046a42c-6fe2-441c-8c8c-71466251a162
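For orientation, the sketch below mirrors the shape of the refactored loop: one helper that runs a CPU slice (qemu_cpu_exec in the diff), one that computes the I/O wait timeout (qemu_calculate_timeout), and one driver (main_loop) that round-robins over the CPUs and then waits for I/O. It is a standalone illustration only, not the author's code: FakeCPU, fake_cpu_exec, calculate_timeout and rounds_left are stand-ins invented for the example, and none of them are QEMU APIs; the real definitions are in the diff below.

/* Standalone illustration of the loop structure this commit introduces.
 * Every type and helper here (FakeCPU, fake_cpu_exec, ...) is a stub made
 * up for the example; none of it is a QEMU API. */
#include <stdio.h>
#include <stdbool.h>

typedef struct FakeCPU {
    int id;
    struct FakeCPU *next_cpu;
    bool halted;
} FakeCPU;

static FakeCPU cpus[2] = { { 0, &cpus[1], false }, { 1, NULL, false } };
static FakeCPU *first_cpu = &cpus[0];
static FakeCPU *next_cpu;
static int timer_alarm_pending = 1;   /* set by the (stubbed) timer alarm */
static int rounds_left = 3;           /* stand-in for a shutdown request */

/* Role of qemu_cpu_exec(): run one CPU for a slice, report why it stopped. */
static int fake_cpu_exec(FakeCPU *env)
{
    printf("executing cpu %d\n", env->id);
    return 0;
}

/* Role of vm_can_run(): keep iterating until a shutdown-like request. */
static bool vm_can_run(void)
{
    return rounds_left-- > 0;
}

/* Role of qemu_calculate_timeout(): poll (timeout 0) while any CPU has
 * work, otherwise sleep for a while before the next I/O check. */
static int calculate_timeout(void)
{
    for (FakeCPU *env = first_cpu; env != NULL; env = env->next_cpu)
        if (!env->halted)
            return 0;
    return 5000;
}

/* Role of main_loop(): round-robin the CPUs, break out to the I/O wait
 * whenever the timer alarm fired, repeat until asked to stop. */
static void main_loop(void)
{
    do {
        if (next_cpu == NULL)
            next_cpu = first_cpu;
        for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
            if (timer_alarm_pending) {
                timer_alarm_pending = 0;
                break;                /* give the I/O side a turn */
            }
            fake_cpu_exec(next_cpu);
        }
        printf("main_loop_wait(%d)\n", calculate_timeout());
    } while (vm_can_run());
}

int main(void)
{
    main_loop();
    return 0;
}

The real main_loop in the diff additionally wraps this do/while in an outer for (;;) that services shutdown, reset and powerdown requests between rounds; the sketch compresses that into vm_can_run().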
Showing 1 changed file with 151 additions and 129 deletions.

vl.c
@@ -273,7 +273,7 @@ uint64_t node_cpumask[MAX_NODES];
 
 static CPUState *cur_cpu;
 static CPUState *next_cpu;
-static int event_pending = 1;
+static int timer_alarm_pending = 1;
 /* Conversion factor from emulated instructions to virtual clock ticks. */
 static int icount_time_shift;
 /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
@@ -1360,7 +1360,7 @@ static void host_alarm_handler(int host_signum)
             }
 #endif
         }
-        event_pending = 1;
+        timer_alarm_pending = 1;
         qemu_notify_event();
     }
 }
@@ -3879,153 +3879,175 @@ void main_loop_wait(int timeout)
 
 }
 
-static int main_loop(void)
+static int qemu_cpu_exec(CPUState *env)
 {
-    int ret, timeout;
+    int ret;
 #ifdef CONFIG_PROFILER
     int64_t ti;
 #endif
-    CPUState *env;
 
-    cur_cpu = first_cpu;
-    next_cpu = cur_cpu->next_cpu ?: first_cpu;
-    for(;;) {
-        if (vm_running) {
-
-            for(;;) {
-                /* get next cpu */
-                env = next_cpu;
 #ifdef CONFIG_PROFILER
-                ti = profile_getclock();
+    ti = profile_getclock();
 #endif
-                if (use_icount) {
-                    int64_t count;
-                    int decr;
-                    qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
-                    env->icount_decr.u16.low = 0;
-                    env->icount_extra = 0;
-                    count = qemu_next_deadline();
-                    count = (count + (1 << icount_time_shift) - 1)
-                            >> icount_time_shift;
-                    qemu_icount += count;
-                    decr = (count > 0xffff) ? 0xffff : count;
-                    count -= decr;
-                    env->icount_decr.u16.low = decr;
-                    env->icount_extra = count;
-                }
-                ret = cpu_exec(env);
+    if (use_icount) {
+        int64_t count;
+        int decr;
+        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
+        env->icount_decr.u16.low = 0;
+        env->icount_extra = 0;
+        count = qemu_next_deadline();
+        count = (count + (1 << icount_time_shift) - 1)
+                >> icount_time_shift;
+        qemu_icount += count;
+        decr = (count > 0xffff) ? 0xffff : count;
+        count -= decr;
+        env->icount_decr.u16.low = decr;
+        env->icount_extra = count;
+    }
+    ret = cpu_exec(env);
 #ifdef CONFIG_PROFILER
-                qemu_time += profile_getclock() - ti;
+    qemu_time += profile_getclock() - ti;
 #endif
-                if (use_icount) {
-                    /* Fold pending instructions back into the
-                       instruction counter, and clear the interrupt flag. */
-                    qemu_icount -= (env->icount_decr.u16.low
-                                    + env->icount_extra);
-                    env->icount_decr.u32 = 0;
-                    env->icount_extra = 0;
-                }
-                next_cpu = env->next_cpu ?: first_cpu;
-                if (event_pending && likely(ret != EXCP_DEBUG)) {
-                    ret = EXCP_INTERRUPT;
-                    event_pending = 0;
-                    break;
-                }
-                if (ret == EXCP_HLT) {
-                    /* Give the next CPU a chance to run. */
-                    cur_cpu = env;
-                    continue;
-                }
-                if (ret != EXCP_HALTED)
+    if (use_icount) {
+        /* Fold pending instructions back into the
+           instruction counter, and clear the interrupt flag. */
+        qemu_icount -= (env->icount_decr.u16.low
+                        + env->icount_extra);
+        env->icount_decr.u32 = 0;
+        env->icount_extra = 0;
+    }
+    return ret;
+}
+
+static int cpu_has_work(CPUState *env)
+{
+    if (!env->halted)
+        return 1;
+    if (qemu_cpu_has_work(env))
+        return 1;
+    return 0;
+}
+
+static int tcg_has_work(void)
+{
+    CPUState *env;
+
+    for (env = first_cpu; env != NULL; env = env->next_cpu)
+        if (cpu_has_work(env))
+            return 1;
+    return 0;
+}
+
+static int qemu_calculate_timeout(void)
+{
+    int timeout;
+
+    if (!vm_running)
+        timeout = 5000;
+    else if (tcg_has_work())
+        timeout = 0;
+    else if (!use_icount)
+        timeout = 5000;
+    else {
+        /* XXX: use timeout computed from timers */
+        int64_t add;
+        int64_t delta;
+        /* Advance virtual time to the next event. */
+        if (use_icount == 1) {
+            /* When not using an adaptive execution frequency
+               we tend to get badly out of sync with real time,
+               so just delay for a reasonable amount of time. */
+            delta = 0;
+        } else {
+            delta = cpu_get_icount() - cpu_get_clock();
+        }
+        if (delta > 0) {
+            /* If virtual time is ahead of real time then just
+               wait for IO. */
+            timeout = (delta / 1000000) + 1;
+        } else {
+            /* Wait for either IO to occur or the next
+               timer event. */
+            add = qemu_next_deadline();
+            /* We advance the timer before checking for IO.
+               Limit the amount we advance so that early IO
+               activity won't get the guest too far ahead. */
+            if (add > 10000000)
+                add = 10000000;
+            delta += add;
+            add = (add + (1 << icount_time_shift) - 1)
+                  >> icount_time_shift;
+            qemu_icount += add;
+            timeout = delta / 1000000;
+            if (timeout < 0)
+                timeout = 0;
+        }
+    }
+
+    return timeout;
+}
+
+static int vm_can_run(void)
+{
+    if (powerdown_requested)
+        return 0;
+    if (reset_requested)
+        return 0;
+    if (shutdown_requested)
+        return 0;
+    return 1;
+}
+
+static void main_loop(void)
+{
+    int ret = 0;
+#ifdef CONFIG_PROFILER
+    int64_t ti;
+#endif
+
+    for (;;) {
+        do {
+            if (next_cpu == NULL)
+                next_cpu = first_cpu;
+            for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
+                CPUState *env = cur_cpu = next_cpu;
+
+                if (!vm_running)
                     break;
-                /* all CPUs are halted ? */
-                if (env == cur_cpu)
+                if (timer_alarm_pending) {
+                    timer_alarm_pending = 0;
                     break;
-            }
-            cur_cpu = env;
-
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                if (no_shutdown) {
-                    vm_stop(0);
-                    no_shutdown = 0;
                 }
-                else
+                ret = qemu_cpu_exec(env);
+                if (ret == EXCP_DEBUG) {
+                    gdb_set_stop_cpu(env);
                     break;
-            }
-            if (reset_requested) {
-                reset_requested = 0;
-                qemu_system_reset();
-                ret = EXCP_INTERRUPT;
-            }
-            if (powerdown_requested) {
-                powerdown_requested = 0;
-                qemu_system_powerdown();
-                ret = EXCP_INTERRUPT;
-            }
-            if (unlikely(ret == EXCP_DEBUG)) {
-                gdb_set_stop_cpu(cur_cpu);
-                vm_stop(EXCP_DEBUG);
-            }
-            /* If all cpus are halted then wait until the next IRQ */
-            /* XXX: use timeout computed from timers */
-            if (ret == EXCP_HALTED) {
-                if (use_icount) {
-                    int64_t add;
-                    int64_t delta;
-                    /* Advance virtual time to the next event. */
-                    if (use_icount == 1) {
-                        /* When not using an adaptive execution frequency
-                           we tend to get badly out of sync with real time,
-                           so just delay for a reasonable amount of time. */
-                        delta = 0;
-                    } else {
-                        delta = cpu_get_icount() - cpu_get_clock();
-                    }
-                    if (delta > 0) {
-                        /* If virtual time is ahead of real time then just
-                           wait for IO. */
-                        timeout = (delta / 1000000) + 1;
-                    } else {
-                        /* Wait for either IO to occur or the next
-                           timer event. */
-                        add = qemu_next_deadline();
-                        /* We advance the timer before checking for IO.
-                           Limit the amount we advance so that early IO
-                           activity won't get the guest too far ahead. */
-                        if (add > 10000000)
-                            add = 10000000;
-                        delta += add;
-                        add = (add + (1 << icount_time_shift) - 1)
-                              >> icount_time_shift;
-                        qemu_icount += add;
-                        timeout = delta / 1000000;
-                        if (timeout < 0)
-                            timeout = 0;
-                    }
-                } else {
-                    timeout = 5000;
                 }
-            } else {
-                timeout = 0;
             }
-        } else {
-            if (shutdown_requested) {
-                ret = EXCP_INTERRUPT;
-                break;
-            }
-            timeout = 5000;
-        }
 #ifdef CONFIG_PROFILER
-        ti = profile_getclock();
+            ti = profile_getclock();
 #endif
-        main_loop_wait(timeout);
+            main_loop_wait(qemu_calculate_timeout());
 #ifdef CONFIG_PROFILER
-        dev_time += profile_getclock() - ti;
+            dev_time += profile_getclock() - ti;
 #endif
+        } while (ret != EXCP_DEBUG && vm_can_run());
+
+        if (ret == EXCP_DEBUG)
+            vm_stop(EXCP_DEBUG);
+
+        if (qemu_shutdown_requested()) {
+            if (no_shutdown) {
+                vm_stop(0);
+                no_shutdown = 0;
+            } else
+                break;
+        }
+        if (qemu_reset_requested())
+            qemu_system_reset();
+        if (qemu_powerdown_requested())
+            qemu_system_powerdown();
     }
-    cpu_disable_ticks();
-    return ret;
 }
 
 static void version(void)