Commit 9df217a31741e21eb63a5e3ee8529391ba3762e3
Authored by bellard
Parent: 92a31b1f

kqemu support


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1283 c046a42c-6fe2-441c-8c8c-71466251a162
cpu-exec.c
@@ -209,7 +209,33 @@ int cpu_exec(CPUState *env1)
 #endif
             }
             env->exception_index = -1;
+        }
+#ifdef USE_KQEMU
+        if (kqemu_is_ok(env) && env->interrupt_request == 0) {
+            int ret;
+            env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
+            ret = kqemu_cpu_exec(env);
+            /* put eflags in CPU temporary format */
+            CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+            DF = 1 - (2 * ((env->eflags >> 10) & 1));
+            CC_OP = CC_OP_EFLAGS;
+            env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+            if (ret == 1) {
+                /* exception */
+                longjmp(env->jmp_env, 1);
+            } else if (ret == 2) {
+                /* softmmu execution needed */
+            } else {
+                if (env->interrupt_request != 0) {
+                    /* hardware interrupt will be executed just after */
+                } else {
+                    /* otherwise, we restart */
+                    longjmp(env->jmp_env, 1);
+                }
+            }
         }
+#endif
+
         T0 = 0; /* force lookup of first TB */
         for(;;) {
 #ifdef __sparc__
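The eflags juggling around the kqemu_cpu_exec() call is needed because QEMU does not keep a live EFLAGS value: the arithmetic flags are computed lazily from CC_SRC/CC_OP, and the direction flag is stored as DF = +1 or -1 so string instructions can add it to their index registers directly. Below is a minimal standalone sketch of the DF round-trip used in this hunk; the bit position and both formulas are taken from the diff, the harness around them is only illustrative.

#include <assert.h>
#include <stdint.h>

#define DF_MASK (1 << 10)   /* EFLAGS.DF lives in bit 10 */

int main(void)
{
    for (int df_bit = 0; df_bit <= 1; df_bit++) {
        uint32_t eflags = df_bit ? DF_MASK : 0;
        /* unpack, as done after kqemu_cpu_exec() returns */
        int df = 1 - (2 * ((eflags >> 10) & 1));      /* +1 or -1 */
        /* repack, as done before the call: for df = -1 the two's
           complement representation has bit 10 set; for +1 it does not */
        uint32_t repacked = df & DF_MASK;
        assert(df == (df_bit ? -1 : 1));
        assert(repacked == (df_bit ? DF_MASK : 0u));
    }
    return 0;
}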
exec-all.h
@@ -586,3 +586,25 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
     return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base;
 }
 #endif
+
+
+#ifdef USE_KQEMU
+extern int kqemu_fd;
+extern int kqemu_flushed;
+
+int kqemu_init(CPUState *env);
+int kqemu_cpu_exec(CPUState *env);
+void kqemu_flush_page(CPUState *env, target_ulong addr);
+void kqemu_flush(CPUState *env, int global);
+
+static inline int kqemu_is_ok(CPUState *env)
+{
+    return(env->kqemu_enabled &&
+           (env->hflags & HF_CPL_MASK) == 3 &&
+           (env->eflags & IOPL_MASK) != IOPL_MASK &&
+           (env->cr[0] & CR0_PE_MASK) &&
+           (env->eflags & IF_MASK) &&
+           !(env->eflags & VM_MASK));
+}
+
+#endif
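kqemu_is_ok() is the gate for everything that follows: the accelerator only takes over plain protected-mode user code — CPL 3, interrupts enabled, IOPL below 3, not vm86. A pared-down, compilable restatement with a mock env struct, useful for checking the conditions in isolation; the bit constants match target-i386/cpu.h, the struct is a stand-in for CPUState.

#include <assert.h>
#include <stdint.h>

#define HF_CPL_MASK 3           /* CPL in the low bits of hflags */
#define IOPL_MASK   (3 << 12)   /* EFLAGS.IOPL, bits 12-13 */
#define IF_MASK     (1 << 9)
#define VM_MASK     (1 << 17)
#define CR0_PE_MASK (1 << 0)

struct mini_env {
    int kqemu_enabled;
    uint32_t hflags, eflags, cr0;
};

static int mini_kqemu_is_ok(const struct mini_env *env)
{
    return env->kqemu_enabled &&
           (env->hflags & HF_CPL_MASK) == 3 &&
           (env->eflags & IOPL_MASK) != IOPL_MASK &&
           (env->cr0 & CR0_PE_MASK) &&
           (env->eflags & IF_MASK) &&
           !(env->eflags & VM_MASK);
}

int main(void)
{
    struct mini_env e = { 1, 3, IF_MASK, CR0_PE_MASK };
    assert(mini_kqemu_is_ok(&e));
    e.eflags |= VM_MASK;          /* vm86 code must stay on the softmmu path */
    assert(!mini_kqemu_is_ok(&e));
    return 0;
}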
kqemu.c (new file, mode 100644)
/*
 *  KQEMU support
 *
 *  Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

#ifdef USE_KQEMU

#define DEBUG

#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include "kqemu/kqemu.h"

#define KQEMU_DEVICE "/dev/kqemu"

int kqemu_allowed = 1;
int kqemu_fd = -1;
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
extern uint32_t **l1_phys_map;

#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))

static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}

static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2;
    if (!is_cpuid_supported()) {
        features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
    }
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}
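The merge on env->cpuid_features is a plain bit-blend: bits inside critical_features_mask follow the host CPU, everything else keeps the guest's default, so user code sees the same feature set whether it runs natively under kqemu or through the translator. A worked standalone example of the arithmetic — the CPUID_* values are the standard leaf-1 EDX bit positions used by target-i386/cpu.h; the sample feature words are made up.

#include <assert.h>
#include <stdint.h>

#define CPUID_CX8  (1 << 8)
#define CPUID_CMOV (1 << 15)
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

int main(void)
{
    uint32_t mask = CPUID_CMOV | CPUID_CX8 | CPUID_FXSR |
                    CPUID_MMX | CPUID_SSE | CPUID_SSE2;
    uint32_t guest = CPUID_MMX | CPUID_SSE | CPUID_SSE2; /* guest default */
    uint32_t host  = CPUID_CMOV | CPUID_CX8 | CPUID_MMX; /* old host, no SSE */
    uint32_t merged = (guest & ~mask) | (host & mask);
    /* SSE/SSE2 disappear (the host cannot run them), CMOV/CX8 appear */
    assert(!(merged & CPUID_SSE) && !(merged & CPUID_SSE2));
    assert((merged & CPUID_CMOV) && (merged & CPUID_CX8));
    return 0;
}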
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;

    if (!kqemu_allowed)
        return -1;

    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
    if (kqemu_fd < 0) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE);
        return -1;
    }
    version = 0;
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        close(kqemu_fd);
        kqemu_fd = -1;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = 1;
    nb_pages_to_flush = 0;
    return 0;
}

void kqemu_flush_page(CPUState *env, target_ulong addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
#endif
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}

void kqemu_flush(CPUState *env, int global)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
#endif
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
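Flushes are batched rather than forwarded immediately: QEMU's TLB code records invalidated pages here, the array and counter travel to the kernel module with the next KQEMU_EXEC call, and the counter is reset to zero in kqemu_cpu_exec() below. A self-contained sketch of the saturation behaviour — the two KQEMU_* constants live in kqemu/kqemu.h, which is not part of this commit, so the values used here are made up for illustration.

#include <assert.h>

#define KQEMU_MAX_PAGES_TO_FLUSH 512                       /* illustrative */
#define KQEMU_FLUSH_ALL          (KQEMU_MAX_PAGES_TO_FLUSH + 1)

static unsigned long pages_to_flush[KQEMU_MAX_PAGES_TO_FLUSH];
static unsigned int nb_pages_to_flush;

static void flush_page(unsigned long addr)
{
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;   /* too many: flush everything */
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}

int main(void)
{
    for (unsigned long a = 0; a < KQEMU_MAX_PAGES_TO_FLUSH + 10; a++)
        flush_page(a << 12);
    /* once saturated, the sentinel sticks until the module consumes it */
    assert(nb_pages_to_flush == KQEMU_FLUSH_ALL);
    return 0;
}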
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[8 * 16];
    uint8_t dummy2[224];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));

static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}

static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, 8 * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}

static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, 8 * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
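The two pairs of helpers differ mostly in the tag word: fsave/frstor use the full 16-bit tag register, two bits per stack slot with 11b meaning empty, while fxsave/fxrstor use the abridged one-byte form, one bit per slot with 1 meaning valid — hence the `^ 0xff` against QEMU's fptags[] convention of 1 = empty. A standalone sketch of both conversions exactly as they appear above; the test values are illustrative.

#include <assert.h>
#include <stdint.h>

/* fptags[i] == 1 means "register i is empty", as in CPUState */
static uint16_t to_fsave_tag(const int fptags[8])
{
    int fptag = 0;
    for (int i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (fptags[i])
            fptag |= 3;        /* 11b = empty; the FPU recomputes the rest */
    }
    return fptag;
}

static uint8_t to_fxsave_tag(const int fptags[8])
{
    int fptag = 0;
    for (int i = 0; i < 8; i++)
        fptag |= fptags[i] << i;   /* collect the "empty" bits */
    return fptag ^ 0xff;           /* invert: abridged tag is 1 = valid */
}

int main(void)
{
    int fptags[8] = { 1, 1, 1, 1, 1, 1, 0, 0 };   /* st(6), st(7) in use */
    assert(to_fsave_tag(fptags) == 0x0fff);        /* slots 0..5 -> 11b */
    assert(to_fxsave_tag(fptags) == 0xc0);         /* bits 6 and 7 set */
    return 0;
}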
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret;

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    kenv->cpl = 3;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    nb_pages_to_flush = 0;

    if (!(kenv->cr0 & CR0_TS_MASK)) {
        if (env->cpuid_features & CPUID_FXSR)
            restore_native_fp_fxrstor(env);
        else
            restore_native_fp_frstor(env);
    }

    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);

    if (!(kenv->cr0 & CR0_TS_MASK)) {
        if (env->cpuid_features & CPUID_FXSR)
            save_native_fp_fxsave(env);
        else
            save_native_fp_fsave(env);
    }

    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
#if 0
    /* no need to restore that */
    memcpy(env->ldt, kenv->ldt, sizeof(env->ldt));
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->cr[0] = kenv->cr0;
    env->cr[3] = kenv->cr3;
    env->cr[4] = kenv->cr4;
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}

#endif
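The ioctl's return value is a packed code: the high byte selects the class — interrupt, exception, host signal, soft-MMU request — and for the first two the low byte carries the vector number, which is why the caller stores ret & 0xff into env->exception_index. A standalone sketch of the decoding; the real class constants come from kqemu/kqemu.h, which is not part of this commit, so the two values below are stand-ins chosen only to exercise the arithmetic.

#include <assert.h>
#include <stdio.h>

#define KQEMU_RET_INT       0x0100   /* illustrative */
#define KQEMU_RET_EXCEPTION 0x0200   /* illustrative */

int main(void)
{
    int ret = KQEMU_RET_EXCEPTION | 0x0e;   /* e.g. a page fault, vector 14 */
    if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        int vector = ret & 0xff;             /* low byte = vector */
        assert(vector == 14);
        printf("exception v=%02x\n", vector);
    }
    return 0;
}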
target-i386/cpu.h
@@ -39,6 +39,9 @@
 #if defined(__i386__) && !defined(CONFIG_SOFTMMU)
 #define USE_CODE_COPY
 #endif
+#if defined(__linux__) && defined(CONFIG_SOFTMMU) && defined(__i386__) && !defined(TARGET_X86_64)
+#define USE_KQEMU
+#endif
 
 #define R_EAX 0
 #define R_ECX 1
@@ -248,6 +251,14 @@
 #define CPUID_SSE  (1 << 25)
 #define CPUID_SSE2 (1 << 26)
 
+#define CPUID_EXT_SS3      (1 << 0)
+#define CPUID_EXT_MONITOR  (1 << 3)
+#define CPUID_EXT_CX16     (1 << 13)
+
+#define CPUID_EXT2_SYSCALL (1 << 11)
+#define CPUID_EXT2_NX      (1 << 20)
+#define CPUID_EXT2_LM      (1 << 29)
+
 #define EXCP00_DIVZ 0
 #define EXCP01_SSTP 1
 #define EXCP02_NMI  2
@@ -408,6 +419,16 @@ typedef struct CPUX86State {
     int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
     uint32_t hflags; /* hidden flags, see HF_xxx constants */
 
+    /* segments */
+    SegmentCache segs[6]; /* selector values */
+    SegmentCache ldt;
+    SegmentCache tr;
+    SegmentCache gdt; /* only base and limit are used */
+    SegmentCache idt; /* only base and limit are used */
+
+    target_ulong cr[5]; /* NOTE: cr1 is unused */
+    uint32_t a20_mask;
+
     /* FPU state */
     unsigned int fpstt; /* top of stack index */
     unsigned int fpus;
@@ -431,13 +452,6 @@ typedef struct CPUX86State {
         int64_t i64;
     } fp_convert;
 
-    /* segments */
-    SegmentCache segs[6]; /* selector values */
-    SegmentCache ldt;
-    SegmentCache tr;
-    SegmentCache gdt; /* only base and limit are used */
-    SegmentCache idt; /* only base and limit are used */
-
     uint32_t mxcsr;
     XMMReg xmm_regs[CPU_NB_REGS];
     XMMReg xmm_t0;
@@ -470,13 +484,10 @@ typedef struct CPUX86State {
     int exception_is_int;
     target_ulong exception_next_eip;
     struct TranslationBlock *current_tb; /* currently executing TB */
-    target_ulong cr[5]; /* NOTE: cr1 is unused */
     target_ulong dr[8]; /* debug registers */
     int interrupt_request;
     int user_mode_only; /* user mode only simulation */
 
-    uint32_t a20_mask;
-
     /* soft mmu support */
     /* in order to avoid passing too many arguments to the memory
        write helpers, we store some rarely used information in the CPU
@@ -501,7 +512,11 @@ typedef struct CPUX86State {
     uint32_t cpuid_vendor3;
     uint32_t cpuid_version;
     uint32_t cpuid_features;
+    uint32_t cpuid_ext_features;
 
+#ifdef USE_KQEMU
+    int kqemu_enabled;
+#endif
     /* in order to simplify APIC support, we leave this pointer to the
        user */
     struct APICState *apic_state;
target-i386/helper.c
@@ -1274,7 +1274,7 @@ void helper_cpuid(void)
     case 1:
         EAX = env->cpuid_version;
         EBX = 0;
-        ECX = 0;
+        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    default:
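With this change, CPUID leaf 1 stops reporting a hard-wired 0 in ECX and exposes the new cpuid_ext_features word, matching the CPUID_EXT_* bits added to cpu.h above. A guest-side check, compilable with GCC on i386/x86_64; the program itself is only illustrative.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t eax = 1, ebx, ecx, edx;
    asm volatile ("cpuid"
                  : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                  : "0" (eax));
    /* ecx now carries the extended-feature bits instead of 0 */
    printf("leaf 1: ecx=%08x edx=%08x\n", ecx, edx);
    return 0;
}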
@@ -1828,6 +1828,12 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         ESP = (ESP & ~sp_mask) | (sp & sp_mask);
         EIP = offset;
     }
+#ifdef USE_KQEMU
+    if (kqemu_is_ok(env)) {
+        env->exception_index = -1;
+        cpu_loop_exit();
+    }
+#endif
 }
 
 /* real and vm86 mode iret */
@@ -2097,11 +2103,25 @@ void helper_iret_protected(int shift, int next_eip)
     } else {
         helper_ret_protected(shift, 1, 0);
     }
+#ifdef USE_KQEMU
+    if (kqemu_is_ok(env)) {
+        CC_OP = CC_OP_EFLAGS;
+        env->exception_index = -1;
+        cpu_loop_exit();
+    }
+#endif
 }
 
 void helper_lret_protected(int shift, int addend)
 {
     helper_ret_protected(shift, 0, addend);
+#ifdef USE_KQEMU
+    if (kqemu_is_ok(env)) {
+        CC_OP = CC_OP_EFLAGS;
+        env->exception_index = -1;
+        cpu_loop_exit();
+    }
+#endif
 }
 
 void helper_sysenter(void)
@@ -2146,6 +2166,12 @@ void helper_sysexit(void)
                            DESC_W_MASK | DESC_A_MASK);
     ESP = ECX;
     EIP = EDX;
+#ifdef USE_KQEMU
+    if (kqemu_is_ok(env)) {
+        env->exception_index = -1;
+        cpu_loop_exit();
+    }
+#endif
 }
 
 void helper_movl_crN_T0(int reg)