Commit da260249a4109b1ac82016b27973c50f0a74311a
1 parent da94d263
kqemu API change - allow use of kqemu with 32 bit QEMU on a 64 bit host
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4628 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 6 changed files with 285 additions and 135 deletions
configure
@@ -1222,7 +1222,7 @@ case "$target_cpu" in
     echo "TARGET_ARCH=i386" >> $config_mak
     echo "#define TARGET_ARCH \"i386\"" >> $config_h
     echo "#define TARGET_I386 1" >> $config_h
-    if test $kqemu = "yes" -a "$target_softmmu" = "yes" -a $cpu = "i386"
+    if test $kqemu = "yes" -a "$target_softmmu" = "yes"
     then
       echo "#define USE_KQEMU 1" >> $config_h
     fi
exec-all.h
@@ -563,15 +563,21 @@ static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
 #ifdef USE_KQEMU
 #define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

+#define MSR_QPI_COMMBASE 0xfabe0010
+
 int kqemu_init(CPUState *env);
 int kqemu_cpu_exec(CPUState *env);
 void kqemu_flush_page(CPUState *env, target_ulong addr);
 void kqemu_flush(CPUState *env, int global);
 void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
 void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
+void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
+                        ram_addr_t phys_offset);
 void kqemu_cpu_interrupt(CPUState *env);
 void kqemu_record_dump(void);

+extern uint32_t kqemu_comm_base;
+
 static inline int kqemu_is_ok(CPUState *env)
 {
     return(env->kqemu_enabled &&
exec.c
@@ -2139,6 +2139,13 @@ void cpu_register_physical_memory(target_phys_addr_t start_addr,
     ram_addr_t orig_size = size;
     void *subpage;

+#ifdef USE_KQEMU
+    /* XXX: should not depend on cpu context */
+    env = first_cpu;
+    if (env->kqemu_enabled) {
+        kqemu_set_phys_mem(start_addr, size, phys_offset);
+    }
+#endif
     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
     end_addr = start_addr + (target_phys_addr_t)size;
     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
kqemu.c
@@ -1,7 +1,7 @@
 /*
  * KQEMU support
  *
- * Copyright (c) 2005 Fabrice Bellard
+ * Copyright (c) 2005-2008 Fabrice Bellard
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -51,24 +51,14 @@
 #include <fcntl.h>
 #include "kqemu.h"

-/* compatibility stuff */
-#ifndef KQEMU_RET_SYSCALL
-#define KQEMU_RET_SYSCALL 0x0300 /* syscall insn */
-#endif
-#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
-#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
-#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
-#endif
-#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
-#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
-#endif
-
 #ifdef _WIN32
 #define KQEMU_DEVICE "\\\\.\\kqemu"
 #else
 #define KQEMU_DEVICE "/dev/kqemu"
 #endif

+static void qpi_init(void);
+
 #ifdef _WIN32
 #define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
 HANDLE kqemu_fd = KQEMU_INVALID_FD;
@@ -84,14 +74,15 @@ int kqemu_fd = KQEMU_INVALID_FD;
    2 = kernel kqemu
 */
 int kqemu_allowed = 1;
-unsigned long *pages_to_flush;
+uint64_t *pages_to_flush;
 unsigned int nb_pages_to_flush;
-unsigned long *ram_pages_to_update;
+uint64_t *ram_pages_to_update;
 unsigned int nb_ram_pages_to_update;
-unsigned long *modified_ram_pages;
+uint64_t *modified_ram_pages;
 unsigned int nb_modified_ram_pages;
 uint8_t *modified_ram_pages_table;
-extern uint32_t **l1_phys_map;
+int qpi_io_memory;
+uint32_t kqemu_comm_base; /* physical address of the QPI communication page */

 #define cpuid(index, eax, ebx, ecx, edx) \
     asm volatile ("cpuid" \
@@ -161,7 +152,7 @@ static void kqemu_update_cpuid(CPUState *env)

 int kqemu_init(CPUState *env)
 {
-    struct kqemu_init init;
+    struct kqemu_init kinit;
    int ret, version;
 #ifdef _WIN32
    DWORD temp;
@@ -197,39 +188,35 @@ int kqemu_init(CPUState *env)
     }

     pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
-                                  sizeof(unsigned long));
+                                  sizeof(uint64_t));
     if (!pages_to_flush)
         goto fail;

     ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
-                                       sizeof(unsigned long));
+                                       sizeof(uint64_t));
     if (!ram_pages_to_update)
         goto fail;

     modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
-                                      sizeof(unsigned long));
+                                      sizeof(uint64_t));
     if (!modified_ram_pages)
         goto fail;
     modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
     if (!modified_ram_pages_table)
         goto fail;

-    init.ram_base = phys_ram_base;
-    init.ram_size = phys_ram_size;
-    init.ram_dirty = phys_ram_dirty;
-    init.phys_to_ram_map = l1_phys_map;
-    init.pages_to_flush = pages_to_flush;
-#if KQEMU_VERSION >= 0x010200
-    init.ram_pages_to_update = ram_pages_to_update;
-#endif
-#if KQEMU_VERSION >= 0x010300
-    init.modified_ram_pages = modified_ram_pages;
-#endif
+    memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */
+    kinit.ram_base = phys_ram_base;
+    kinit.ram_size = phys_ram_size;
+    kinit.ram_dirty = phys_ram_dirty;
+    kinit.pages_to_flush = pages_to_flush;
+    kinit.ram_pages_to_update = ram_pages_to_update;
+    kinit.modified_ram_pages = modified_ram_pages;
 #ifdef _WIN32
-    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
+    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit),
                           NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
 #else
-    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
+    ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit);
 #endif
     if (ret < 0) {
         fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
@@ -242,6 +229,8 @@ int kqemu_init(CPUState *env)
     env->kqemu_enabled = kqemu_allowed;
     nb_pages_to_flush = 0;
     nb_ram_pages_to_update = 0;
+
+    qpi_init();
     return 0;
 }

@@ -272,7 +261,8 @@ void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
 {
 #ifdef DEBUG
     if (loglevel & CPU_LOG_INT) {
-        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
+        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n",
+                (unsigned long)ram_addr);
     }
 #endif
     /* we only track transitions to dirty state */
@@ -327,6 +317,51 @@ void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
     }
 }

+void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
+                        ram_addr_t phys_offset)
+{
+    struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1;
+    uint64_t end;
+    int ret, io_index;
+
+    end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
+    start_addr &= TARGET_PAGE_MASK;
+    kphys_mem->phys_addr = start_addr;
+    kphys_mem->size = end - start_addr;
+    kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK;
+    io_index = phys_offset & ~TARGET_PAGE_MASK;
+    switch(io_index) {
+    case IO_MEM_RAM:
+        kphys_mem->io_index = KQEMU_IO_MEM_RAM;
+        break;
+    case IO_MEM_ROM:
+        kphys_mem->io_index = KQEMU_IO_MEM_ROM;
+        break;
+    default:
+        if (qpi_io_memory == io_index) {
+            kphys_mem->io_index = KQEMU_IO_MEM_COMM;
+        } else {
+            kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED;
+        }
+        break;
+    }
+#ifdef _WIN32
+    {
+        DWORD temp;
+        ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM,
+                              kphys_mem, sizeof(*kphys_mem),
+                              NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
+    }
+#else
+    ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem);
+#endif
+    if (ret < 0) {
+        fprintf(stderr, "kqemu: KQEMU_SET_PHYS_PAGE error=%d: start_addr=0x%016" PRIx64 " size=0x%08lx phys_offset=0x%08lx\n",
+                ret, start_addr,
+                (unsigned long)size, (unsigned long)phys_offset);
+    }
+}
+
 struct fpstate {
     uint16_t fpuc;
     uint16_t dummy1;
@@ -474,7 +509,7 @@ static int do_syscall(CPUState *env,
     int selector;

     selector = (env->star >> 32) & 0xffff;
-#ifdef __x86_64__
+#ifdef TARGET_X86_64
     if (env->hflags & HF_LMA_MASK) {
         int code64;

@@ -631,6 +666,24 @@ void kqemu_record_dump(void)
 }
 #endif

+static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
+                                  const SegmentCache *sc)
+{
+    ksc->selector = sc->selector;
+    ksc->flags = sc->flags;
+    ksc->limit = sc->limit;
+    ksc->base = sc->base;
+}
+
+static inline void kqemu_save_seg(SegmentCache *sc,
+                                  const struct kqemu_segment_cache *ksc)
+{
+    sc->selector = ksc->selector;
+    sc->flags = ksc->flags;
+    sc->limit = ksc->limit;
+    sc->base = ksc->base;
+}
+
 int kqemu_cpu_exec(CPUState *env)
 {
     struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
@@ -638,7 +691,6 @@ int kqemu_cpu_exec(CPUState *env)
 #ifdef CONFIG_PROFILER
     int64_t ti;
 #endif
-
 #ifdef _WIN32
     DWORD temp;
 #endif
@@ -652,35 +704,33 @@ int kqemu_cpu_exec(CPUState *env)
         cpu_dump_state(env, logfile, fprintf, 0);
     }
 #endif
-    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
+    for(i = 0; i < CPU_NB_REGS; i++)
+        kenv->regs[i] = env->regs[i];
     kenv->eip = env->eip;
     kenv->eflags = env->eflags;
-    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
-    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
-    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
-    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
-    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
+    for(i = 0; i < 6; i++)
+        kqemu_load_seg(&kenv->segs[i], &env->segs[i]);
+    kqemu_load_seg(&kenv->ldt, &env->ldt);
+    kqemu_load_seg(&kenv->tr, &env->tr);
+    kqemu_load_seg(&kenv->gdt, &env->gdt);
+    kqemu_load_seg(&kenv->idt, &env->idt);
     kenv->cr0 = env->cr[0];
     kenv->cr2 = env->cr[2];
     kenv->cr3 = env->cr[3];
     kenv->cr4 = env->cr[4];
     kenv->a20_mask = env->a20_mask;
-#if KQEMU_VERSION >= 0x010100
     kenv->efer = env->efer;
-#endif
-#if KQEMU_VERSION >= 0x010300
     kenv->tsc_offset = 0;
     kenv->star = env->star;
     kenv->sysenter_cs = env->sysenter_cs;
     kenv->sysenter_esp = env->sysenter_esp;
     kenv->sysenter_eip = env->sysenter_eip;
-#ifdef __x86_64__
+#ifdef TARGET_X86_64
     kenv->lstar = env->lstar;
     kenv->cstar = env->cstar;
     kenv->fmask = env->fmask;
     kenv->kernelgsbase = env->kernelgsbase;
 #endif
-#endif
     if (env->dr[7] & 0xff) {
         kenv->dr7 = env->dr[7];
         kenv->dr0 = env->dr[0];
@@ -694,15 +744,11 @@ int kqemu_cpu_exec(CPUState *env)
     cpl = (env->hflags & HF_CPL_MASK);
     kenv->cpl = cpl;
     kenv->nb_pages_to_flush = nb_pages_to_flush;
-#if KQEMU_VERSION >= 0x010200
     kenv->user_only = (env->kqemu_enabled == 1);
     kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
-#endif
     nb_ram_pages_to_update = 0;
-
-#if KQEMU_VERSION >= 0x010300
     kenv->nb_modified_ram_pages = nb_modified_ram_pages;
-#endif
+
     kqemu_reset_modified_ram_pages();

     if (env->cpuid_features & CPUID_FXSR)
@@ -720,41 +766,30 @@ int kqemu_cpu_exec(CPUState *env)
         ret = -1;
     }
 #else
-#if KQEMU_VERSION >= 0x010100
     ioctl(kqemu_fd, KQEMU_EXEC, kenv);
     ret = kenv->retval;
-#else
-    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
-#endif
 #endif
     if (env->cpuid_features & CPUID_FXSR)
         save_native_fp_fxsave(env);
     else
         save_native_fp_fsave(env);

-    memcpy(env->regs, kenv->regs, sizeof(env->regs));
+    for(i = 0; i < CPU_NB_REGS; i++)
+        env->regs[i] = kenv->regs[i];
     env->eip = kenv->eip;
     env->eflags = kenv->eflags;
-    memcpy(env->segs, kenv->segs, sizeof(env->segs));
+    for(i = 0; i < 6; i++)
+        kqemu_save_seg(&env->segs[i], &kenv->segs[i]);
     cpu_x86_set_cpl(env, kenv->cpl);
-    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
-#if 0
-    /* no need to restore that */
-    memcpy(env->tr, kenv->tr, sizeof(env->tr));
-    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
-    memcpy(env->idt, kenv->idt, sizeof(env->idt));
-    env->a20_mask = kenv->a20_mask;
-#endif
+    kqemu_save_seg(&env->ldt, &kenv->ldt);
     env->cr[0] = kenv->cr0;
     env->cr[4] = kenv->cr4;
     env->cr[3] = kenv->cr3;
     env->cr[2] = kenv->cr2;
     env->dr[6] = kenv->dr6;
-#if KQEMU_VERSION >= 0x010300
-#ifdef __x86_64__
+#ifdef TARGET_X86_64
     env->kernelgsbase = kenv->kernelgsbase;
 #endif
-#endif

     /* flush pages as indicated by kqemu */
     if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
@@ -771,13 +806,10 @@ int kqemu_cpu_exec(CPUState *env)
     kqemu_exec_count++;
 #endif

-#if KQEMU_VERSION >= 0x010200
     if (kenv->nb_ram_pages_to_update > 0) {
         cpu_tlb_update_dirty(env);
     }
-#endif

-#if KQEMU_VERSION >= 0x010300
     if (kenv->nb_modified_ram_pages > 0) {
         for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
             unsigned long addr;
@@ -785,7 +817,6 @@ int kqemu_cpu_exec(CPUState *env)
             tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
         }
     }
-#endif

     /* restore the hidden flags */
     {
@@ -905,11 +936,85 @@ int kqemu_cpu_exec(CPUState *env)

 void kqemu_cpu_interrupt(CPUState *env)
 {
-#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
+#if defined(_WIN32)
     /* cancelling the I/O request causes KQEMU to finish executing the
        current block and successfully returning. */
     CancelIo(kqemu_fd);
 #endif
 }

+/*
+   QEMU paravirtualization interface. The current interface only
+   allows to modify the IF and IOPL flags when running in
+   kqemu.
+
+   At this point it is not very satisfactory. I leave it for reference
+   as it adds little complexity.
+*/
+
+#define QPI_COMM_PAGE_PHYS_ADDR 0xff000000
+
+static uint32_t qpi_mem_readb(void *opaque, target_phys_addr_t addr)
+{
+    return 0;
+}
+
+static uint32_t qpi_mem_readw(void *opaque, target_phys_addr_t addr)
+{
+    return 0;
+}
+
+static void qpi_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+}
+
+static void qpi_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+}
+
+static uint32_t qpi_mem_readl(void *opaque, target_phys_addr_t addr)
+{
+    CPUState *env;
+
+    env = cpu_single_env;
+    if (!env)
+        return 0;
+    return env->eflags & (IF_MASK | IOPL_MASK);
+}
+
+/* Note: after writing to this address, the guest code must make sure
+   it is exiting the current TB. pushf/popf can be used for that
+   purpose. */
+static void qpi_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+    CPUState *env;
+
+    env = cpu_single_env;
+    if (!env)
+        return;
+    env->eflags = (env->eflags & ~(IF_MASK | IOPL_MASK)) |
+        (val & (IF_MASK | IOPL_MASK));
+}
+
+static CPUReadMemoryFunc *qpi_mem_read[3] = {
+    qpi_mem_readb,
+    qpi_mem_readw,
+    qpi_mem_readl,
+};
+
+static CPUWriteMemoryFunc *qpi_mem_write[3] = {
+    qpi_mem_writeb,
+    qpi_mem_writew,
+    qpi_mem_writel,
+};
+
+static void qpi_init(void)
+{
+    kqemu_comm_base = 0xff000000 | 1;
+    qpi_io_memory = cpu_register_io_memory(0,
+                                           qpi_mem_read,
+                                           qpi_mem_write, NULL);
+    cpu_register_physical_memory(kqemu_comm_base & ~0xfff,
+                                 0x1000, qpi_io_memory);
+}
 #endif
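The QPI block added above exposes a single 32-bit register, the IF and IOPL bits of EFLAGS, through a communication page whose guest-physical address is published via the MSR_QPI_COMMBASE MSR (see exec-all.h above and the helper_rdmsr hunk in target-i386/op_helper.c below). A rough sketch of how a paravirtualized guest kernel might consume it; the map_phys_page() helper is hypothetical, standing in for whatever mapping primitive the guest OS provides:

    #include <stdint.h>

    #define MSR_QPI_COMMBASE 0xfabe0010      /* from exec-all.h in this patch */

    /* hypothetical guest-kernel helper: map one physical page uncached */
    extern volatile uint32_t *map_phys_page(uint64_t phys_addr);

    static inline uint64_t rdmsr(uint32_t index)
    {
        uint32_t lo, hi;
        __asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(index));
        return ((uint64_t)hi << 32) | lo;
    }

    void qpi_enable_interrupts(void)
    {
        /* helper_rdmsr() returns kqemu_comm_base (0xff000000 | 1) when kqemu
           is enabled, and 0 otherwise */
        uint64_t base = rdmsr(MSR_QPI_COMMBASE);
        if (!base)
            return;

        volatile uint32_t *comm = map_phys_page(base & ~0xfffULL);

        uint32_t flags = comm[0];        /* qpi_mem_readl: current IF | IOPL */
        comm[0] = flags | (1 << 9);      /* qpi_mem_writel: set IF */
        /* the new value only takes effect once the current translation
           block is left, e.g. with a pushf/popf pair */
    }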
kqemu.h
@@ -1,8 +1,8 @@
 /*
  * KQEMU header
- *
- * Copyright (c) 2004-2006 Fabrice Bellard
- *
+ *
+ * Copyright (c) 2004-2008 Fabrice Bellard
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
  * in the Software without restriction, including without limitation the rights
@@ -24,25 +24,27 @@
 #ifndef KQEMU_H
 #define KQEMU_H

-#define KQEMU_VERSION 0x010300
+#if defined(__i386__)
+#define KQEMU_PAD32(x) x
+#else
+#define KQEMU_PAD32(x)
+#endif
+
+#define KQEMU_VERSION 0x010400

 struct kqemu_segment_cache {
-    uint32_t selector;
-    unsigned long base;
-    uint32_t limit;
+    uint16_t selector;
+    uint16_t padding1;
     uint32_t flags;
+    uint64_t base;
+    uint32_t limit;
+    uint32_t padding2;
 };

 struct kqemu_cpu_state {
-#ifdef __x86_64__
-    unsigned long regs[16];
-#else
-    unsigned long regs[8];
-#endif
-    unsigned long eip;
-    unsigned long eflags;
-
-    uint32_t dummy0, dummy1, dumm2, dummy3, dummy4;
+    uint64_t regs[16];
+    uint64_t eip;
+    uint64_t eflags;

     struct kqemu_segment_cache segs[6]; /* selector values */
     struct kqemu_segment_cache ldt;
@@ -50,63 +52,81 @@ struct kqemu_cpu_state {
     struct kqemu_segment_cache gdt; /* only base and limit are used */
     struct kqemu_segment_cache idt; /* only base and limit are used */

-    unsigned long cr0;
-    unsigned long dummy5;
-    unsigned long cr2;
-    unsigned long cr3;
-    unsigned long cr4;
-    uint32_t a20_mask;
+    uint64_t cr0;
+    uint64_t cr2;
+    uint64_t cr3;
+    uint64_t cr4;
+    uint64_t a20_mask;

     /* sysenter registers */
-    uint32_t sysenter_cs;
-    uint32_t sysenter_esp;
-    uint32_t sysenter_eip;
-    uint64_t efer __attribute__((aligned(8)));
+    uint64_t sysenter_cs;
+    uint64_t sysenter_esp;
+    uint64_t sysenter_eip;
+    uint64_t efer;
     uint64_t star;
-#ifdef __x86_64__
-    unsigned long lstar;
-    unsigned long cstar;
-    unsigned long fmask;
-    unsigned long kernelgsbase;
-#endif
+
+    uint64_t lstar;
+    uint64_t cstar;
+    uint64_t fmask;
+    uint64_t kernelgsbase;
+
     uint64_t tsc_offset;

-    unsigned long dr0;
-    unsigned long dr1;
-    unsigned long dr2;
-    unsigned long dr3;
-    unsigned long dr6;
-    unsigned long dr7;
+    uint64_t dr0;
+    uint64_t dr1;
+    uint64_t dr2;
+    uint64_t dr3;
+    uint64_t dr6;
+    uint64_t dr7;

     uint8_t cpl;
     uint8_t user_only;
+    uint16_t padding1;

     uint32_t error_code; /* error_code when exiting with an exception */
-    unsigned long next_eip; /* next eip value when exiting with an interrupt */
-    unsigned int nb_pages_to_flush; /* number of pages to flush,
+    uint64_t next_eip; /* next eip value when exiting with an interrupt */
+    uint32_t nb_pages_to_flush; /* number of pages to flush,
                                        KQEMU_FLUSH_ALL means full flush */
 #define KQEMU_MAX_PAGES_TO_FLUSH 512
 #define KQEMU_FLUSH_ALL (KQEMU_MAX_PAGES_TO_FLUSH + 1)

-    long retval;
+    int32_t retval;

     /* number of ram_dirty entries to update */
-    unsigned int nb_ram_pages_to_update;
+    uint32_t nb_ram_pages_to_update;
 #define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
 #define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)

 #define KQEMU_MAX_MODIFIED_RAM_PAGES 512
-    unsigned int nb_modified_ram_pages;
+    uint32_t nb_modified_ram_pages;
 };

 struct kqemu_init {
     uint8_t *ram_base; /* must be page aligned */
-    unsigned long ram_size; /* must be multiple of 4 KB */
+    KQEMU_PAD32(uint32_t padding1;)
+    uint64_t ram_size; /* must be multiple of 4 KB */
     uint8_t *ram_dirty; /* must be page aligned */
-    uint32_t **phys_to_ram_map; /* must be page aligned */
-    unsigned long *pages_to_flush; /* must be page aligned */
-    unsigned long *ram_pages_to_update; /* must be page aligned */
-    unsigned long *modified_ram_pages; /* must be page aligned */
+    KQEMU_PAD32(uint32_t padding2;)
+    uint64_t *pages_to_flush; /* must be page aligned */
+    KQEMU_PAD32(uint32_t padding4;)
+    uint64_t *ram_pages_to_update; /* must be page aligned */
+    KQEMU_PAD32(uint32_t padding5;)
+    uint64_t *modified_ram_pages; /* must be page aligned */
+    KQEMU_PAD32(uint32_t padding6;)
+};
+
+#define KQEMU_IO_MEM_RAM        0
+#define KQEMU_IO_MEM_ROM        1
+#define KQEMU_IO_MEM_COMM       2 /* kqemu communication page */
+#define KQEMU_IO_MEM_UNASSIGNED 3 /* any device: return to application */
+
+struct kqemu_phys_mem {
+    uint64_t phys_addr; /* physical address range: phys_addr,
+                           phys_addr + size */
+    uint64_t size;
+    uint64_t ram_addr; /* corresponding ram address */
+    uint32_t io_index; /* memory type: see KQEMU_IO_MEM_xxx */
+    uint32_t padding1;
 };

 #define KQEMU_RET_ABORT (-1)
@@ -122,11 +142,13 @@ struct kqemu_init {
 #define KQEMU_INIT CTL_CODE(FILE_DEVICE_UNKNOWN, 2, METHOD_BUFFERED, FILE_WRITE_ACCESS)
 #define KQEMU_GET_VERSION CTL_CODE(FILE_DEVICE_UNKNOWN, 3, METHOD_BUFFERED, FILE_READ_ACCESS)
 #define KQEMU_MODIFY_RAM_PAGES CTL_CODE(FILE_DEVICE_UNKNOWN, 4, METHOD_BUFFERED, FILE_WRITE_ACCESS)
+#define KQEMU_SET_PHYS_MEM CTL_CODE(FILE_DEVICE_UNKNOWN, 5, METHOD_BUFFERED, FILE_WRITE_ACCESS)
 #else
 #define KQEMU_EXEC _IOWR('q', 1, struct kqemu_cpu_state)
 #define KQEMU_INIT _IOW('q', 2, struct kqemu_init)
 #define KQEMU_GET_VERSION _IOR('q', 3, int)
 #define KQEMU_MODIFY_RAM_PAGES _IOW('q', 4, int)
+#define KQEMU_SET_PHYS_MEM _IOW('q', 5, struct kqemu_phys_mem)
 #endif

 #endif /* KQEMU_H */
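KQEMU_SET_PHYS_MEM is the user-space-facing half of the new memory-map plumbing: QEMU issues it from kqemu_set_phys_mem() (kqemu.c above) each time cpu_register_physical_memory() runs. A stripped-down Linux-only sketch of the same call, assuming the descriptor was opened on /dev/kqemu and KQEMU_INIT has already been issued:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "kqemu.h"          /* struct kqemu_phys_mem, KQEMU_SET_PHYS_MEM */

    /* declare guest-physical [0, 16 MB) as plain RAM backed by ram_base + 0 */
    static int register_low_ram(int kqemu_fd)
    {
        struct kqemu_phys_mem km = {
            .phys_addr = 0,
            .size      = 16 * 1024 * 1024,
            .ram_addr  = 0,
            .io_index  = KQEMU_IO_MEM_RAM,
        };
        return ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, &km);
    }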
target-i386/op_helper.c
@@ -1950,20 +1950,21 @@ void helper_cpuid(void)
     case 0x80000008:
         /* virtual & phys address size in low 2 bytes. */
         /* XXX: This value must match the one used in the MMU code. */
-#if defined(TARGET_X86_64)
-#  if defined(USE_KQEMU)
-        EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
-#  else
+        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
+            /* 64 bit processor */
+#if defined(USE_KQEMU)
+            EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
+#else
         /* XXX: The physical address space is limited to 42 bits in exec.c. */
-        EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
-#  endif
+            EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
+#endif
+        } else {
+#if defined(USE_KQEMU)
+            EAX = 0x00000020; /* 32 bits physical */
 #else
-#  if defined(USE_KQEMU)
-        EAX = 0x00000020; /* 32 bits physical */
-#  else
-        EAX = 0x00000024; /* 36 bits physical */
-#  endif
+            EAX = 0x00000024; /* 36 bits physical */
 #endif
+        }
         EBX = 0;
         ECX = 0;
         EDX = 0;
@@ -3158,6 +3159,15 @@ void helper_rdmsr(void)
         val = env->kernelgsbase;
         break;
 #endif
+#ifdef USE_KQEMU
+    case MSR_QPI_COMMBASE:
+        if (env->kqemu_enabled) {
+            val = kqemu_comm_base;
+        } else {
+            val = 0;
+        }
+        break;
+#endif
     default:
         /* XXX: exception ? */
         val = 0;