Commit 14ce26e755135e80f3726d42a5a887723d615291
1 parent: c4687878
x86_64 target support
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1197 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 9 changed files with 2124 additions and 687 deletions.
Note: only 9 of the 11 changed files are displayed (too many changes to show in full).
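Throughout the diff, guest-visible state (registers, segment bases, control registers, virtual addresses) is widened from uint32_t or uint8_t * to target_ulong, which becomes 64-bit when TARGET_X86_64 is defined. As a reading aid, here is a minimal sketch of what that typedef and the TARGET_FMT_lx format macro (used in the new log messages) expand to; the real definitions live elsewhere in the tree and the exact spelling here is an assumption, not part of this commit:

```c
/* Sketch only -- the real definitions are outside the files shown here. */
#include <inttypes.h>

#ifdef TARGET_X86_64
typedef uint64_t target_ulong;        /* 64-bit guest addresses and registers */
#define TARGET_FMT_lx "%016" PRIx64   /* printf format for target_ulong */
#else
typedef uint32_t target_ulong;        /* legacy 32-bit i386 target */
#define TARGET_FMT_lx "%08x"
#endif
```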
target-i386/cpu.h
| @@ -20,7 +20,13 @@ | @@ -20,7 +20,13 @@ | ||
| 20 | #ifndef CPU_I386_H | 20 | #ifndef CPU_I386_H |
| 21 | #define CPU_I386_H | 21 | #define CPU_I386_H |
| 22 | 22 | ||
| 23 | +#include "config.h" | ||
| 24 | + | ||
| 25 | +#ifdef TARGET_X86_64 | ||
| 26 | +#define TARGET_LONG_BITS 64 | ||
| 27 | +#else | ||
| 23 | #define TARGET_LONG_BITS 32 | 28 | #define TARGET_LONG_BITS 32 |
| 29 | +#endif | ||
| 24 | 30 | ||
| 25 | /* target supports implicit self modifying code */ | 31 | /* target supports implicit self modifying code */ |
| 26 | #define TARGET_HAS_SMC | 32 | #define TARGET_HAS_SMC |
| @@ -63,6 +69,8 @@ | @@ -63,6 +69,8 @@ | ||
| 63 | #define DESC_G_MASK (1 << 23) | 69 | #define DESC_G_MASK (1 << 23) |
| 64 | #define DESC_B_SHIFT 22 | 70 | #define DESC_B_SHIFT 22 |
| 65 | #define DESC_B_MASK (1 << DESC_B_SHIFT) | 71 | #define DESC_B_MASK (1 << DESC_B_SHIFT) |
| 72 | +#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */ | ||
| 73 | +#define DESC_L_MASK (1 << DESC_L_SHIFT) | ||
| 66 | #define DESC_AVL_MASK (1 << 20) | 74 | #define DESC_AVL_MASK (1 << 20) |
| 67 | #define DESC_P_MASK (1 << 15) | 75 | #define DESC_P_MASK (1 << 15) |
| 68 | #define DESC_DPL_SHIFT 13 | 76 | #define DESC_DPL_SHIFT 13 |
| @@ -125,6 +133,8 @@ | @@ -125,6 +133,8 @@ | ||
| 125 | #define HF_EM_SHIFT 10 | 133 | #define HF_EM_SHIFT 10 |
| 126 | #define HF_TS_SHIFT 11 | 134 | #define HF_TS_SHIFT 11 |
| 127 | #define HF_IOPL_SHIFT 12 /* must be same as eflags */ | 135 | #define HF_IOPL_SHIFT 12 /* must be same as eflags */ |
| 136 | +#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ | ||
| 137 | +#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ | ||
| 128 | #define HF_VM_SHIFT 17 /* must be same as eflags */ | 138 | #define HF_VM_SHIFT 17 /* must be same as eflags */ |
| 129 | 139 | ||
| 130 | #define HF_CPL_MASK (3 << HF_CPL_SHIFT) | 140 | #define HF_CPL_MASK (3 << HF_CPL_SHIFT) |
| @@ -138,6 +148,8 @@ | @@ -138,6 +148,8 @@ | ||
| 138 | #define HF_MP_MASK (1 << HF_MP_SHIFT) | 148 | #define HF_MP_MASK (1 << HF_MP_SHIFT) |
| 139 | #define HF_EM_MASK (1 << HF_EM_SHIFT) | 149 | #define HF_EM_MASK (1 << HF_EM_SHIFT) |
| 140 | #define HF_TS_MASK (1 << HF_TS_SHIFT) | 150 | #define HF_TS_MASK (1 << HF_TS_SHIFT) |
| 151 | +#define HF_LMA_MASK (1 << HF_LMA_SHIFT) | ||
| 152 | +#define HF_CS64_MASK (1 << HF_CS64_SHIFT) | ||
| 141 | 153 | ||
| 142 | #define CR0_PE_MASK (1 << 0) | 154 | #define CR0_PE_MASK (1 << 0) |
| 143 | #define CR0_MP_MASK (1 << 1) | 155 | #define CR0_MP_MASK (1 << 1) |
| @@ -156,6 +168,9 @@ | @@ -156,6 +168,9 @@ | ||
| 156 | #define CR4_PSE_MASK (1 << 4) | 168 | #define CR4_PSE_MASK (1 << 4) |
| 157 | #define CR4_PAE_MASK (1 << 5) | 169 | #define CR4_PAE_MASK (1 << 5) |
| 158 | #define CR4_PGE_MASK (1 << 7) | 170 | #define CR4_PGE_MASK (1 << 7) |
| 171 | +#define CR4_PCE_MASK (1 << 8) | ||
| 172 | +#define CR4_OSFXSR_MASK (1 << 9) | ||
| 173 | +#define CR4_OSXMMEXCPT_MASK (1 << 10) | ||
| 159 | 174 | ||
| 160 | #define PG_PRESENT_BIT 0 | 175 | #define PG_PRESENT_BIT 0 |
| 161 | #define PG_RW_BIT 1 | 176 | #define PG_RW_BIT 1 |
| @@ -193,6 +208,44 @@ | @@ -193,6 +208,44 @@ | ||
| 193 | #define MSR_IA32_SYSENTER_ESP 0x175 | 208 | #define MSR_IA32_SYSENTER_ESP 0x175 |
| 194 | #define MSR_IA32_SYSENTER_EIP 0x176 | 209 | #define MSR_IA32_SYSENTER_EIP 0x176 |
| 195 | 210 | ||
| 211 | +#define MSR_EFER 0xc0000080 | ||
| 212 | + | ||
| 213 | +#define MSR_EFER_SCE (1 << 0) | ||
| 214 | +#define MSR_EFER_LME (1 << 8) | ||
| 215 | +#define MSR_EFER_LMA (1 << 10) | ||
| 216 | +#define MSR_EFER_NXE (1 << 11) | ||
| 217 | +#define MSR_EFER_FFXSR (1 << 14) | ||
| 218 | + | ||
| 219 | +#define MSR_STAR 0xc0000081 | ||
| 220 | +#define MSR_LSTAR 0xc0000082 | ||
| 221 | +#define MSR_CSTAR 0xc0000083 | ||
| 222 | +#define MSR_FMASK 0xc0000084 | ||
| 223 | +#define MSR_FSBASE 0xc0000100 | ||
| 224 | +#define MSR_GSBASE 0xc0000101 | ||
| 225 | +#define MSR_KERNELGSBASE 0xc0000102 | ||
| 226 | + | ||
| 227 | +/* cpuid_features bits */ | ||
| 228 | +#define CPUID_FP87 (1 << 0) | ||
| 229 | +#define CPUID_VME (1 << 1) | ||
| 230 | +#define CPUID_DE (1 << 2) | ||
| 231 | +#define CPUID_PSE (1 << 3) | ||
| 232 | +#define CPUID_TSC (1 << 4) | ||
| 233 | +#define CPUID_MSR (1 << 5) | ||
| 234 | +#define CPUID_PAE (1 << 6) | ||
| 235 | +#define CPUID_MCE (1 << 7) | ||
| 236 | +#define CPUID_CX8 (1 << 8) | ||
| 237 | +#define CPUID_APIC (1 << 9) | ||
| 238 | +#define CPUID_SEP (1 << 11) /* sysenter/sysexit */ | ||
| 239 | +#define CPUID_MTRR (1 << 12) | ||
| 240 | +#define CPUID_PGE (1 << 13) | ||
| 241 | +#define CPUID_MCA (1 << 14) | ||
| 242 | +#define CPUID_CMOV (1 << 15) | ||
| 243 | +/* ... */ | ||
| 244 | +#define CPUID_MMX (1 << 23) | ||
| 245 | +#define CPUID_FXSR (1 << 24) | ||
| 246 | +#define CPUID_SSE (1 << 25) | ||
| 247 | +#define CPUID_SSE2 (1 << 26) | ||
| 248 | + | ||
| 196 | #define EXCP00_DIVZ 0 | 249 | #define EXCP00_DIVZ 0 |
| 197 | #define EXCP01_SSTP 1 | 250 | #define EXCP01_SSTP 1 |
| 198 | #define EXCP02_NMI 2 | 251 | #define EXCP02_NMI 2 |
| @@ -219,42 +272,52 @@ enum { | @@ -219,42 +272,52 @@ enum { | ||
| 219 | CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ | 272 | CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ |
| 220 | CC_OP_MULW, | 273 | CC_OP_MULW, |
| 221 | CC_OP_MULL, | 274 | CC_OP_MULL, |
| 275 | + CC_OP_MULQ, | ||
| 222 | 276 | ||
| 223 | CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | 277 | CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ |
| 224 | CC_OP_ADDW, | 278 | CC_OP_ADDW, |
| 225 | CC_OP_ADDL, | 279 | CC_OP_ADDL, |
| 280 | + CC_OP_ADDQ, | ||
| 226 | 281 | ||
| 227 | CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | 282 | CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ |
| 228 | CC_OP_ADCW, | 283 | CC_OP_ADCW, |
| 229 | CC_OP_ADCL, | 284 | CC_OP_ADCL, |
| 285 | + CC_OP_ADCQ, | ||
| 230 | 286 | ||
| 231 | CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | 287 | CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ |
| 232 | CC_OP_SUBW, | 288 | CC_OP_SUBW, |
| 233 | CC_OP_SUBL, | 289 | CC_OP_SUBL, |
| 290 | + CC_OP_SUBQ, | ||
| 234 | 291 | ||
| 235 | CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ | 292 | CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ |
| 236 | CC_OP_SBBW, | 293 | CC_OP_SBBW, |
| 237 | CC_OP_SBBL, | 294 | CC_OP_SBBL, |
| 295 | + CC_OP_SBBQ, | ||
| 238 | 296 | ||
| 239 | CC_OP_LOGICB, /* modify all flags, CC_DST = res */ | 297 | CC_OP_LOGICB, /* modify all flags, CC_DST = res */ |
| 240 | CC_OP_LOGICW, | 298 | CC_OP_LOGICW, |
| 241 | CC_OP_LOGICL, | 299 | CC_OP_LOGICL, |
| 300 | + CC_OP_LOGICQ, | ||
| 242 | 301 | ||
| 243 | CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ | 302 | CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ |
| 244 | CC_OP_INCW, | 303 | CC_OP_INCW, |
| 245 | CC_OP_INCL, | 304 | CC_OP_INCL, |
| 305 | + CC_OP_INCQ, | ||
| 246 | 306 | ||
| 247 | CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ | 307 | CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ |
| 248 | CC_OP_DECW, | 308 | CC_OP_DECW, |
| 249 | CC_OP_DECL, | 309 | CC_OP_DECL, |
| 310 | + CC_OP_DECQ, | ||
| 250 | 311 | ||
| 251 | CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ | 312 | CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ |
| 252 | CC_OP_SHLW, | 313 | CC_OP_SHLW, |
| 253 | CC_OP_SHLL, | 314 | CC_OP_SHLL, |
| 315 | + CC_OP_SHLQ, | ||
| 254 | 316 | ||
| 255 | CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ | 317 | CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ |
| 256 | CC_OP_SARW, | 318 | CC_OP_SARW, |
| 257 | CC_OP_SARL, | 319 | CC_OP_SARL, |
| 320 | + CC_OP_SARQ, | ||
| 258 | 321 | ||
| 259 | CC_OP_NB, | 322 | CC_OP_NB, |
| 260 | }; | 323 | }; |
| @@ -271,22 +334,42 @@ typedef double CPU86_LDouble; | @@ -271,22 +334,42 @@ typedef double CPU86_LDouble; | ||
| 271 | 334 | ||
| 272 | typedef struct SegmentCache { | 335 | typedef struct SegmentCache { |
| 273 | uint32_t selector; | 336 | uint32_t selector; |
| 274 | - uint8_t *base; | 337 | + target_ulong base; |
| 275 | uint32_t limit; | 338 | uint32_t limit; |
| 276 | uint32_t flags; | 339 | uint32_t flags; |
| 277 | } SegmentCache; | 340 | } SegmentCache; |
| 278 | 341 | ||
| 342 | +typedef struct { | ||
| 343 | + union { | ||
| 344 | + uint8_t b[16]; | ||
| 345 | + uint16_t w[8]; | ||
| 346 | + uint32_t l[4]; | ||
| 347 | + uint64_t q[2]; | ||
| 348 | + } u; | ||
| 349 | +} XMMReg; | ||
| 350 | + | ||
| 351 | +#ifdef TARGET_X86_64 | ||
| 352 | +#define CPU_NB_REGS 16 | ||
| 353 | +#else | ||
| 354 | +#define CPU_NB_REGS 8 | ||
| 355 | +#endif | ||
| 356 | + | ||
| 279 | typedef struct CPUX86State { | 357 | typedef struct CPUX86State { |
| 358 | +#if TARGET_LONG_BITS > HOST_LONG_BITS | ||
| 359 | + /* temporaries if we cannot store them in host registers */ | ||
| 360 | + target_ulong t0, t1, t2; | ||
| 361 | +#endif | ||
| 362 | + | ||
| 280 | /* standard registers */ | 363 | /* standard registers */ |
| 281 | - uint32_t regs[8]; | ||
| 282 | - uint32_t eip; | ||
| 283 | - uint32_t eflags; /* eflags register. During CPU emulation, CC | 364 | + target_ulong regs[CPU_NB_REGS]; |
| 365 | + target_ulong eip; | ||
| 366 | + target_ulong eflags; /* eflags register. During CPU emulation, CC | ||
| 284 | flags and DF are set to zero because they are | 367 | flags and DF are set to zero because they are |
| 285 | stored elsewhere */ | 368 | stored elsewhere */ |
| 286 | 369 | ||
| 287 | /* emulator internal eflags handling */ | 370 | /* emulator internal eflags handling */ |
| 288 | - uint32_t cc_src; | ||
| 289 | - uint32_t cc_dst; | 371 | + target_ulong cc_src; |
| 372 | + target_ulong cc_dst; | ||
| 290 | uint32_t cc_op; | 373 | uint32_t cc_op; |
| 291 | int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ | 374 | int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ |
| 292 | uint32_t hflags; /* hidden flags, see HF_xxx constants */ | 375 | uint32_t hflags; /* hidden flags, see HF_xxx constants */ |
| @@ -314,10 +397,21 @@ typedef struct CPUX86State { | @@ -314,10 +397,21 @@ typedef struct CPUX86State { | ||
| 314 | SegmentCache gdt; /* only base and limit are used */ | 397 | SegmentCache gdt; /* only base and limit are used */ |
| 315 | SegmentCache idt; /* only base and limit are used */ | 398 | SegmentCache idt; /* only base and limit are used */ |
| 316 | 399 | ||
| 400 | + XMMReg xmm_regs[CPU_NB_REGS]; | ||
| 401 | + XMMReg xmm_t0; | ||
| 402 | + | ||
| 317 | /* sysenter registers */ | 403 | /* sysenter registers */ |
| 318 | uint32_t sysenter_cs; | 404 | uint32_t sysenter_cs; |
| 319 | uint32_t sysenter_esp; | 405 | uint32_t sysenter_esp; |
| 320 | uint32_t sysenter_eip; | 406 | uint32_t sysenter_eip; |
| 407 | +#ifdef TARGET_X86_64 | ||
| 408 | + target_ulong efer; | ||
| 409 | + target_ulong star; | ||
| 410 | + target_ulong lstar; | ||
| 411 | + target_ulong cstar; | ||
| 412 | + target_ulong fmask; | ||
| 413 | + target_ulong kernelgsbase; | ||
| 414 | +#endif | ||
| 321 | 415 | ||
| 322 | /* temporary data for USE_CODE_COPY mode */ | 416 | /* temporary data for USE_CODE_COPY mode */ |
| 323 | #ifdef USE_CODE_COPY | 417 | #ifdef USE_CODE_COPY |
| @@ -333,8 +427,8 @@ typedef struct CPUX86State { | @@ -333,8 +427,8 @@ typedef struct CPUX86State { | ||
| 333 | int exception_is_int; | 427 | int exception_is_int; |
| 334 | int exception_next_eip; | 428 | int exception_next_eip; |
| 335 | struct TranslationBlock *current_tb; /* currently executing TB */ | 429 | struct TranslationBlock *current_tb; /* currently executing TB */ |
| 336 | - uint32_t cr[5]; /* NOTE: cr1 is unused */ | ||
| 337 | - uint32_t dr[8]; /* debug registers */ | 430 | + target_ulong cr[5]; /* NOTE: cr1 is unused */ |
| 431 | + target_ulong dr[8]; /* debug registers */ | ||
| 338 | int interrupt_request; | 432 | int interrupt_request; |
| 339 | int user_mode_only; /* user mode only simulation */ | 433 | int user_mode_only; /* user mode only simulation */ |
| 340 | 434 | ||
| @@ -346,18 +440,28 @@ typedef struct CPUX86State { | @@ -346,18 +440,28 @@ typedef struct CPUX86State { | ||
| 346 | context) */ | 440 | context) */ |
| 347 | unsigned long mem_write_pc; /* host pc at which the memory was | 441 | unsigned long mem_write_pc; /* host pc at which the memory was |
| 348 | written */ | 442 | written */ |
| 349 | - unsigned long mem_write_vaddr; /* target virtual addr at which the | ||
| 350 | - memory was written */ | 443 | + target_ulong mem_write_vaddr; /* target virtual addr at which the |
| 444 | + memory was written */ | ||
| 351 | /* 0 = kernel, 1 = user */ | 445 | /* 0 = kernel, 1 = user */ |
| 352 | CPUTLBEntry tlb_read[2][CPU_TLB_SIZE]; | 446 | CPUTLBEntry tlb_read[2][CPU_TLB_SIZE]; |
| 353 | CPUTLBEntry tlb_write[2][CPU_TLB_SIZE]; | 447 | CPUTLBEntry tlb_write[2][CPU_TLB_SIZE]; |
| 354 | 448 | ||
| 355 | /* from this point: preserved by CPU reset */ | 449 | /* from this point: preserved by CPU reset */ |
| 356 | /* ice debug support */ | 450 | /* ice debug support */ |
| 357 | - uint32_t breakpoints[MAX_BREAKPOINTS]; | 451 | + target_ulong breakpoints[MAX_BREAKPOINTS]; |
| 358 | int nb_breakpoints; | 452 | int nb_breakpoints; |
| 359 | int singlestep_enabled; | 453 | int singlestep_enabled; |
| 360 | 454 | ||
| 455 | + /* processor features (e.g. for CPUID insn) */ | ||
| 456 | + uint32_t cpuid_vendor1; | ||
| 457 | + uint32_t cpuid_vendor2; | ||
| 458 | + uint32_t cpuid_vendor3; | ||
| 459 | + uint32_t cpuid_version; | ||
| 460 | + uint32_t cpuid_features; | ||
| 461 | + | ||
| 462 | + /* in order to simplify APIC support, we leave this pointer to the | ||
| 463 | + user */ | ||
| 464 | + struct APICState *apic_state; | ||
| 361 | /* user data */ | 465 | /* user data */ |
| 362 | void *opaque; | 466 | void *opaque; |
| 363 | } CPUX86State; | 467 | } CPUX86State; |
| @@ -382,7 +486,7 @@ void cpu_set_ferr(CPUX86State *s); | @@ -382,7 +486,7 @@ void cpu_set_ferr(CPUX86State *s); | ||
| 382 | cache: it synchronizes the hflags with the segment cache values */ | 486 | cache: it synchronizes the hflags with the segment cache values */ |
| 383 | static inline void cpu_x86_load_seg_cache(CPUX86State *env, | 487 | static inline void cpu_x86_load_seg_cache(CPUX86State *env, |
| 384 | int seg_reg, unsigned int selector, | 488 | int seg_reg, unsigned int selector, |
| 385 | - uint8_t *base, unsigned int limit, | 489 | + uint32_t base, unsigned int limit, |
| 386 | unsigned int flags) | 490 | unsigned int flags) |
| 387 | { | 491 | { |
| 388 | SegmentCache *sc; | 492 | SegmentCache *sc; |
| @@ -395,27 +499,45 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env, | @@ -395,27 +499,45 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env, | ||
| 395 | sc->flags = flags; | 499 | sc->flags = flags; |
| 396 | 500 | ||
| 397 | /* update the hidden flags */ | 501 | /* update the hidden flags */ |
| 398 | - new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) | ||
| 399 | - >> (DESC_B_SHIFT - HF_CS32_SHIFT); | ||
| 400 | - new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK) | ||
| 401 | - >> (DESC_B_SHIFT - HF_SS32_SHIFT); | ||
| 402 | - if (!(env->cr[0] & CR0_PE_MASK) || | ||
| 403 | - (env->eflags & VM_MASK) || | ||
| 404 | - !(new_hflags & HF_CS32_MASK)) { | ||
| 405 | - /* XXX: try to avoid this test. The problem comes from the | ||
| 406 | - fact that is real mode or vm86 mode we only modify the | ||
| 407 | - 'base' and 'selector' fields of the segment cache to go | ||
| 408 | - faster. A solution may be to force addseg to one in | ||
| 409 | - translate-i386.c. */ | ||
| 410 | - new_hflags |= HF_ADDSEG_MASK; | ||
| 411 | - } else { | ||
| 412 | - new_hflags |= (((unsigned long)env->segs[R_DS].base | | ||
| 413 | - (unsigned long)env->segs[R_ES].base | | ||
| 414 | - (unsigned long)env->segs[R_SS].base) != 0) << | ||
| 415 | - HF_ADDSEG_SHIFT; | 502 | + { |
| 503 | + if (seg_reg == R_CS) { | ||
| 504 | +#ifdef TARGET_X86_64 | ||
| 505 | + if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { | ||
| 506 | + /* long mode */ | ||
| 507 | + env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; | ||
| 508 | + env->hflags &= ~(HF_ADDSEG_MASK); | ||
| 509 | + } else | ||
| 510 | +#endif | ||
| 511 | + { | ||
| 512 | + /* legacy / compatibility case */ | ||
| 513 | + new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) | ||
| 514 | + >> (DESC_B_SHIFT - HF_CS32_SHIFT); | ||
| 515 | + env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | | ||
| 516 | + new_hflags; | ||
| 517 | + } | ||
| 518 | + } | ||
| 519 | + new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) | ||
| 520 | + >> (DESC_B_SHIFT - HF_SS32_SHIFT); | ||
| 521 | + if (env->hflags & HF_CS64_MASK) { | ||
| 522 | + /* zero base assumed for DS, ES and SS in long mode */ | ||
| 523 | + } else if (!(env->cr[0] & CR0_PE_MASK) || | ||
| 524 | + (env->eflags & VM_MASK) || | ||
| 525 | + !(new_hflags & HF_CS32_MASK)) { | ||
| 526 | + /* XXX: try to avoid this test. The problem comes from the | ||
| 527 | + fact that is real mode or vm86 mode we only modify the | ||
| 528 | + 'base' and 'selector' fields of the segment cache to go | ||
| 529 | + faster. A solution may be to force addseg to one in | ||
| 530 | + translate-i386.c. */ | ||
| 531 | + new_hflags |= HF_ADDSEG_MASK; | ||
| 532 | + } else { | ||
| 533 | + new_hflags |= (((unsigned long)env->segs[R_DS].base | | ||
| 534 | + (unsigned long)env->segs[R_ES].base | | ||
| 535 | + (unsigned long)env->segs[R_SS].base) != 0) << | ||
| 536 | + HF_ADDSEG_SHIFT; | ||
| 537 | + } | ||
| 538 | + env->hflags = (env->hflags & | ||
| 539 | + ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; | ||
| 416 | } | 540 | } |
| 417 | - env->hflags = (env->hflags & | ||
| 418 | - ~(HF_CS32_MASK | HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; | ||
| 419 | } | 541 | } |
| 420 | 542 | ||
| 421 | /* wrapper, just in case memory mappings must be changed */ | 543 | /* wrapper, just in case memory mappings must be changed */ |
| @@ -448,6 +570,9 @@ void cpu_x86_set_a20(CPUX86State *env, int a20_state); | @@ -448,6 +570,9 @@ void cpu_x86_set_a20(CPUX86State *env, int a20_state); | ||
| 448 | 570 | ||
| 449 | uint64_t cpu_get_tsc(CPUX86State *env); | 571 | uint64_t cpu_get_tsc(CPUX86State *env); |
| 450 | 572 | ||
| 573 | +void cpu_set_apic_base(CPUX86State *env, uint64_t val); | ||
| 574 | +uint64_t cpu_get_apic_base(CPUX86State *env); | ||
| 575 | + | ||
| 451 | /* will be suppressed */ | 576 | /* will be suppressed */ |
| 452 | void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); | 577 | void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); |
| 453 | 578 |
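The cpu.h changes above introduce the EFER MSR bits plus the HF_LMA and HF_CS64 hidden flags, and cpu_x86_load_seg_cache() now uses them (HF_LMA mirrors EFER.LMA) to decide whether a code segment runs 64-bit code. A rough standalone sketch of that decision using the constants defined above; the helper function itself is illustrative and not part of the commit:

```c
#include <stdint.h>

#define MSR_EFER_LMA (1 << 10)   /* long mode active, as defined above */
#define DESC_L_MASK  (1 << 21)   /* 64-bit code segment (x86_64 only) */
#define DESC_B_MASK  (1 << 22)   /* default operand-size bit */

/* Illustrative only: a code segment executes 64-bit code when long mode is
 * active and the descriptor has L=1; the architecture requires D=0 in that
 * case, which is why the commit keys HF_CS64 off the L bit alone. */
static int cs_is_64bit(uint64_t efer, uint32_t cs_flags)
{
    return (efer & MSR_EFER_LMA) &&
           (cs_flags & DESC_L_MASK) && !(cs_flags & DESC_B_MASK);
}
```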
target-i386/exec.h
| @@ -20,14 +20,29 @@ | @@ -20,14 +20,29 @@ | ||
| 20 | #include "config.h" | 20 | #include "config.h" |
| 21 | #include "dyngen-exec.h" | 21 | #include "dyngen-exec.h" |
| 22 | 22 | ||
| 23 | +/* XXX: factorize this mess */ | ||
| 24 | +#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) | ||
| 25 | +#define HOST_LONG_BITS 64 | ||
| 26 | +#else | ||
| 27 | +#define HOST_LONG_BITS 32 | ||
| 28 | +#endif | ||
| 29 | + | ||
| 30 | +#ifdef TARGET_X86_64 | ||
| 31 | +#define TARGET_LONG_BITS 64 | ||
| 32 | +#else | ||
| 33 | +#define TARGET_LONG_BITS 32 | ||
| 34 | +#endif | ||
| 35 | + | ||
| 23 | /* at least 4 register variables are defined */ | 36 | /* at least 4 register variables are defined */ |
| 24 | register struct CPUX86State *env asm(AREG0); | 37 | register struct CPUX86State *env asm(AREG0); |
| 38 | + | ||
| 39 | +/* XXX: use 64 bit regs if HOST_LONG_BITS == 64 */ | ||
| 40 | +#if TARGET_LONG_BITS == 32 | ||
| 41 | + | ||
| 25 | register uint32_t T0 asm(AREG1); | 42 | register uint32_t T0 asm(AREG1); |
| 26 | register uint32_t T1 asm(AREG2); | 43 | register uint32_t T1 asm(AREG2); |
| 27 | register uint32_t T2 asm(AREG3); | 44 | register uint32_t T2 asm(AREG3); |
| 28 | 45 | ||
| 29 | -#define A0 T2 | ||
| 30 | - | ||
| 31 | /* if more registers are available, we define some registers too */ | 46 | /* if more registers are available, we define some registers too */ |
| 32 | #ifdef AREG4 | 47 | #ifdef AREG4 |
| 33 | register uint32_t EAX asm(AREG4); | 48 | register uint32_t EAX asm(AREG4); |
| @@ -69,6 +84,17 @@ register uint32_t EDI asm(AREG11); | @@ -69,6 +84,17 @@ register uint32_t EDI asm(AREG11); | ||
| 69 | #define reg_EDI | 84 | #define reg_EDI |
| 70 | #endif | 85 | #endif |
| 71 | 86 | ||
| 87 | +#else | ||
| 88 | + | ||
| 89 | +/* no registers can be used */ | ||
| 90 | +#define T0 (env->t0) | ||
| 91 | +#define T1 (env->t1) | ||
| 92 | +#define T2 (env->t2) | ||
| 93 | + | ||
| 94 | +#endif | ||
| 95 | + | ||
| 96 | +#define A0 T2 | ||
| 97 | + | ||
| 72 | extern FILE *logfile; | 98 | extern FILE *logfile; |
| 73 | extern int loglevel; | 99 | extern int loglevel; |
| 74 | 100 | ||
| @@ -136,26 +162,24 @@ void helper_movl_crN_T0(int reg); | @@ -136,26 +162,24 @@ void helper_movl_crN_T0(int reg); | ||
| 136 | void helper_movl_drN_T0(int reg); | 162 | void helper_movl_drN_T0(int reg); |
| 137 | void helper_invlpg(unsigned int addr); | 163 | void helper_invlpg(unsigned int addr); |
| 138 | void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); | 164 | void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); |
| 139 | -void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3); | 165 | +void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); |
| 140 | void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); | 166 | void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); |
| 141 | void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr); | 167 | void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr); |
| 142 | -int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, | 168 | +int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, |
| 143 | int is_write, int is_user, int is_softmmu); | 169 | int is_write, int is_user, int is_softmmu); |
| 144 | -void tlb_fill(unsigned long addr, int is_write, int is_user, | 170 | +void tlb_fill(target_ulong addr, int is_write, int is_user, |
| 145 | void *retaddr); | 171 | void *retaddr); |
| 146 | void __hidden cpu_lock(void); | 172 | void __hidden cpu_lock(void); |
| 147 | void __hidden cpu_unlock(void); | 173 | void __hidden cpu_unlock(void); |
| 148 | void do_interrupt(int intno, int is_int, int error_code, | 174 | void do_interrupt(int intno, int is_int, int error_code, |
| 149 | - unsigned int next_eip, int is_hw); | 175 | + target_ulong next_eip, int is_hw); |
| 150 | void do_interrupt_user(int intno, int is_int, int error_code, | 176 | void do_interrupt_user(int intno, int is_int, int error_code, |
| 151 | - unsigned int next_eip); | 177 | + target_ulong next_eip); |
| 152 | void raise_interrupt(int intno, int is_int, int error_code, | 178 | void raise_interrupt(int intno, int is_int, int error_code, |
| 153 | unsigned int next_eip); | 179 | unsigned int next_eip); |
| 154 | void raise_exception_err(int exception_index, int error_code); | 180 | void raise_exception_err(int exception_index, int error_code); |
| 155 | void raise_exception(int exception_index); | 181 | void raise_exception(int exception_index); |
| 156 | void __hidden cpu_loop_exit(void); | 182 | void __hidden cpu_loop_exit(void); |
| 157 | -void helper_fsave(uint8_t *ptr, int data32); | ||
| 158 | -void helper_frstor(uint8_t *ptr, int data32); | ||
| 159 | 183 | ||
| 160 | void OPPROTO op_movl_eflags_T0(void); | 184 | void OPPROTO op_movl_eflags_T0(void); |
| 161 | void OPPROTO op_movl_T0_eflags(void); | 185 | void OPPROTO op_movl_T0_eflags(void); |
| @@ -163,13 +187,20 @@ void raise_interrupt(int intno, int is_int, int error_code, | @@ -163,13 +187,20 @@ void raise_interrupt(int intno, int is_int, int error_code, | ||
| 163 | unsigned int next_eip); | 187 | unsigned int next_eip); |
| 164 | void raise_exception_err(int exception_index, int error_code); | 188 | void raise_exception_err(int exception_index, int error_code); |
| 165 | void raise_exception(int exception_index); | 189 | void raise_exception(int exception_index); |
| 166 | -void helper_divl_EAX_T0(uint32_t eip); | ||
| 167 | -void helper_idivl_EAX_T0(uint32_t eip); | 190 | +void helper_divl_EAX_T0(void); |
| 191 | +void helper_idivl_EAX_T0(void); | ||
| 192 | +void helper_mulq_EAX_T0(void); | ||
| 193 | +void helper_imulq_EAX_T0(void); | ||
| 194 | +void helper_imulq_T0_T1(void); | ||
| 195 | +void helper_divq_EAX_T0(void); | ||
| 196 | +void helper_idivq_EAX_T0(void); | ||
| 168 | void helper_cmpxchg8b(void); | 197 | void helper_cmpxchg8b(void); |
| 169 | void helper_cpuid(void); | 198 | void helper_cpuid(void); |
| 170 | void helper_enter_level(int level, int data32); | 199 | void helper_enter_level(int level, int data32); |
| 171 | void helper_sysenter(void); | 200 | void helper_sysenter(void); |
| 172 | void helper_sysexit(void); | 201 | void helper_sysexit(void); |
| 202 | +void helper_syscall(void); | ||
| 203 | +void helper_sysret(int dflag); | ||
| 173 | void helper_rdtsc(void); | 204 | void helper_rdtsc(void); |
| 174 | void helper_rdmsr(void); | 205 | void helper_rdmsr(void); |
| 175 | void helper_wrmsr(void); | 206 | void helper_wrmsr(void); |
| @@ -252,7 +283,7 @@ void check_iol_DX(void); | @@ -252,7 +283,7 @@ void check_iol_DX(void); | ||
| 252 | #define stl(p, v) stl_data(p, v) | 283 | #define stl(p, v) stl_data(p, v) |
| 253 | #define stq(p, v) stq_data(p, v) | 284 | #define stq(p, v) stq_data(p, v) |
| 254 | 285 | ||
| 255 | -static inline double ldfq(void *ptr) | 286 | +static inline double ldfq(target_ulong ptr) |
| 256 | { | 287 | { |
| 257 | union { | 288 | union { |
| 258 | double d; | 289 | double d; |
| @@ -262,7 +293,7 @@ static inline double ldfq(void *ptr) | @@ -262,7 +293,7 @@ static inline double ldfq(void *ptr) | ||
| 262 | return u.d; | 293 | return u.d; |
| 263 | } | 294 | } |
| 264 | 295 | ||
| 265 | -static inline void stfq(void *ptr, double v) | 296 | +static inline void stfq(target_ulong ptr, double v) |
| 266 | { | 297 | { |
| 267 | union { | 298 | union { |
| 268 | double d; | 299 | double d; |
| @@ -272,7 +303,7 @@ static inline void stfq(void *ptr, double v) | @@ -272,7 +303,7 @@ static inline void stfq(void *ptr, double v) | ||
| 272 | stq(ptr, u.i); | 303 | stq(ptr, u.i); |
| 273 | } | 304 | } |
| 274 | 305 | ||
| 275 | -static inline float ldfl(void *ptr) | 306 | +static inline float ldfl(target_ulong ptr) |
| 276 | { | 307 | { |
| 277 | union { | 308 | union { |
| 278 | float f; | 309 | float f; |
| @@ -282,7 +313,7 @@ static inline float ldfl(void *ptr) | @@ -282,7 +313,7 @@ static inline float ldfl(void *ptr) | ||
| 282 | return u.f; | 313 | return u.f; |
| 283 | } | 314 | } |
| 284 | 315 | ||
| 285 | -static inline void stfl(void *ptr, float v) | 316 | +static inline void stfl(target_ulong ptr, float v) |
| 286 | { | 317 | { |
| 287 | union { | 318 | union { |
| 288 | float f; | 319 | float f; |
| @@ -411,7 +442,7 @@ static inline void fpop(void) | @@ -411,7 +442,7 @@ static inline void fpop(void) | ||
| 411 | } | 442 | } |
| 412 | 443 | ||
| 413 | #ifndef USE_X86LDOUBLE | 444 | #ifndef USE_X86LDOUBLE |
| 414 | -static inline CPU86_LDouble helper_fldt(uint8_t *ptr) | 445 | +static inline CPU86_LDouble helper_fldt(target_ulong ptr) |
| 415 | { | 446 | { |
| 416 | CPU86_LDoubleU temp; | 447 | CPU86_LDoubleU temp; |
| 417 | int upper, e; | 448 | int upper, e; |
| @@ -451,12 +482,12 @@ static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr) | @@ -451,12 +482,12 @@ static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr) | ||
| 451 | 482 | ||
| 452 | #ifdef CONFIG_USER_ONLY | 483 | #ifdef CONFIG_USER_ONLY |
| 453 | 484 | ||
| 454 | -static inline CPU86_LDouble helper_fldt(uint8_t *ptr) | 485 | +static inline CPU86_LDouble helper_fldt(target_ulong ptr) |
| 455 | { | 486 | { |
| 456 | return *(CPU86_LDouble *)ptr; | 487 | return *(CPU86_LDouble *)ptr; |
| 457 | } | 488 | } |
| 458 | 489 | ||
| 459 | -static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr) | 490 | +static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr) |
| 460 | { | 491 | { |
| 461 | *(CPU86_LDouble *)ptr = f; | 492 | *(CPU86_LDouble *)ptr = f; |
| 462 | } | 493 | } |
| @@ -465,7 +496,7 @@ static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr) | @@ -465,7 +496,7 @@ static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr) | ||
| 465 | 496 | ||
| 466 | /* we use memory access macros */ | 497 | /* we use memory access macros */ |
| 467 | 498 | ||
| 468 | -static inline CPU86_LDouble helper_fldt(uint8_t *ptr) | 499 | +static inline CPU86_LDouble helper_fldt(target_ulong ptr) |
| 469 | { | 500 | { |
| 470 | CPU86_LDoubleU temp; | 501 | CPU86_LDoubleU temp; |
| 471 | 502 | ||
| @@ -474,7 +505,7 @@ static inline CPU86_LDouble helper_fldt(uint8_t *ptr) | @@ -474,7 +505,7 @@ static inline CPU86_LDouble helper_fldt(uint8_t *ptr) | ||
| 474 | return temp.d; | 505 | return temp.d; |
| 475 | } | 506 | } |
| 476 | 507 | ||
| 477 | -static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr) | 508 | +static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr) |
| 478 | { | 509 | { |
| 479 | CPU86_LDoubleU temp; | 510 | CPU86_LDoubleU temp; |
| 480 | 511 | ||
| @@ -522,10 +553,12 @@ void helper_fscale(void); | @@ -522,10 +553,12 @@ void helper_fscale(void); | ||
| 522 | void helper_fsin(void); | 553 | void helper_fsin(void); |
| 523 | void helper_fcos(void); | 554 | void helper_fcos(void); |
| 524 | void helper_fxam_ST0(void); | 555 | void helper_fxam_ST0(void); |
| 525 | -void helper_fstenv(uint8_t *ptr, int data32); | ||
| 526 | -void helper_fldenv(uint8_t *ptr, int data32); | ||
| 527 | -void helper_fsave(uint8_t *ptr, int data32); | ||
| 528 | -void helper_frstor(uint8_t *ptr, int data32); | 556 | +void helper_fstenv(target_ulong ptr, int data32); |
| 557 | +void helper_fldenv(target_ulong ptr, int data32); | ||
| 558 | +void helper_fsave(target_ulong ptr, int data32); | ||
| 559 | +void helper_frstor(target_ulong ptr, int data32); | ||
| 560 | +void helper_fxsave(target_ulong ptr, int data64); | ||
| 561 | +void helper_fxrstor(target_ulong ptr, int data64); | ||
| 529 | void restore_native_fp_state(CPUState *env); | 562 | void restore_native_fp_state(CPUState *env); |
| 530 | void save_native_fp_state(CPUState *env); | 563 | void save_native_fp_state(CPUState *env); |
| 531 | 564 |
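The helper.c hunks that follow add a 64-bit IDT interrupt path plus SYSCALL/SYSRET support built on the STAR/LSTAR/CSTAR/FMASK MSRs declared in cpu.h. As a compact reading aid, here is a sketch of the field layout helper_syscall() below relies on for the 64-bit (LMA) path; the function and parameter names are mine, not the commit's:

```c
#include <stdint.h>

/* Layout consumed by helper_syscall()/helper_sysret() below:
 *   STAR[47:32] -> kernel CS selector on SYSCALL (SS = selector + 8)
 *   STAR[63:48] -> base selector for the user CS/SS restored by SYSRET
 *   LSTAR       -> entry RIP when called from 64-bit code, CSTAR from compat mode
 *   FMASK       -> RFLAGS bits cleared on kernel entry */
static void sketch_syscall64(uint64_t star, uint64_t lstar, uint64_t cstar,
                             uint64_t fmask, int from_64bit_cs,
                             uint16_t *cs, uint64_t *rip, uint64_t *rflags)
{
    *cs = (star >> 32) & 0xffff;          /* kernel code segment selector */
    *rflags &= ~fmask;                    /* e.g. mask IF/TF on entry */
    *rip = from_64bit_cs ? lstar : cstar; /* pick the entry point */
}
```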
target-i386/helper.c
| @@ -119,7 +119,7 @@ static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, | @@ -119,7 +119,7 @@ static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, | ||
| 119 | { | 119 | { |
| 120 | SegmentCache *dt; | 120 | SegmentCache *dt; |
| 121 | int index; | 121 | int index; |
| 122 | - uint8_t *ptr; | 122 | + target_ulong ptr; |
| 123 | 123 | ||
| 124 | if (selector & 0x4) | 124 | if (selector & 0x4) |
| 125 | dt = &env->ldt; | 125 | dt = &env->ldt; |
| @@ -143,9 +143,9 @@ static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) | @@ -143,9 +143,9 @@ static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) | ||
| 143 | return limit; | 143 | return limit; |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | -static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2) | 146 | +static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2) |
| 147 | { | 147 | { |
| 148 | - return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); | 148 | + return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2) | 151 | static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2) |
| @@ -160,7 +160,7 @@ static inline void load_seg_vm(int seg, int selector) | @@ -160,7 +160,7 @@ static inline void load_seg_vm(int seg, int selector) | ||
| 160 | { | 160 | { |
| 161 | selector &= 0xffff; | 161 | selector &= 0xffff; |
| 162 | cpu_x86_load_seg_cache(env, seg, selector, | 162 | cpu_x86_load_seg_cache(env, seg, selector, |
| 163 | - (uint8_t *)(selector << 4), 0xffff, 0); | 163 | + (selector << 4), 0xffff, 0); |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, | 166 | static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, |
| @@ -258,13 +258,13 @@ static void switch_tss(int tss_selector, | @@ -258,13 +258,13 @@ static void switch_tss(int tss_selector, | ||
| 258 | uint32_t next_eip) | 258 | uint32_t next_eip) |
| 259 | { | 259 | { |
| 260 | int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; | 260 | int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; |
| 261 | - uint8_t *tss_base; | 261 | + target_ulong tss_base; |
| 262 | uint32_t new_regs[8], new_segs[6]; | 262 | uint32_t new_regs[8], new_segs[6]; |
| 263 | uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; | 263 | uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; |
| 264 | uint32_t old_eflags, eflags_mask; | 264 | uint32_t old_eflags, eflags_mask; |
| 265 | SegmentCache *dt; | 265 | SegmentCache *dt; |
| 266 | int index; | 266 | int index; |
| 267 | - uint8_t *ptr; | 267 | + target_ulong ptr; |
| 268 | 268 | ||
| 269 | type = (e2 >> DESC_TYPE_SHIFT) & 0xf; | 269 | type = (e2 >> DESC_TYPE_SHIFT) & 0xf; |
| 270 | #ifdef DEBUG_PCALL | 270 | #ifdef DEBUG_PCALL |
| @@ -345,7 +345,7 @@ static void switch_tss(int tss_selector, | @@ -345,7 +345,7 @@ static void switch_tss(int tss_selector, | ||
| 345 | 345 | ||
| 346 | /* clear busy bit (it is restartable) */ | 346 | /* clear busy bit (it is restartable) */ |
| 347 | if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { | 347 | if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { |
| 348 | - uint8_t *ptr; | 348 | + target_ulong ptr; |
| 349 | uint32_t e2; | 349 | uint32_t e2; |
| 350 | ptr = env->gdt.base + (env->tr.selector & ~7); | 350 | ptr = env->gdt.base + (env->tr.selector & ~7); |
| 351 | e2 = ldl_kernel(ptr + 4); | 351 | e2 = ldl_kernel(ptr + 4); |
| @@ -397,7 +397,7 @@ static void switch_tss(int tss_selector, | @@ -397,7 +397,7 @@ static void switch_tss(int tss_selector, | ||
| 397 | 397 | ||
| 398 | /* set busy bit */ | 398 | /* set busy bit */ |
| 399 | if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { | 399 | if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { |
| 400 | - uint8_t *ptr; | 400 | + target_ulong ptr; |
| 401 | uint32_t e2; | 401 | uint32_t e2; |
| 402 | ptr = env->gdt.base + (tss_selector & ~7); | 402 | ptr = env->gdt.base + (tss_selector & ~7); |
| 403 | e2 = ldl_kernel(ptr + 4); | 403 | e2 = ldl_kernel(ptr + 4); |
| @@ -445,11 +445,11 @@ static void switch_tss(int tss_selector, | @@ -445,11 +445,11 @@ static void switch_tss(int tss_selector, | ||
| 445 | cpu_x86_set_cpl(env, new_segs[R_CS] & 3); | 445 | cpu_x86_set_cpl(env, new_segs[R_CS] & 3); |
| 446 | /* first just selectors as the rest may trigger exceptions */ | 446 | /* first just selectors as the rest may trigger exceptions */ |
| 447 | for(i = 0; i < 6; i++) | 447 | for(i = 0; i < 6; i++) |
| 448 | - cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0); | 448 | + cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); |
| 449 | } | 449 | } |
| 450 | 450 | ||
| 451 | env->ldt.selector = new_ldt & ~4; | 451 | env->ldt.selector = new_ldt & ~4; |
| 452 | - env->ldt.base = NULL; | 452 | + env->ldt.base = 0; |
| 453 | env->ldt.limit = 0; | 453 | env->ldt.limit = 0; |
| 454 | env->ldt.flags = 0; | 454 | env->ldt.flags = 0; |
| 455 | 455 | ||
| @@ -573,7 +573,7 @@ static inline unsigned int get_sp_mask(unsigned int e2) | @@ -573,7 +573,7 @@ static inline unsigned int get_sp_mask(unsigned int e2) | ||
| 573 | 573 | ||
| 574 | #define POPL(ssp, sp, sp_mask, val)\ | 574 | #define POPL(ssp, sp, sp_mask, val)\ |
| 575 | {\ | 575 | {\ |
| 576 | - val = ldl_kernel((ssp) + (sp & (sp_mask)));\ | 576 | + val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\ |
| 577 | sp += 4;\ | 577 | sp += 4;\ |
| 578 | } | 578 | } |
| 579 | 579 | ||
| @@ -582,7 +582,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, | @@ -582,7 +582,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, | ||
| 582 | unsigned int next_eip, int is_hw) | 582 | unsigned int next_eip, int is_hw) |
| 583 | { | 583 | { |
| 584 | SegmentCache *dt; | 584 | SegmentCache *dt; |
| 585 | - uint8_t *ptr, *ssp; | 585 | + target_ulong ptr, ssp; |
| 586 | int type, dpl, selector, ss_dpl, cpl, sp_mask; | 586 | int type, dpl, selector, ss_dpl, cpl, sp_mask; |
| 587 | int has_error_code, new_stack, shift; | 587 | int has_error_code, new_stack, shift; |
| 588 | uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2; | 588 | uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2; |
| @@ -703,7 +703,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, | @@ -703,7 +703,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, | ||
| 703 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | 703 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 704 | new_stack = 0; /* avoid warning */ | 704 | new_stack = 0; /* avoid warning */ |
| 705 | sp_mask = 0; /* avoid warning */ | 705 | sp_mask = 0; /* avoid warning */ |
| 706 | - ssp = NULL; /* avoid warning */ | 706 | + ssp = 0; /* avoid warning */ |
| 707 | esp = 0; /* avoid warning */ | 707 | esp = 0; /* avoid warning */ |
| 708 | } | 708 | } |
| 709 | 709 | ||
| @@ -754,10 +754,10 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, | @@ -754,10 +754,10 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, | ||
| 754 | 754 | ||
| 755 | if (new_stack) { | 755 | if (new_stack) { |
| 756 | if (env->eflags & VM_MASK) { | 756 | if (env->eflags & VM_MASK) { |
| 757 | - cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0, 0); | ||
| 758 | - cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0, 0); | ||
| 759 | - cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0, 0); | ||
| 760 | - cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0, 0); | 757 | + cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); |
| 758 | + cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); | ||
| 759 | + cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); | ||
| 760 | + cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); | ||
| 761 | } | 761 | } |
| 762 | ss = (ss & ~3) | dpl; | 762 | ss = (ss & ~3) | dpl; |
| 763 | cpu_x86_load_seg_cache(env, R_SS, ss, | 763 | cpu_x86_load_seg_cache(env, R_SS, ss, |
| @@ -780,12 +780,264 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, | @@ -780,12 +780,264 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, | ||
| 780 | env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); | 780 | env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); |
| 781 | } | 781 | } |
| 782 | 782 | ||
| 783 | +#ifdef TARGET_X86_64 | ||
| 784 | + | ||
| 785 | +#define PUSHQ(sp, val)\ | ||
| 786 | +{\ | ||
| 787 | + sp -= 8;\ | ||
| 788 | + stq_kernel(sp, (val));\ | ||
| 789 | +} | ||
| 790 | + | ||
| 791 | +#define POPQ(sp, val)\ | ||
| 792 | +{\ | ||
| 793 | + val = ldq_kernel(sp);\ | ||
| 794 | + sp += 8;\ | ||
| 795 | +} | ||
| 796 | + | ||
| 797 | +static inline target_ulong get_rsp_from_tss(int level) | ||
| 798 | +{ | ||
| 799 | + int index; | ||
| 800 | + | ||
| 801 | +#if 0 | ||
| 802 | + printf("TR: base=" TARGET_FMT_lx " limit=%x\n", | ||
| 803 | + env->tr.base, env->tr.limit); | ||
| 804 | +#endif | ||
| 805 | + | ||
| 806 | + if (!(env->tr.flags & DESC_P_MASK)) | ||
| 807 | + cpu_abort(env, "invalid tss"); | ||
| 808 | + index = 8 * level + 4; | ||
| 809 | + if ((index + 7) > env->tr.limit) | ||
| 810 | + raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc); | ||
| 811 | + return ldq_kernel(env->tr.base + index); | ||
| 812 | +} | ||
| 813 | + | ||
| 814 | +/* 64 bit interrupt */ | ||
| 815 | +static void do_interrupt64(int intno, int is_int, int error_code, | ||
| 816 | + target_ulong next_eip, int is_hw) | ||
| 817 | +{ | ||
| 818 | + SegmentCache *dt; | ||
| 819 | + target_ulong ptr; | ||
| 820 | + int type, dpl, selector, cpl, ist; | ||
| 821 | + int has_error_code, new_stack; | ||
| 822 | + uint32_t e1, e2, e3, ss; | ||
| 823 | + target_ulong old_eip, esp, offset; | ||
| 824 | + | ||
| 825 | + has_error_code = 0; | ||
| 826 | + if (!is_int && !is_hw) { | ||
| 827 | + switch(intno) { | ||
| 828 | + case 8: | ||
| 829 | + case 10: | ||
| 830 | + case 11: | ||
| 831 | + case 12: | ||
| 832 | + case 13: | ||
| 833 | + case 14: | ||
| 834 | + case 17: | ||
| 835 | + has_error_code = 1; | ||
| 836 | + break; | ||
| 837 | + } | ||
| 838 | + } | ||
| 839 | + if (is_int) | ||
| 840 | + old_eip = next_eip; | ||
| 841 | + else | ||
| 842 | + old_eip = env->eip; | ||
| 843 | + | ||
| 844 | + dt = &env->idt; | ||
| 845 | + if (intno * 16 + 15 > dt->limit) | ||
| 846 | + raise_exception_err(EXCP0D_GPF, intno * 16 + 2); | ||
| 847 | + ptr = dt->base + intno * 16; | ||
| 848 | + e1 = ldl_kernel(ptr); | ||
| 849 | + e2 = ldl_kernel(ptr + 4); | ||
| 850 | + e3 = ldl_kernel(ptr + 8); | ||
| 851 | + /* check gate type */ | ||
| 852 | + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; | ||
| 853 | + switch(type) { | ||
| 854 | + case 14: /* 386 interrupt gate */ | ||
| 855 | + case 15: /* 386 trap gate */ | ||
| 856 | + break; | ||
| 857 | + default: | ||
| 858 | + raise_exception_err(EXCP0D_GPF, intno * 16 + 2); | ||
| 859 | + break; | ||
| 860 | + } | ||
| 861 | + dpl = (e2 >> DESC_DPL_SHIFT) & 3; | ||
| 862 | + cpl = env->hflags & HF_CPL_MASK; | ||
| 863 | + /* check privledge if software int */ | ||
| 864 | + if (is_int && dpl < cpl) | ||
| 865 | + raise_exception_err(EXCP0D_GPF, intno * 16 + 2); | ||
| 866 | + /* check valid bit */ | ||
| 867 | + if (!(e2 & DESC_P_MASK)) | ||
| 868 | + raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2); | ||
| 869 | + selector = e1 >> 16; | ||
| 870 | + offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); | ||
| 871 | + ist = e2 & 7; | ||
| 872 | + if ((selector & 0xfffc) == 0) | ||
| 873 | + raise_exception_err(EXCP0D_GPF, 0); | ||
| 874 | + | ||
| 875 | + if (load_segment(&e1, &e2, selector) != 0) | ||
| 876 | + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | ||
| 877 | + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) | ||
| 878 | + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | ||
| 879 | + dpl = (e2 >> DESC_DPL_SHIFT) & 3; | ||
| 880 | + if (dpl > cpl) | ||
| 881 | + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | ||
| 882 | + if (!(e2 & DESC_P_MASK)) | ||
| 883 | + raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); | ||
| 884 | + if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) | ||
| 885 | + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | ||
| 886 | + if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) { | ||
| 887 | + /* to inner priviledge */ | ||
| 888 | + if (ist != 0) | ||
| 889 | + esp = get_rsp_from_tss(ist + 3); | ||
| 890 | + else | ||
| 891 | + esp = get_rsp_from_tss(dpl); | ||
| 892 | + ss = 0; | ||
| 893 | + new_stack = 1; | ||
| 894 | + } else if ((e2 & DESC_C_MASK) || dpl == cpl) { | ||
| 895 | + /* to same priviledge */ | ||
| 896 | + if (env->eflags & VM_MASK) | ||
| 897 | + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | ||
| 898 | + new_stack = 0; | ||
| 899 | + esp = ESP & ~0xf; /* align stack */ | ||
| 900 | + dpl = cpl; | ||
| 901 | + } else { | ||
| 902 | + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | ||
| 903 | + new_stack = 0; /* avoid warning */ | ||
| 904 | + esp = 0; /* avoid warning */ | ||
| 905 | + } | ||
| 906 | + | ||
| 907 | + PUSHQ(esp, env->segs[R_SS].selector); | ||
| 908 | + PUSHQ(esp, ESP); | ||
| 909 | + PUSHQ(esp, compute_eflags()); | ||
| 910 | + PUSHQ(esp, env->segs[R_CS].selector); | ||
| 911 | + PUSHQ(esp, old_eip); | ||
| 912 | + if (has_error_code) { | ||
| 913 | + PUSHQ(esp, error_code); | ||
| 914 | + } | ||
| 915 | + | ||
| 916 | + if (new_stack) { | ||
| 917 | + ss = 0 | dpl; | ||
| 918 | + cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); | ||
| 919 | + } | ||
| 920 | + ESP = esp; | ||
| 921 | + | ||
| 922 | + selector = (selector & ~3) | dpl; | ||
| 923 | + cpu_x86_load_seg_cache(env, R_CS, selector, | ||
| 924 | + get_seg_base(e1, e2), | ||
| 925 | + get_seg_limit(e1, e2), | ||
| 926 | + e2); | ||
| 927 | + cpu_x86_set_cpl(env, dpl); | ||
| 928 | + env->eip = offset; | ||
| 929 | + | ||
| 930 | + /* interrupt gate clear IF mask */ | ||
| 931 | + if ((type & 1) == 0) { | ||
| 932 | + env->eflags &= ~IF_MASK; | ||
| 933 | + } | ||
| 934 | + env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); | ||
| 935 | +} | ||
| 936 | + | ||
| 937 | +void helper_syscall(void) | ||
| 938 | +{ | ||
| 939 | + int selector; | ||
| 940 | + | ||
| 941 | + if (!(env->efer & MSR_EFER_SCE)) { | ||
| 942 | + raise_exception_err(EXCP06_ILLOP, 0); | ||
| 943 | + } | ||
| 944 | + selector = (env->star >> 32) & 0xffff; | ||
| 945 | + if (env->hflags & HF_LMA_MASK) { | ||
| 946 | + ECX = env->eip; | ||
| 947 | + env->regs[11] = compute_eflags(); | ||
| 948 | + | ||
| 949 | + cpu_x86_set_cpl(env, 0); | ||
| 950 | + cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, | ||
| 951 | + 0, 0xffffffff, | ||
| 952 | + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | ||
| 953 | + DESC_S_MASK | | ||
| 954 | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK); | ||
| 955 | + cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, | ||
| 956 | + 0, 0xffffffff, | ||
| 957 | + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | ||
| 958 | + DESC_S_MASK | | ||
| 959 | + DESC_W_MASK | DESC_A_MASK); | ||
| 960 | + env->eflags &= ~env->fmask; | ||
| 961 | + if (env->hflags & HF_CS64_MASK) | ||
| 962 | + env->eip = env->lstar; | ||
| 963 | + else | ||
| 964 | + env->eip = env->cstar; | ||
| 965 | + } else { | ||
| 966 | + ECX = (uint32_t)env->eip; | ||
| 967 | + | ||
| 968 | + cpu_x86_set_cpl(env, 0); | ||
| 969 | + cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, | ||
| 970 | + 0, 0xffffffff, | ||
| 971 | + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | ||
| 972 | + DESC_S_MASK | | ||
| 973 | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); | ||
| 974 | + cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, | ||
| 975 | + 0, 0xffffffff, | ||
| 976 | + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | ||
| 977 | + DESC_S_MASK | | ||
| 978 | + DESC_W_MASK | DESC_A_MASK); | ||
| 979 | + env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK); | ||
| 980 | + env->eip = (uint32_t)env->star; | ||
| 981 | + } | ||
| 982 | +} | ||
| 983 | + | ||
| 984 | +void helper_sysret(int dflag) | ||
| 985 | +{ | ||
| 986 | + int cpl, selector; | ||
| 987 | + | ||
| 988 | + cpl = env->hflags & HF_CPL_MASK; | ||
| 989 | + if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { | ||
| 990 | + raise_exception_err(EXCP0D_GPF, 0); | ||
| 991 | + } | ||
| 992 | + selector = (env->star >> 48) & 0xffff; | ||
| 993 | + if (env->hflags & HF_LMA_MASK) { | ||
| 994 | + if (dflag == 2) { | ||
| 995 | + cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, | ||
| 996 | + 0, 0xffffffff, | ||
| 997 | + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | ||
| 998 | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | | ||
| 999 | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | | ||
| 1000 | + DESC_L_MASK); | ||
| 1001 | + env->eip = ECX; | ||
| 1002 | + } else { | ||
| 1003 | + cpu_x86_load_seg_cache(env, R_CS, selector | 3, | ||
| 1004 | + 0, 0xffffffff, | ||
| 1005 | + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | ||
| 1006 | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | | ||
| 1007 | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); | ||
| 1008 | + env->eip = (uint32_t)ECX; | ||
| 1009 | + } | ||
| 1010 | + cpu_x86_load_seg_cache(env, R_SS, selector + 8, | ||
| 1011 | + 0, 0xffffffff, | ||
| 1012 | + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | ||
| 1013 | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | | ||
| 1014 | + DESC_W_MASK | DESC_A_MASK); | ||
| 1015 | + load_eflags((uint32_t)(env->regs[11]), 0xffffffff); | ||
| 1016 | + cpu_x86_set_cpl(env, 3); | ||
| 1017 | + } else { | ||
| 1018 | + cpu_x86_load_seg_cache(env, R_CS, selector | 3, | ||
| 1019 | + 0, 0xffffffff, | ||
| 1020 | + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | ||
| 1021 | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | | ||
| 1022 | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); | ||
| 1023 | + env->eip = (uint32_t)ECX; | ||
| 1024 | + cpu_x86_load_seg_cache(env, R_SS, selector + 8, | ||
| 1025 | + 0, 0xffffffff, | ||
| 1026 | + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | ||
| 1027 | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | | ||
| 1028 | + DESC_W_MASK | DESC_A_MASK); | ||
| 1029 | + env->eflags |= IF_MASK; | ||
| 1030 | + cpu_x86_set_cpl(env, 3); | ||
| 1031 | + } | ||
| 1032 | +} | ||
| 1033 | +#endif | ||
| 1034 | + | ||
| 783 | /* real mode interrupt */ | 1035 | /* real mode interrupt */ |
| 784 | static void do_interrupt_real(int intno, int is_int, int error_code, | 1036 | static void do_interrupt_real(int intno, int is_int, int error_code, |
| 785 | unsigned int next_eip) | 1037 | unsigned int next_eip) |
| 786 | { | 1038 | { |
| 787 | SegmentCache *dt; | 1039 | SegmentCache *dt; |
| 788 | - uint8_t *ptr, *ssp; | 1040 | + target_ulong ptr, ssp; |
| 789 | int selector; | 1041 | int selector; |
| 790 | uint32_t offset, esp; | 1042 | uint32_t offset, esp; |
| 791 | uint32_t old_cs, old_eip; | 1043 | uint32_t old_cs, old_eip; |
| @@ -813,16 +1065,16 @@ static void do_interrupt_real(int intno, int is_int, int error_code, | @@ -813,16 +1065,16 @@ static void do_interrupt_real(int intno, int is_int, int error_code, | ||
| 813 | ESP = (ESP & ~0xffff) | (esp & 0xffff); | 1065 | ESP = (ESP & ~0xffff) | (esp & 0xffff); |
| 814 | env->eip = offset; | 1066 | env->eip = offset; |
| 815 | env->segs[R_CS].selector = selector; | 1067 | env->segs[R_CS].selector = selector; |
| 816 | - env->segs[R_CS].base = (uint8_t *)(selector << 4); | 1068 | + env->segs[R_CS].base = (selector << 4); |
| 817 | env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); | 1069 | env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); |
| 818 | } | 1070 | } |
| 819 | 1071 | ||
| 820 | /* fake user mode interrupt */ | 1072 | /* fake user mode interrupt */ |
| 821 | void do_interrupt_user(int intno, int is_int, int error_code, | 1073 | void do_interrupt_user(int intno, int is_int, int error_code, |
| 822 | - unsigned int next_eip) | 1074 | + target_ulong next_eip) |
| 823 | { | 1075 | { |
| 824 | SegmentCache *dt; | 1076 | SegmentCache *dt; |
| 825 | - uint8_t *ptr; | 1077 | + target_ulong ptr; |
| 826 | int dpl, cpl; | 1078 | int dpl, cpl; |
| 827 | uint32_t e2; | 1079 | uint32_t e2; |
| 828 | 1080 | ||
| @@ -849,26 +1101,26 @@ void do_interrupt_user(int intno, int is_int, int error_code, | @@ -849,26 +1101,26 @@ void do_interrupt_user(int intno, int is_int, int error_code, | ||
| 849 | * instruction. It is only relevant if is_int is TRUE. | 1101 | * instruction. It is only relevant if is_int is TRUE. |
| 850 | */ | 1102 | */ |
| 851 | void do_interrupt(int intno, int is_int, int error_code, | 1103 | void do_interrupt(int intno, int is_int, int error_code, |
| 852 | - unsigned int next_eip, int is_hw) | 1104 | + target_ulong next_eip, int is_hw) |
| 853 | { | 1105 | { |
| 854 | #ifdef DEBUG_PCALL | 1106 | #ifdef DEBUG_PCALL |
| 855 | if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) { | 1107 | if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) { |
| 856 | if ((env->cr[0] & CR0_PE_MASK)) { | 1108 | if ((env->cr[0] & CR0_PE_MASK)) { |
| 857 | static int count; | 1109 | static int count; |
| 858 | - fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:%08x pc=%08x SP=%04x:%08x", | 1110 | + fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, |
| 859 | count, intno, error_code, is_int, | 1111 | count, intno, error_code, is_int, |
| 860 | env->hflags & HF_CPL_MASK, | 1112 | env->hflags & HF_CPL_MASK, |
| 861 | env->segs[R_CS].selector, EIP, | 1113 | env->segs[R_CS].selector, EIP, |
| 862 | (int)env->segs[R_CS].base + EIP, | 1114 | (int)env->segs[R_CS].base + EIP, |
| 863 | env->segs[R_SS].selector, ESP); | 1115 | env->segs[R_SS].selector, ESP); |
| 864 | if (intno == 0x0e) { | 1116 | if (intno == 0x0e) { |
| 865 | - fprintf(logfile, " CR2=%08x", env->cr[2]); | 1117 | + fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]); |
| 866 | } else { | 1118 | } else { |
| 867 | - fprintf(logfile, " EAX=%08x", EAX); | 1119 | + fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX); |
| 868 | } | 1120 | } |
| 869 | fprintf(logfile, "\n"); | 1121 | fprintf(logfile, "\n"); |
| 870 | -#if 0 | ||
| 871 | cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); | 1122 | cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); |
| 1123 | +#if 0 | ||
| 872 | { | 1124 | { |
| 873 | int i; | 1125 | int i; |
| 874 | uint8_t *ptr; | 1126 | uint8_t *ptr; |
| @@ -885,7 +1137,14 @@ void do_interrupt(int intno, int is_int, int error_code, | @@ -885,7 +1137,14 @@ void do_interrupt(int intno, int is_int, int error_code, | ||
| 885 | } | 1137 | } |
| 886 | #endif | 1138 | #endif |
| 887 | if (env->cr[0] & CR0_PE_MASK) { | 1139 | if (env->cr[0] & CR0_PE_MASK) { |
| 888 | - do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw); | 1140 | +#if TARGET_X86_64 |
| 1141 | + if (env->hflags & HF_LMA_MASK) { | ||
| 1142 | + do_interrupt64(intno, is_int, error_code, next_eip, is_hw); | ||
| 1143 | + } else | ||
| 1144 | +#endif | ||
| 1145 | + { | ||
| 1146 | + do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw); | ||
| 1147 | + } | ||
| 889 | } else { | 1148 | } else { |
| 890 | do_interrupt_real(intno, is_int, error_code, next_eip); | 1149 | do_interrupt_real(intno, is_int, error_code, next_eip); |
| 891 | } | 1150 | } |
| @@ -932,20 +1191,20 @@ void raise_exception(int exception_index) | @@ -932,20 +1191,20 @@ void raise_exception(int exception_index) | ||
| 932 | #ifdef BUGGY_GCC_DIV64 | 1191 | #ifdef BUGGY_GCC_DIV64 |
| 933 | /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we | 1192 | /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we |
| 934 | call it from another function */ | 1193 | call it from another function */ |
| 935 | -uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den) | 1194 | +uint32_t div32(uint32_t *q_ptr, uint64_t num, uint32_t den) |
| 936 | { | 1195 | { |
| 937 | *q_ptr = num / den; | 1196 | *q_ptr = num / den; |
| 938 | return num % den; | 1197 | return num % den; |
| 939 | } | 1198 | } |
| 940 | 1199 | ||
| 941 | -int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den) | 1200 | +int32_t idiv32(int32_t *q_ptr, int64_t num, int32_t den) |
| 942 | { | 1201 | { |
| 943 | *q_ptr = num / den; | 1202 | *q_ptr = num / den; |
| 944 | return num % den; | 1203 | return num % den; |
| 945 | } | 1204 | } |
| 946 | #endif | 1205 | #endif |
| 947 | 1206 | ||
| 948 | -void helper_divl_EAX_T0(uint32_t eip) | 1207 | +void helper_divl_EAX_T0(void) |
| 949 | { | 1208 | { |
| 950 | unsigned int den, q, r; | 1209 | unsigned int den, q, r; |
| 951 | uint64_t num; | 1210 | uint64_t num; |
| @@ -953,20 +1212,19 @@ void helper_divl_EAX_T0(uint32_t eip) | @@ -953,20 +1212,19 @@ void helper_divl_EAX_T0(uint32_t eip) | ||
| 953 | num = EAX | ((uint64_t)EDX << 32); | 1212 | num = EAX | ((uint64_t)EDX << 32); |
| 954 | den = T0; | 1213 | den = T0; |
| 955 | if (den == 0) { | 1214 | if (den == 0) { |
| 956 | - EIP = eip; | ||
| 957 | raise_exception(EXCP00_DIVZ); | 1215 | raise_exception(EXCP00_DIVZ); |
| 958 | } | 1216 | } |
| 959 | #ifdef BUGGY_GCC_DIV64 | 1217 | #ifdef BUGGY_GCC_DIV64 |
| 960 | - r = div64(&q, num, den); | 1218 | + r = div32(&q, num, den); |
| 961 | #else | 1219 | #else |
| 962 | q = (num / den); | 1220 | q = (num / den); |
| 963 | r = (num % den); | 1221 | r = (num % den); |
| 964 | #endif | 1222 | #endif |
| 965 | - EAX = q; | ||
| 966 | - EDX = r; | 1223 | + EAX = (uint32_t)q; |
| 1224 | + EDX = (uint32_t)r; | ||
| 967 | } | 1225 | } |
| 968 | 1226 | ||
| 969 | -void helper_idivl_EAX_T0(uint32_t eip) | 1227 | +void helper_idivl_EAX_T0(void) |
| 970 | { | 1228 | { |
| 971 | int den, q, r; | 1229 | int den, q, r; |
| 972 | int64_t num; | 1230 | int64_t num; |
| @@ -974,17 +1232,16 @@ void helper_idivl_EAX_T0(uint32_t eip) | @@ -974,17 +1232,16 @@ void helper_idivl_EAX_T0(uint32_t eip) | ||
| 974 | num = EAX | ((uint64_t)EDX << 32); | 1232 | num = EAX | ((uint64_t)EDX << 32); |
| 975 | den = T0; | 1233 | den = T0; |
| 976 | if (den == 0) { | 1234 | if (den == 0) { |
| 977 | - EIP = eip; | ||
| 978 | raise_exception(EXCP00_DIVZ); | 1235 | raise_exception(EXCP00_DIVZ); |
| 979 | } | 1236 | } |
| 980 | #ifdef BUGGY_GCC_DIV64 | 1237 | #ifdef BUGGY_GCC_DIV64 |
| 981 | - r = idiv64(&q, num, den); | 1238 | + r = idiv32(&q, num, den); |
| 982 | #else | 1239 | #else |
| 983 | q = (num / den); | 1240 | q = (num / den); |
| 984 | r = (num % den); | 1241 | r = (num % den); |
| 985 | #endif | 1242 | #endif |
| 986 | - EAX = q; | ||
| 987 | - EDX = r; | 1243 | + EAX = (uint32_t)q; |
| 1244 | + EDX = (uint32_t)r; | ||
| 988 | } | 1245 | } |
| 989 | 1246 | ||
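With the general registers widened to target_ulong, the explicit (uint32_t) casts on the quotient and remainder above are what give the x86_64-correct behaviour: a 32-bit result always zero-extends into the full register, whereas a plain assignment from a signed 32-bit value would sign-extend. A standalone illustration (not part of the patch):

    uint64_t rax;
    int32_t q = -5;          /* e.g. a quotient from the signed divide */
    rax = q;                 /* sign-extends: 0xfffffffffffffffb */
    rax = (uint32_t)q;       /* zero-extends: 0x00000000fffffffb, as hardware does */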
| 990 | void helper_cmpxchg8b(void) | 1247 | void helper_cmpxchg8b(void) |
| @@ -993,9 +1250,9 @@ void helper_cmpxchg8b(void) | @@ -993,9 +1250,9 @@ void helper_cmpxchg8b(void) | ||
| 993 | int eflags; | 1250 | int eflags; |
| 994 | 1251 | ||
| 995 | eflags = cc_table[CC_OP].compute_all(); | 1252 | eflags = cc_table[CC_OP].compute_all(); |
| 996 | - d = ldq((uint8_t *)A0); | 1253 | + d = ldq(A0); |
| 997 | if (d == (((uint64_t)EDX << 32) | EAX)) { | 1254 | if (d == (((uint64_t)EDX << 32) | EAX)) { |
| 998 | - stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX); | 1255 | + stq(A0, ((uint64_t)ECX << 32) | EBX); |
| 999 | eflags |= CC_Z; | 1256 | eflags |= CC_Z; |
| 1000 | } else { | 1257 | } else { |
| 1001 | EDX = d >> 32; | 1258 | EDX = d >> 32; |
| @@ -1005,58 +1262,20 @@ void helper_cmpxchg8b(void) | @@ -1005,58 +1262,20 @@ void helper_cmpxchg8b(void) | ||
| 1005 | CC_SRC = eflags; | 1262 | CC_SRC = eflags; |
| 1006 | } | 1263 | } |
| 1007 | 1264 | ||
| 1008 | -#define CPUID_FP87 (1 << 0) | ||
| 1009 | -#define CPUID_VME (1 << 1) | ||
| 1010 | -#define CPUID_DE (1 << 2) | ||
| 1011 | -#define CPUID_PSE (1 << 3) | ||
| 1012 | -#define CPUID_TSC (1 << 4) | ||
| 1013 | -#define CPUID_MSR (1 << 5) | ||
| 1014 | -#define CPUID_PAE (1 << 6) | ||
| 1015 | -#define CPUID_MCE (1 << 7) | ||
| 1016 | -#define CPUID_CX8 (1 << 8) | ||
| 1017 | -#define CPUID_APIC (1 << 9) | ||
| 1018 | -#define CPUID_SEP (1 << 11) /* sysenter/sysexit */ | ||
| 1019 | -#define CPUID_MTRR (1 << 12) | ||
| 1020 | -#define CPUID_PGE (1 << 13) | ||
| 1021 | -#define CPUID_MCA (1 << 14) | ||
| 1022 | -#define CPUID_CMOV (1 << 15) | ||
| 1023 | -/* ... */ | ||
| 1024 | -#define CPUID_MMX (1 << 23) | ||
| 1025 | -#define CPUID_FXSR (1 << 24) | ||
| 1026 | -#define CPUID_SSE (1 << 25) | ||
| 1027 | -#define CPUID_SSE2 (1 << 26) | ||
| 1028 | - | ||
| 1029 | void helper_cpuid(void) | 1265 | void helper_cpuid(void) |
| 1030 | { | 1266 | { |
| 1031 | - switch(EAX) { | 1267 | + switch((uint32_t)EAX) { |
| 1032 | case 0: | 1268 | case 0: |
| 1033 | EAX = 2; /* max EAX index supported */ | 1269 | EAX = 2; /* max EAX index supported */ |
| 1034 | - EBX = 0x756e6547; | ||
| 1035 | - ECX = 0x6c65746e; | ||
| 1036 | - EDX = 0x49656e69; | 1270 | + EBX = env->cpuid_vendor1; |
| 1271 | + EDX = env->cpuid_vendor2; | ||
| 1272 | + ECX = env->cpuid_vendor3; | ||
| 1037 | break; | 1273 | break; |
| 1038 | case 1: | 1274 | case 1: |
| 1039 | - { | ||
| 1040 | - int family, model, stepping; | ||
| 1041 | - /* EAX = 1 info */ | ||
| 1042 | -#if 0 | ||
| 1043 | - /* pentium 75-200 */ | ||
| 1044 | - family = 5; | ||
| 1045 | - model = 2; | ||
| 1046 | - stepping = 11; | ||
| 1047 | -#else | ||
| 1048 | - /* pentium pro */ | ||
| 1049 | - family = 6; | ||
| 1050 | - model = 1; | ||
| 1051 | - stepping = 3; | ||
| 1052 | -#endif | ||
| 1053 | - EAX = (family << 8) | (model << 4) | stepping; | ||
| 1054 | - EBX = 0; | ||
| 1055 | - ECX = 0; | ||
| 1056 | - EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE | | ||
| 1057 | - CPUID_TSC | CPUID_MSR | CPUID_MCE | | ||
| 1058 | - CPUID_CX8 | CPUID_PGE | CPUID_CMOV; | ||
| 1059 | - } | 1275 | + EAX = env->cpuid_version; |
| 1276 | + EBX = 0; | ||
| 1277 | + ECX = 0; | ||
| 1278 | + EDX = env->cpuid_features; | ||
| 1060 | break; | 1279 | break; |
| 1061 | default: | 1280 | default: |
| 1062 | /* cache info: needed for Pentium Pro compatibility */ | 1281 | /* cache info: needed for Pentium Pro compatibility */ |
| @@ -1065,12 +1284,34 @@ void helper_cpuid(void) | @@ -1065,12 +1284,34 @@ void helper_cpuid(void) | ||
| 1065 | ECX = 0; | 1284 | ECX = 0; |
| 1066 | EDX = 0; | 1285 | EDX = 0; |
| 1067 | break; | 1286 | break; |
| 1287 | +#ifdef TARGET_X86_64 | ||
| 1288 | + case 0x80000000: | ||
| 1289 | + EAX = 0x80000008; | ||
| 1290 | + EBX = env->cpuid_vendor1; | ||
| 1291 | + EDX = env->cpuid_vendor2; | ||
| 1292 | + ECX = env->cpuid_vendor3; | ||
| 1293 | + break; | ||
| 1294 | + case 0x80000001: | ||
| 1295 | + EAX = env->cpuid_version; | ||
| 1296 | + EBX = 0; | ||
| 1297 | + ECX = 0; | ||
| 1298 | + /* long mode + syscall/sysret features */ | ||
| 1299 | + EDX = (env->cpuid_features & 0x0183F3FF) | (1 << 29) | (1 << 11); | ||
| 1300 | + break; | ||
| 1301 | + case 0x80000008: | ||
| 1302 | + /* virtual & phys address size in low 2 bytes. */ | ||
| 1303 | + EAX = 0x00003028; | ||
| 1304 | + EBX = 0; | ||
| 1305 | + ECX = 0; | ||
| 1306 | + EDX = 0; | ||
| 1307 | + break; | ||
| 1308 | +#endif | ||
| 1068 | } | 1309 | } |
| 1069 | } | 1310 | } |
| 1070 | 1311 | ||
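The extended leaves added above are what a 64-bit guest kernel probes before switching to long mode: leaf 0x80000000 advertises the highest extended leaf, and leaf 0x80000001 reports the LM bit (bit 29) and the SYSCALL bit (bit 11) in EDX, exactly the two bits OR'ed in by the helper. A guest-side sketch using GCC inline assembly (hypothetical helper, not from the patch):

    static int cpu_has_long_mode(void)
    {
        uint32_t eax, ebx, ecx, edx;
        __asm__ volatile("cpuid"
                         : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                         : "a"(0x80000001));
        return (edx >> 29) & 1;   /* LM: matches the (1 << 29) set above */
    }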
| 1071 | void helper_enter_level(int level, int data32) | 1312 | void helper_enter_level(int level, int data32) |
| 1072 | { | 1313 | { |
| 1073 | - uint8_t *ssp; | 1314 | + target_ulong ssp; |
| 1074 | uint32_t esp_mask, esp, ebp; | 1315 | uint32_t esp_mask, esp, ebp; |
| 1075 | 1316 | ||
| 1076 | esp_mask = get_sp_mask(env->segs[R_SS].flags); | 1317 | esp_mask = get_sp_mask(env->segs[R_SS].flags); |
| @@ -1105,20 +1346,26 @@ void helper_lldt_T0(void) | @@ -1105,20 +1346,26 @@ void helper_lldt_T0(void) | ||
| 1105 | int selector; | 1346 | int selector; |
| 1106 | SegmentCache *dt; | 1347 | SegmentCache *dt; |
| 1107 | uint32_t e1, e2; | 1348 | uint32_t e1, e2; |
| 1108 | - int index; | ||
| 1109 | - uint8_t *ptr; | 1349 | + int index, entry_limit; |
| 1350 | + target_ulong ptr; | ||
| 1110 | 1351 | ||
| 1111 | selector = T0 & 0xffff; | 1352 | selector = T0 & 0xffff; |
| 1112 | if ((selector & 0xfffc) == 0) { | 1353 | if ((selector & 0xfffc) == 0) { |
| 1113 | /* XXX: NULL selector case: invalid LDT */ | 1354 | /* XXX: NULL selector case: invalid LDT */ |
| 1114 | - env->ldt.base = NULL; | 1355 | + env->ldt.base = 0; |
| 1115 | env->ldt.limit = 0; | 1356 | env->ldt.limit = 0; |
| 1116 | } else { | 1357 | } else { |
| 1117 | if (selector & 0x4) | 1358 | if (selector & 0x4) |
| 1118 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | 1359 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1119 | dt = &env->gdt; | 1360 | dt = &env->gdt; |
| 1120 | index = selector & ~7; | 1361 | index = selector & ~7; |
| 1121 | - if ((index + 7) > dt->limit) | 1362 | +#ifdef TARGET_X86_64 |
| 1363 | + if (env->hflags & HF_LMA_MASK) | ||
| 1364 | + entry_limit = 15; | ||
| 1365 | + else | ||
| 1366 | +#endif | ||
| 1367 | + entry_limit = 7; | ||
| 1368 | + if ((index + entry_limit) > dt->limit) | ||
| 1122 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | 1369 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1123 | ptr = dt->base + index; | 1370 | ptr = dt->base + index; |
| 1124 | e1 = ldl_kernel(ptr); | 1371 | e1 = ldl_kernel(ptr); |
| @@ -1127,7 +1374,17 @@ void helper_lldt_T0(void) | @@ -1127,7 +1374,17 @@ void helper_lldt_T0(void) | ||
| 1127 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | 1374 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1128 | if (!(e2 & DESC_P_MASK)) | 1375 | if (!(e2 & DESC_P_MASK)) |
| 1129 | raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); | 1376 | raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); |
| 1130 | - load_seg_cache_raw_dt(&env->ldt, e1, e2); | 1377 | +#ifdef TARGET_X86_64 |
| 1378 | + if (env->hflags & HF_LMA_MASK) { | ||
| 1379 | + uint32_t e3; | ||
| 1380 | + e3 = ldl_kernel(ptr + 8); | ||
| 1381 | + load_seg_cache_raw_dt(&env->ldt, e1, e2); | ||
| 1382 | + env->ldt.base |= (target_ulong)e3 << 32; | ||
| 1383 | + } else | ||
| 1384 | +#endif | ||
| 1385 | + { | ||
| 1386 | + load_seg_cache_raw_dt(&env->ldt, e1, e2); | ||
| 1387 | + } | ||
| 1131 | } | 1388 | } |
| 1132 | env->ldt.selector = selector; | 1389 | env->ldt.selector = selector; |
| 1133 | } | 1390 | } |
| @@ -1137,13 +1394,13 @@ void helper_ltr_T0(void) | @@ -1137,13 +1394,13 @@ void helper_ltr_T0(void) | ||
| 1137 | int selector; | 1394 | int selector; |
| 1138 | SegmentCache *dt; | 1395 | SegmentCache *dt; |
| 1139 | uint32_t e1, e2; | 1396 | uint32_t e1, e2; |
| 1140 | - int index, type; | ||
| 1141 | - uint8_t *ptr; | 1397 | + int index, type, entry_limit; |
| 1398 | + target_ulong ptr; | ||
| 1142 | 1399 | ||
| 1143 | selector = T0 & 0xffff; | 1400 | selector = T0 & 0xffff; |
| 1144 | if ((selector & 0xfffc) == 0) { | 1401 | if ((selector & 0xfffc) == 0) { |
| 1145 | - /* NULL selector case: invalid LDT */ | ||
| 1146 | - env->tr.base = NULL; | 1402 | + /* NULL selector case: invalid TR */ |
| 1403 | + env->tr.base = 0; | ||
| 1147 | env->tr.limit = 0; | 1404 | env->tr.limit = 0; |
| 1148 | env->tr.flags = 0; | 1405 | env->tr.flags = 0; |
| 1149 | } else { | 1406 | } else { |
| @@ -1151,7 +1408,13 @@ void helper_ltr_T0(void) | @@ -1151,7 +1408,13 @@ void helper_ltr_T0(void) | ||
| 1151 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | 1408 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1152 | dt = &env->gdt; | 1409 | dt = &env->gdt; |
| 1153 | index = selector & ~7; | 1410 | index = selector & ~7; |
| 1154 | - if ((index + 7) > dt->limit) | 1411 | +#ifdef TARGET_X86_64 |
| 1412 | + if (env->hflags & HF_LMA_MASK) | ||
| 1413 | + entry_limit = 15; | ||
| 1414 | + else | ||
| 1415 | +#endif | ||
| 1416 | + entry_limit = 7; | ||
| 1417 | + if ((index + entry_limit) > dt->limit) | ||
| 1155 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | 1418 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1156 | ptr = dt->base + index; | 1419 | ptr = dt->base + index; |
| 1157 | e1 = ldl_kernel(ptr); | 1420 | e1 = ldl_kernel(ptr); |
| @@ -1162,7 +1425,17 @@ void helper_ltr_T0(void) | @@ -1162,7 +1425,17 @@ void helper_ltr_T0(void) | ||
| 1162 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | 1425 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1163 | if (!(e2 & DESC_P_MASK)) | 1426 | if (!(e2 & DESC_P_MASK)) |
| 1164 | raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); | 1427 | raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); |
| 1165 | - load_seg_cache_raw_dt(&env->tr, e1, e2); | 1428 | +#ifdef TARGET_X86_64 |
| 1429 | + if (env->hflags & HF_LMA_MASK) { | ||
| 1430 | + uint32_t e3; | ||
| 1431 | + e3 = ldl_kernel(ptr + 8); | ||
| 1432 | + load_seg_cache_raw_dt(&env->tr, e1, e2); | ||
| 1433 | + env->tr.base |= (target_ulong)e3 << 32; | ||
| 1434 | + } else | ||
| 1435 | +#endif | ||
| 1436 | + { | ||
| 1437 | + load_seg_cache_raw_dt(&env->tr, e1, e2); | ||
| 1438 | + } | ||
| 1166 | e2 |= DESC_TSS_BUSY_MASK; | 1439 | e2 |= DESC_TSS_BUSY_MASK; |
| 1167 | stl_kernel(ptr + 4, e2); | 1440 | stl_kernel(ptr + 4, e2); |
| 1168 | } | 1441 | } |
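Both LLDT and LTR now accept 16-byte descriptors in long mode: the third dword (e3) holds bits 63:32 of the base, which is why it is shifted up and OR'ed into the segment cache after the usual 8-byte load. A sketch of the full base assembly, following the AMD64 system-descriptor layout (illustration only, not code from the patch):

    static uint64_t system_desc_base(uint32_t e1, uint32_t e2, uint32_t e3)
    {
        uint64_t base;
        base  = (e1 >> 16) & 0xffff;           /* base 15..0  */
        base |= (uint64_t)(e2 & 0xff) << 16;   /* base 23..16 */
        base |= (uint64_t)(e2 & 0xff000000);   /* base 31..24 */
        base |= (uint64_t)e3 << 32;            /* base 63..32 */
        return base;
    }

entry_limit switches from 7 to 15 for the same reason: the descriptor spans two GDT slots, so the limit check must cover all 16 bytes.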
| @@ -1176,14 +1449,14 @@ void load_seg(int seg_reg, int selector) | @@ -1176,14 +1449,14 @@ void load_seg(int seg_reg, int selector) | ||
| 1176 | int cpl, dpl, rpl; | 1449 | int cpl, dpl, rpl; |
| 1177 | SegmentCache *dt; | 1450 | SegmentCache *dt; |
| 1178 | int index; | 1451 | int index; |
| 1179 | - uint8_t *ptr; | 1452 | + target_ulong ptr; |
| 1180 | 1453 | ||
| 1181 | selector &= 0xffff; | 1454 | selector &= 0xffff; |
| 1182 | if ((selector & 0xfffc) == 0) { | 1455 | if ((selector & 0xfffc) == 0) { |
| 1183 | /* null selector case */ | 1456 | /* null selector case */ |
| 1184 | if (seg_reg == R_SS) | 1457 | if (seg_reg == R_SS) |
| 1185 | raise_exception_err(EXCP0D_GPF, 0); | 1458 | raise_exception_err(EXCP0D_GPF, 0); |
| 1186 | - cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0); | 1459 | + cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); |
| 1187 | } else { | 1460 | } else { |
| 1188 | 1461 | ||
| 1189 | if (selector & 0x4) | 1462 | if (selector & 0x4) |
| @@ -1196,7 +1469,7 @@ void load_seg(int seg_reg, int selector) | @@ -1196,7 +1469,7 @@ void load_seg(int seg_reg, int selector) | ||
| 1196 | ptr = dt->base + index; | 1469 | ptr = dt->base + index; |
| 1197 | e1 = ldl_kernel(ptr); | 1470 | e1 = ldl_kernel(ptr); |
| 1198 | e2 = ldl_kernel(ptr + 4); | 1471 | e2 = ldl_kernel(ptr + 4); |
| 1199 | - | 1472 | + |
| 1200 | if (!(e2 & DESC_S_MASK)) | 1473 | if (!(e2 & DESC_S_MASK)) |
| 1201 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); | 1474 | raise_exception_err(EXCP0D_GPF, selector & 0xfffc); |
| 1202 | rpl = selector & 3; | 1475 | rpl = selector & 3; |
| @@ -1247,9 +1520,10 @@ void load_seg(int seg_reg, int selector) | @@ -1247,9 +1520,10 @@ void load_seg(int seg_reg, int selector) | ||
| 1247 | /* protected mode jump */ | 1520 | /* protected mode jump */ |
| 1248 | void helper_ljmp_protected_T0_T1(int next_eip) | 1521 | void helper_ljmp_protected_T0_T1(int next_eip) |
| 1249 | { | 1522 | { |
| 1250 | - int new_cs, new_eip, gate_cs, type; | 1523 | + int new_cs, gate_cs, type; |
| 1251 | uint32_t e1, e2, cpl, dpl, rpl, limit; | 1524 | uint32_t e1, e2, cpl, dpl, rpl, limit; |
| 1252 | - | 1525 | + target_ulong new_eip; |
| 1526 | + | ||
| 1253 | new_cs = T0; | 1527 | new_cs = T0; |
| 1254 | new_eip = T1; | 1528 | new_eip = T1; |
| 1255 | if ((new_cs & 0xfffc) == 0) | 1529 | if ((new_cs & 0xfffc) == 0) |
| @@ -1312,7 +1586,7 @@ void helper_ljmp_protected_T0_T1(int next_eip) | @@ -1312,7 +1586,7 @@ void helper_ljmp_protected_T0_T1(int next_eip) | ||
| 1312 | if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != | 1586 | if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != |
| 1313 | (DESC_S_MASK | DESC_CS_MASK))) | 1587 | (DESC_S_MASK | DESC_CS_MASK))) |
| 1314 | raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); | 1588 | raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); |
| 1315 | - if (((e2 & DESC_C_MASK) && (dpl > cpl)) || | 1589 | + if (((e2 & DESC_C_MASK) && (dpl > cpl)) || |
| 1316 | (!(e2 & DESC_C_MASK) && (dpl != cpl))) | 1590 | (!(e2 & DESC_C_MASK) && (dpl != cpl))) |
| 1317 | raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); | 1591 | raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); |
| 1318 | if (!(e2 & DESC_P_MASK)) | 1592 | if (!(e2 & DESC_P_MASK)) |
| @@ -1336,7 +1610,7 @@ void helper_lcall_real_T0_T1(int shift, int next_eip) | @@ -1336,7 +1610,7 @@ void helper_lcall_real_T0_T1(int shift, int next_eip) | ||
| 1336 | { | 1610 | { |
| 1337 | int new_cs, new_eip; | 1611 | int new_cs, new_eip; |
| 1338 | uint32_t esp, esp_mask; | 1612 | uint32_t esp, esp_mask; |
| 1339 | - uint8_t *ssp; | 1613 | + target_ulong ssp; |
| 1340 | 1614 | ||
| 1341 | new_cs = T0; | 1615 | new_cs = T0; |
| 1342 | new_eip = T1; | 1616 | new_eip = T1; |
| @@ -1354,7 +1628,7 @@ void helper_lcall_real_T0_T1(int shift, int next_eip) | @@ -1354,7 +1628,7 @@ void helper_lcall_real_T0_T1(int shift, int next_eip) | ||
| 1354 | ESP = (ESP & ~esp_mask) | (esp & esp_mask); | 1628 | ESP = (ESP & ~esp_mask) | (esp & esp_mask); |
| 1355 | env->eip = new_eip; | 1629 | env->eip = new_eip; |
| 1356 | env->segs[R_CS].selector = new_cs; | 1630 | env->segs[R_CS].selector = new_cs; |
| 1357 | - env->segs[R_CS].base = (uint8_t *)(new_cs << 4); | 1631 | + env->segs[R_CS].base = (new_cs << 4); |
| 1358 | } | 1632 | } |
| 1359 | 1633 | ||
| 1360 | /* protected mode call */ | 1634 | /* protected mode call */ |
| @@ -1364,7 +1638,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) | @@ -1364,7 +1638,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) | ||
| 1364 | uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; | 1638 | uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; |
| 1365 | uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask; | 1639 | uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask; |
| 1366 | uint32_t val, limit, old_sp_mask; | 1640 | uint32_t val, limit, old_sp_mask; |
| 1367 | - uint8_t *ssp, *old_ssp; | 1641 | + target_ulong ssp, old_ssp; |
| 1368 | 1642 | ||
| 1369 | new_cs = T0; | 1643 | new_cs = T0; |
| 1370 | new_eip = T1; | 1644 | new_eip = T1; |
| @@ -1471,7 +1745,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) | @@ -1471,7 +1745,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) | ||
| 1471 | get_ss_esp_from_tss(&ss, &sp, dpl); | 1745 | get_ss_esp_from_tss(&ss, &sp, dpl); |
| 1472 | #ifdef DEBUG_PCALL | 1746 | #ifdef DEBUG_PCALL |
| 1473 | if (loglevel & CPU_LOG_PCALL) | 1747 | if (loglevel & CPU_LOG_PCALL) |
| 1474 | - fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=%x\n", | 1748 | + fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n", |
| 1475 | ss, sp, param_count, ESP); | 1749 | ss, sp, param_count, ESP); |
| 1476 | #endif | 1750 | #endif |
| 1477 | if ((ss & 0xfffc) == 0) | 1751 | if ((ss & 0xfffc) == 0) |
| @@ -1555,7 +1829,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) | @@ -1555,7 +1829,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip) | ||
| 1555 | void helper_iret_real(int shift) | 1829 | void helper_iret_real(int shift) |
| 1556 | { | 1830 | { |
| 1557 | uint32_t sp, new_cs, new_eip, new_eflags, sp_mask; | 1831 | uint32_t sp, new_cs, new_eip, new_eflags, sp_mask; |
| 1558 | - uint8_t *ssp; | 1832 | + target_ulong ssp; |
| 1559 | int eflags_mask; | 1833 | int eflags_mask; |
| 1560 | 1834 | ||
| 1561 | sp_mask = 0xffff; /* XXXX: use SS segment size ? */ | 1835 | sp_mask = 0xffff; /* XXXX: use SS segment size ? */ |
| @@ -1595,7 +1869,7 @@ static inline void validate_seg(int seg_reg, int cpl) | @@ -1595,7 +1869,7 @@ static inline void validate_seg(int seg_reg, int cpl) | ||
| 1595 | if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { | 1869 | if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { |
| 1596 | /* data or non conforming code segment */ | 1870 | /* data or non conforming code segment */ |
| 1597 | if (dpl < cpl) { | 1871 | if (dpl < cpl) { |
| 1598 | - cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0); | 1872 | + cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0); |
| 1599 | } | 1873 | } |
| 1600 | } | 1874 | } |
| 1601 | } | 1875 | } |
| @@ -1603,16 +1877,31 @@ static inline void validate_seg(int seg_reg, int cpl) | @@ -1603,16 +1877,31 @@ static inline void validate_seg(int seg_reg, int cpl) | ||
| 1603 | /* protected mode iret */ | 1877 | /* protected mode iret */ |
| 1604 | static inline void helper_ret_protected(int shift, int is_iret, int addend) | 1878 | static inline void helper_ret_protected(int shift, int is_iret, int addend) |
| 1605 | { | 1879 | { |
| 1606 | - uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask; | 1880 | + uint32_t new_cs, new_eflags, new_ss; |
| 1607 | uint32_t new_es, new_ds, new_fs, new_gs; | 1881 | uint32_t new_es, new_ds, new_fs, new_gs; |
| 1608 | uint32_t e1, e2, ss_e1, ss_e2; | 1882 | uint32_t e1, e2, ss_e1, ss_e2; |
| 1609 | int cpl, dpl, rpl, eflags_mask, iopl; | 1883 | int cpl, dpl, rpl, eflags_mask, iopl; |
| 1610 | - uint8_t *ssp; | 1884 | + target_ulong ssp, sp, new_eip, new_esp, sp_mask; |
| 1611 | 1885 | ||
| 1612 | - sp_mask = get_sp_mask(env->segs[R_SS].flags); | 1886 | +#ifdef TARGET_X86_64 |
| 1887 | + if (shift == 2) | ||
| 1888 | + sp_mask = -1; | ||
| 1889 | + else | ||
| 1890 | +#endif | ||
| 1891 | + sp_mask = get_sp_mask(env->segs[R_SS].flags); | ||
| 1613 | sp = ESP; | 1892 | sp = ESP; |
| 1614 | ssp = env->segs[R_SS].base; | 1893 | ssp = env->segs[R_SS].base; |
| 1615 | new_eflags = 0; /* avoid warning */ | 1894 | new_eflags = 0; /* avoid warning */ |
| 1895 | +#ifdef TARGET_X86_64 | ||
| 1896 | + if (shift == 2) { | ||
| 1897 | + POPQ(sp, new_eip); | ||
| 1898 | + POPQ(sp, new_cs); | ||
| 1899 | + new_cs &= 0xffff; | ||
| 1900 | + if (is_iret) { | ||
| 1901 | + POPQ(sp, new_eflags); | ||
| 1902 | + } | ||
| 1903 | + } else | ||
| 1904 | +#endif | ||
| 1616 | if (shift == 1) { | 1905 | if (shift == 1) { |
| 1617 | /* 32 bits */ | 1906 | /* 32 bits */ |
| 1618 | POPL(ssp, sp, sp_mask, new_eip); | 1907 | POPL(ssp, sp, sp_mask, new_eip); |
| @@ -1632,7 +1921,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) | @@ -1632,7 +1921,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) | ||
| 1632 | } | 1921 | } |
| 1633 | #ifdef DEBUG_PCALL | 1922 | #ifdef DEBUG_PCALL |
| 1634 | if (loglevel & CPU_LOG_PCALL) { | 1923 | if (loglevel & CPU_LOG_PCALL) { |
| 1635 | - fprintf(logfile, "lret new %04x:%08x s=%d addend=0x%x\n", | 1924 | + fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", |
| 1636 | new_cs, new_eip, shift, addend); | 1925 | new_cs, new_eip, shift, addend); |
| 1637 | cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); | 1926 | cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); |
| 1638 | } | 1927 | } |
| @@ -1660,7 +1949,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) | @@ -1660,7 +1949,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) | ||
| 1660 | raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); | 1949 | raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); |
| 1661 | 1950 | ||
| 1662 | sp += addend; | 1951 | sp += addend; |
| 1663 | - if (rpl == cpl) { | 1952 | + if (rpl == cpl && !(env->hflags & HF_CS64_MASK)) { |
| 1664 | /* return to same priledge level */ | 1953 | /* return to same priledge level */ |
| 1665 | cpu_x86_load_seg_cache(env, R_CS, new_cs, | 1954 | cpu_x86_load_seg_cache(env, R_CS, new_cs, |
| 1666 | get_seg_base(e1, e2), | 1955 | get_seg_base(e1, e2), |
| @@ -1668,6 +1957,13 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) | @@ -1668,6 +1957,13 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) | ||
| 1668 | e2); | 1957 | e2); |
| 1669 | } else { | 1958 | } else { |
| 1670 | /* return to different priviledge level */ | 1959 | /* return to different priviledge level */ |
| 1960 | +#ifdef TARGET_X86_64 | ||
| 1961 | + if (shift == 2) { | ||
| 1962 | + POPQ(sp, new_esp); | ||
| 1963 | + POPQ(sp, new_ss); | ||
| 1964 | + new_ss &= 0xffff; | ||
| 1965 | + } else | ||
| 1966 | +#endif | ||
| 1671 | if (shift == 1) { | 1967 | if (shift == 1) { |
| 1672 | /* 32 bits */ | 1968 | /* 32 bits */ |
| 1673 | POPL(ssp, sp, sp_mask, new_esp); | 1969 | POPL(ssp, sp, sp_mask, new_esp); |
| @@ -1680,36 +1976,49 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) | @@ -1680,36 +1976,49 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend) | ||
| 1680 | } | 1976 | } |
| 1681 | #ifdef DEBUG_PCALL | 1977 | #ifdef DEBUG_PCALL |
| 1682 | if (loglevel & CPU_LOG_PCALL) { | 1978 | if (loglevel & CPU_LOG_PCALL) { |
| 1683 | - fprintf(logfile, "new ss:esp=%04x:%08x\n", | 1979 | + fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n", |
| 1684 | new_ss, new_esp); | 1980 | new_ss, new_esp); |
| 1685 | } | 1981 | } |
| 1686 | #endif | 1982 | #endif |
| 1687 | - | ||
| 1688 | - if ((new_ss & 3) != rpl) | ||
| 1689 | - raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | ||
| 1690 | - if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) | ||
| 1691 | - raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | ||
| 1692 | - if (!(ss_e2 & DESC_S_MASK) || | ||
| 1693 | - (ss_e2 & DESC_CS_MASK) || | ||
| 1694 | - !(ss_e2 & DESC_W_MASK)) | ||
| 1695 | - raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | ||
| 1696 | - dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; | ||
| 1697 | - if (dpl != rpl) | ||
| 1698 | - raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | ||
| 1699 | - if (!(ss_e2 & DESC_P_MASK)) | ||
| 1700 | - raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc); | 1983 | + if ((env->hflags & HF_LMA_MASK) && (new_ss & 0xfffc) == 0) { |
| 1984 | + /* NULL ss is allowed in long mode */ | ||
| 1985 | + cpu_x86_load_seg_cache(env, R_SS, new_ss, | ||
| 1986 | + 0, 0xffffffff, | ||
| 1987 | + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | ||
| 1988 | + DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | | ||
| 1989 | + DESC_W_MASK | DESC_A_MASK); | ||
| 1990 | + } else { | ||
| 1991 | + if ((new_ss & 3) != rpl) | ||
| 1992 | + raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | ||
| 1993 | + if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) | ||
| 1994 | + raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | ||
| 1995 | + if (!(ss_e2 & DESC_S_MASK) || | ||
| 1996 | + (ss_e2 & DESC_CS_MASK) || | ||
| 1997 | + !(ss_e2 & DESC_W_MASK)) | ||
| 1998 | + raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | ||
| 1999 | + dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; | ||
| 2000 | + if (dpl != rpl) | ||
| 2001 | + raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); | ||
| 2002 | + if (!(ss_e2 & DESC_P_MASK)) | ||
| 2003 | + raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc); | ||
| 2004 | + cpu_x86_load_seg_cache(env, R_SS, new_ss, | ||
| 2005 | + get_seg_base(ss_e1, ss_e2), | ||
| 2006 | + get_seg_limit(ss_e1, ss_e2), | ||
| 2007 | + ss_e2); | ||
| 2008 | + } | ||
| 1701 | 2009 | ||
| 1702 | cpu_x86_load_seg_cache(env, R_CS, new_cs, | 2010 | cpu_x86_load_seg_cache(env, R_CS, new_cs, |
| 1703 | get_seg_base(e1, e2), | 2011 | get_seg_base(e1, e2), |
| 1704 | get_seg_limit(e1, e2), | 2012 | get_seg_limit(e1, e2), |
| 1705 | e2); | 2013 | e2); |
| 1706 | - cpu_x86_load_seg_cache(env, R_SS, new_ss, | ||
| 1707 | - get_seg_base(ss_e1, ss_e2), | ||
| 1708 | - get_seg_limit(ss_e1, ss_e2), | ||
| 1709 | - ss_e2); | ||
| 1710 | cpu_x86_set_cpl(env, rpl); | 2014 | cpu_x86_set_cpl(env, rpl); |
| 1711 | sp = new_esp; | 2015 | sp = new_esp; |
| 1712 | - sp_mask = get_sp_mask(ss_e2); | 2016 | +#ifdef TARGET_X86_64 |
| 2017 | + if (shift == 2) | ||
| 2018 | + sp_mask = -1; | ||
| 2019 | + else | ||
| 2020 | +#endif | ||
| 2021 | + sp_mask = get_sp_mask(ss_e2); | ||
| 1713 | 2022 | ||
| 1714 | /* validate data segments */ | 2023 | /* validate data segments */ |
| 1715 | validate_seg(R_ES, cpl); | 2024 | validate_seg(R_ES, cpl); |
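In the shift == 2 paths above every stack slot is 8 bytes wide: the frame popped is RIP, CS, RFLAGS and, on the privilege-change path, RSP and SS, with the 16-bit selectors occupying the low bits of their slot, and sp_mask is forced to -1 because 64-bit stack accesses are not masked by SS. The POPQ macro these paths rely on is presumably along these lines (it is defined earlier in this file by the same patch and is not shown in this hunk):

    #define POPQ(sp, val)\
    {\
        val = ldq_kernel(sp);\
        sp += 8;\
    }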
| @@ -1765,6 +2074,10 @@ void helper_iret_protected(int shift, int next_eip) | @@ -1765,6 +2074,10 @@ void helper_iret_protected(int shift, int next_eip) | ||
| 1765 | 2074 | ||
| 1766 | /* specific case for TSS */ | 2075 | /* specific case for TSS */ |
| 1767 | if (env->eflags & NT_MASK) { | 2076 | if (env->eflags & NT_MASK) { |
| 2077 | +#ifdef TARGET_X86_64 | ||
| 2078 | + if (env->hflags & HF_LMA_MASK) | ||
| 2079 | + raise_exception_err(EXCP0D_GPF, 0); | ||
| 2080 | +#endif | ||
| 1768 | tss_selector = lduw_kernel(env->tr.base + 0); | 2081 | tss_selector = lduw_kernel(env->tr.base + 0); |
| 1769 | if (tss_selector & 4) | 2082 | if (tss_selector & 4) |
| 1770 | raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); | 2083 | raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); |
| @@ -1793,12 +2106,12 @@ void helper_sysenter(void) | @@ -1793,12 +2106,12 @@ void helper_sysenter(void) | ||
| 1793 | env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); | 2106 | env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); |
| 1794 | cpu_x86_set_cpl(env, 0); | 2107 | cpu_x86_set_cpl(env, 0); |
| 1795 | cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, | 2108 | cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, |
| 1796 | - NULL, 0xffffffff, | 2109 | + 0, 0xffffffff, |
| 1797 | DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | 2110 | DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | |
| 1798 | DESC_S_MASK | | 2111 | DESC_S_MASK | |
| 1799 | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); | 2112 | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); |
| 1800 | cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, | 2113 | cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, |
| 1801 | - NULL, 0xffffffff, | 2114 | + 0, 0xffffffff, |
| 1802 | DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | 2115 | DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | |
| 1803 | DESC_S_MASK | | 2116 | DESC_S_MASK | |
| 1804 | DESC_W_MASK | DESC_A_MASK); | 2117 | DESC_W_MASK | DESC_A_MASK); |
| @@ -1816,12 +2129,12 @@ void helper_sysexit(void) | @@ -1816,12 +2129,12 @@ void helper_sysexit(void) | ||
| 1816 | } | 2129 | } |
| 1817 | cpu_x86_set_cpl(env, 3); | 2130 | cpu_x86_set_cpl(env, 3); |
| 1818 | cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3, | 2131 | cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3, |
| 1819 | - NULL, 0xffffffff, | 2132 | + 0, 0xffffffff, |
| 1820 | DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | 2133 | DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | |
| 1821 | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | | 2134 | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | |
| 1822 | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); | 2135 | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); |
| 1823 | cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3, | 2136 | cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3, |
| 1824 | - NULL, 0xffffffff, | 2137 | + 0, 0xffffffff, |
| 1825 | DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | | 2138 | DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | |
| 1826 | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | | 2139 | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | |
| 1827 | DESC_W_MASK | DESC_A_MASK); | 2140 | DESC_W_MASK | DESC_A_MASK); |
| @@ -1863,22 +2176,67 @@ void helper_rdtsc(void) | @@ -1863,22 +2176,67 @@ void helper_rdtsc(void) | ||
| 1863 | uint64_t val; | 2176 | uint64_t val; |
| 1864 | 2177 | ||
| 1865 | val = cpu_get_tsc(env); | 2178 | val = cpu_get_tsc(env); |
| 1866 | - EAX = val; | ||
| 1867 | - EDX = val >> 32; | 2179 | + EAX = (uint32_t)(val); |
| 2180 | + EDX = (uint32_t)(val >> 32); | ||
| 2181 | +} | ||
| 2182 | + | ||
| 2183 | +#if defined(CONFIG_USER_ONLY) | ||
| 2184 | +void helper_wrmsr(void) | ||
| 2185 | +{ | ||
| 1868 | } | 2186 | } |
| 1869 | 2187 | ||
| 2188 | +void helper_rdmsr(void) | ||
| 2189 | +{ | ||
| 2190 | +} | ||
| 2191 | +#else | ||
| 1870 | void helper_wrmsr(void) | 2192 | void helper_wrmsr(void) |
| 1871 | { | 2193 | { |
| 1872 | - switch(ECX) { | 2194 | + uint64_t val; |
| 2195 | + | ||
| 2196 | + val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); | ||
| 2197 | + | ||
| 2198 | + switch((uint32_t)ECX) { | ||
| 1873 | case MSR_IA32_SYSENTER_CS: | 2199 | case MSR_IA32_SYSENTER_CS: |
| 1874 | - env->sysenter_cs = EAX & 0xffff; | 2200 | + env->sysenter_cs = val & 0xffff; |
| 1875 | break; | 2201 | break; |
| 1876 | case MSR_IA32_SYSENTER_ESP: | 2202 | case MSR_IA32_SYSENTER_ESP: |
| 1877 | - env->sysenter_esp = EAX; | 2203 | + env->sysenter_esp = val; |
| 1878 | break; | 2204 | break; |
| 1879 | case MSR_IA32_SYSENTER_EIP: | 2205 | case MSR_IA32_SYSENTER_EIP: |
| 1880 | - env->sysenter_eip = EAX; | 2206 | + env->sysenter_eip = val; |
| 2207 | + break; | ||
| 2208 | + case MSR_IA32_APICBASE: | ||
| 2209 | + cpu_set_apic_base(env, val); | ||
| 2210 | + break; | ||
| 2211 | +#ifdef TARGET_X86_64 | ||
| 2212 | + case MSR_EFER: | ||
| 2213 | +#define MSR_EFER_UPDATE_MASK (MSR_EFER_SCE | MSR_EFER_LME | \ | ||
| 2214 | + MSR_EFER_NXE | MSR_EFER_FFXSR) | ||
| 2215 | + env->efer = (env->efer & ~MSR_EFER_UPDATE_MASK) | | ||
| 2216 | + (val & MSR_EFER_UPDATE_MASK); | ||
| 1881 | break; | 2217 | break; |
| 2218 | + case MSR_STAR: | ||
| 2219 | + env->star = val; | ||
| 2220 | + break; | ||
| 2221 | + case MSR_LSTAR: | ||
| 2222 | + env->lstar = val; | ||
| 2223 | + break; | ||
| 2224 | + case MSR_CSTAR: | ||
| 2225 | + env->cstar = val; | ||
| 2226 | + break; | ||
| 2227 | + case MSR_FMASK: | ||
| 2228 | + env->fmask = val; | ||
| 2229 | + break; | ||
| 2230 | + case MSR_FSBASE: | ||
| 2231 | + env->segs[R_FS].base = val; | ||
| 2232 | + break; | ||
| 2233 | + case MSR_GSBASE: | ||
| 2234 | + env->segs[R_GS].base = val; | ||
| 2235 | + break; | ||
| 2236 | + case MSR_KERNELGSBASE: | ||
| 2237 | + env->kernelgsbase = val; | ||
| 2238 | + break; | ||
| 2239 | +#endif | ||
| 1882 | default: | 2240 | default: |
| 1883 | /* XXX: exception ? */ | 2241 | /* XXX: exception ? */ |
| 1884 | break; | 2242 | break; |
| @@ -1887,24 +2245,55 @@ void helper_wrmsr(void) | @@ -1887,24 +2245,55 @@ void helper_wrmsr(void) | ||
| 1887 | 2245 | ||
| 1888 | void helper_rdmsr(void) | 2246 | void helper_rdmsr(void) |
| 1889 | { | 2247 | { |
| 1890 | - switch(ECX) { | 2248 | + uint64_t val; |
| 2249 | + switch((uint32_t)ECX) { | ||
| 1891 | case MSR_IA32_SYSENTER_CS: | 2250 | case MSR_IA32_SYSENTER_CS: |
| 1892 | - EAX = env->sysenter_cs; | ||
| 1893 | - EDX = 0; | 2251 | + val = env->sysenter_cs; |
| 1894 | break; | 2252 | break; |
| 1895 | case MSR_IA32_SYSENTER_ESP: | 2253 | case MSR_IA32_SYSENTER_ESP: |
| 1896 | - EAX = env->sysenter_esp; | ||
| 1897 | - EDX = 0; | 2254 | + val = env->sysenter_esp; |
| 1898 | break; | 2255 | break; |
| 1899 | case MSR_IA32_SYSENTER_EIP: | 2256 | case MSR_IA32_SYSENTER_EIP: |
| 1900 | - EAX = env->sysenter_eip; | ||
| 1901 | - EDX = 0; | 2257 | + val = env->sysenter_eip; |
| 2258 | + break; | ||
| 2259 | + case MSR_IA32_APICBASE: | ||
| 2260 | + val = cpu_get_apic_base(env); | ||
| 2261 | + break; | ||
| 2262 | +#ifdef TARGET_X86_64 | ||
| 2263 | + case MSR_EFER: | ||
| 2264 | + val = env->efer; | ||
| 2265 | + break; | ||
| 2266 | + case MSR_STAR: | ||
| 2267 | + val = env->star; | ||
| 2268 | + break; | ||
| 2269 | + case MSR_LSTAR: | ||
| 2270 | + val = env->lstar; | ||
| 2271 | + break; | ||
| 2272 | + case MSR_CSTAR: | ||
| 2273 | + val = env->cstar; | ||
| 2274 | + break; | ||
| 2275 | + case MSR_FMASK: | ||
| 2276 | + val = env->fmask; | ||
| 2277 | + break; | ||
| 2278 | + case MSR_FSBASE: | ||
| 2279 | + val = env->segs[R_FS].base; | ||
| 2280 | + break; | ||
| 2281 | + case MSR_GSBASE: | ||
| 2282 | + val = env->segs[R_GS].base; | ||
| 1902 | break; | 2283 | break; |
| 2284 | + case MSR_KERNELGSBASE: | ||
| 2285 | + val = env->kernelgsbase; | ||
| 2286 | + break; | ||
| 2287 | +#endif | ||
| 1903 | default: | 2288 | default: |
| 1904 | /* XXX: exception ? */ | 2289 | /* XXX: exception ? */ |
| 2290 | + val = 0; | ||
| 1905 | break; | 2291 | break; |
| 1906 | } | 2292 | } |
| 2293 | + EAX = (uint32_t)(val); | ||
| 2294 | + EDX = (uint32_t)(val >> 32); | ||
| 1907 | } | 2295 | } |
| 2296 | +#endif | ||
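All MSR traffic goes through the EDX:EAX split that helper_wrmsr reassembles and helper_rdmsr tears apart above. From the guest's point of view that looks like this (hypothetical helper, GCC inline assembly, not part of the patch; 0xc0000080 is the architectural number of the EFER MSR handled above):

    static void guest_wrmsr(uint32_t msr, uint64_t val)
    {
        __asm__ volatile("wrmsr"
                         : /* no outputs */
                         : "c"(msr),
                           "a"((uint32_t)val),
                           "d"((uint32_t)(val >> 32)));
    }

    /* e.g. guest_wrmsr(0xc0000080, efer | (1 << 8));  -- set EFER.LME */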
| 1908 | 2297 | ||
| 1909 | void helper_lsl(void) | 2298 | void helper_lsl(void) |
| 1910 | { | 2299 | { |
| @@ -2055,14 +2444,14 @@ void helper_fldt_ST0_A0(void) | @@ -2055,14 +2444,14 @@ void helper_fldt_ST0_A0(void) | ||
| 2055 | { | 2444 | { |
| 2056 | int new_fpstt; | 2445 | int new_fpstt; |
| 2057 | new_fpstt = (env->fpstt - 1) & 7; | 2446 | new_fpstt = (env->fpstt - 1) & 7; |
| 2058 | - env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0); | 2447 | + env->fpregs[new_fpstt] = helper_fldt(A0); |
| 2059 | env->fpstt = new_fpstt; | 2448 | env->fpstt = new_fpstt; |
| 2060 | env->fptags[new_fpstt] = 0; /* validate stack entry */ | 2449 | env->fptags[new_fpstt] = 0; /* validate stack entry */ |
| 2061 | } | 2450 | } |
| 2062 | 2451 | ||
| 2063 | void helper_fstt_ST0_A0(void) | 2452 | void helper_fstt_ST0_A0(void) |
| 2064 | { | 2453 | { |
| 2065 | - helper_fstt(ST0, (uint8_t *)A0); | 2454 | + helper_fstt(ST0, A0); |
| 2066 | } | 2455 | } |
| 2067 | 2456 | ||
| 2068 | void fpu_set_exception(int mask) | 2457 | void fpu_set_exception(int mask) |
| @@ -2102,11 +2491,11 @@ void helper_fbld_ST0_A0(void) | @@ -2102,11 +2491,11 @@ void helper_fbld_ST0_A0(void) | ||
| 2102 | 2491 | ||
| 2103 | val = 0; | 2492 | val = 0; |
| 2104 | for(i = 8; i >= 0; i--) { | 2493 | for(i = 8; i >= 0; i--) { |
| 2105 | - v = ldub((uint8_t *)A0 + i); | 2494 | + v = ldub(A0 + i); |
| 2106 | val = (val * 100) + ((v >> 4) * 10) + (v & 0xf); | 2495 | val = (val * 100) + ((v >> 4) * 10) + (v & 0xf); |
| 2107 | } | 2496 | } |
| 2108 | tmp = val; | 2497 | tmp = val; |
| 2109 | - if (ldub((uint8_t *)A0 + 9) & 0x80) | 2498 | + if (ldub(A0 + 9) & 0x80) |
| 2110 | tmp = -tmp; | 2499 | tmp = -tmp; |
| 2111 | fpush(); | 2500 | fpush(); |
| 2112 | ST0 = tmp; | 2501 | ST0 = tmp; |
| @@ -2116,12 +2505,12 @@ void helper_fbst_ST0_A0(void) | @@ -2116,12 +2505,12 @@ void helper_fbst_ST0_A0(void) | ||
| 2116 | { | 2505 | { |
| 2117 | CPU86_LDouble tmp; | 2506 | CPU86_LDouble tmp; |
| 2118 | int v; | 2507 | int v; |
| 2119 | - uint8_t *mem_ref, *mem_end; | 2508 | + target_ulong mem_ref, mem_end; |
| 2120 | int64_t val; | 2509 | int64_t val; |
| 2121 | 2510 | ||
| 2122 | tmp = rint(ST0); | 2511 | tmp = rint(ST0); |
| 2123 | val = (int64_t)tmp; | 2512 | val = (int64_t)tmp; |
| 2124 | - mem_ref = (uint8_t *)A0; | 2513 | + mem_ref = A0; |
| 2125 | mem_end = mem_ref + 9; | 2514 | mem_end = mem_ref + 9; |
| 2126 | if (val < 0) { | 2515 | if (val < 0) { |
| 2127 | stb(mem_end, 0x80); | 2516 | stb(mem_end, 0x80); |
| @@ -2402,7 +2791,7 @@ void helper_fxam_ST0(void) | @@ -2402,7 +2791,7 @@ void helper_fxam_ST0(void) | ||
| 2402 | } | 2791 | } |
| 2403 | } | 2792 | } |
| 2404 | 2793 | ||
| 2405 | -void helper_fstenv(uint8_t *ptr, int data32) | 2794 | +void helper_fstenv(target_ulong ptr, int data32) |
| 2406 | { | 2795 | { |
| 2407 | int fpus, fptag, exp, i; | 2796 | int fpus, fptag, exp, i; |
| 2408 | uint64_t mant; | 2797 | uint64_t mant; |
| @@ -2452,7 +2841,7 @@ void helper_fstenv(uint8_t *ptr, int data32) | @@ -2452,7 +2841,7 @@ void helper_fstenv(uint8_t *ptr, int data32) | ||
| 2452 | } | 2841 | } |
| 2453 | } | 2842 | } |
| 2454 | 2843 | ||
| 2455 | -void helper_fldenv(uint8_t *ptr, int data32) | 2844 | +void helper_fldenv(target_ulong ptr, int data32) |
| 2456 | { | 2845 | { |
| 2457 | int i, fpus, fptag; | 2846 | int i, fpus, fptag; |
| 2458 | 2847 | ||
| @@ -2474,7 +2863,7 @@ void helper_fldenv(uint8_t *ptr, int data32) | @@ -2474,7 +2863,7 @@ void helper_fldenv(uint8_t *ptr, int data32) | ||
| 2474 | } | 2863 | } |
| 2475 | } | 2864 | } |
| 2476 | 2865 | ||
| 2477 | -void helper_fsave(uint8_t *ptr, int data32) | 2866 | +void helper_fsave(target_ulong ptr, int data32) |
| 2478 | { | 2867 | { |
| 2479 | CPU86_LDouble tmp; | 2868 | CPU86_LDouble tmp; |
| 2480 | int i; | 2869 | int i; |
| @@ -2502,7 +2891,7 @@ void helper_fsave(uint8_t *ptr, int data32) | @@ -2502,7 +2891,7 @@ void helper_fsave(uint8_t *ptr, int data32) | ||
| 2502 | env->fptags[7] = 1; | 2891 | env->fptags[7] = 1; |
| 2503 | } | 2892 | } |
| 2504 | 2893 | ||
| 2505 | -void helper_frstor(uint8_t *ptr, int data32) | 2894 | +void helper_frstor(target_ulong ptr, int data32) |
| 2506 | { | 2895 | { |
| 2507 | CPU86_LDouble tmp; | 2896 | CPU86_LDouble tmp; |
| 2508 | int i; | 2897 | int i; |
| @@ -2517,7 +2906,78 @@ void helper_frstor(uint8_t *ptr, int data32) | @@ -2517,7 +2906,78 @@ void helper_frstor(uint8_t *ptr, int data32) | ||
| 2517 | } | 2906 | } |
| 2518 | } | 2907 | } |
| 2519 | 2908 | ||
| 2520 | -/* XXX: merge with helper_fstt ? */ | 2909 | +void helper_fxsave(target_ulong ptr, int data64) |
| 2910 | +{ | ||
| 2911 | + int fpus, fptag, i, nb_xmm_regs; | ||
| 2912 | + CPU86_LDouble tmp; | ||
| 2913 | + target_ulong addr; | ||
| 2914 | + | ||
| 2915 | + fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; | ||
| 2916 | + fptag = 0; | ||
| 2917 | + for(i = 0; i < 8; i++) { | ||
| 2918 | + fptag |= ((!env->fptags[(env->fpstt + i) & 7]) << i); | ||
| 2919 | + } | ||
| 2920 | + stw(ptr, env->fpuc); | ||
| 2921 | + stw(ptr + 2, fpus); | ||
| 2922 | + stw(ptr + 4, fptag); | ||
| 2923 | + | ||
| 2924 | + addr = ptr + 0x20; | ||
| 2925 | + for(i = 0;i < 8; i++) { | ||
| 2926 | + tmp = ST(i); | ||
| 2927 | + helper_fstt(tmp, addr); | ||
| 2928 | + addr += 16; | ||
| 2929 | + } | ||
| 2930 | + | ||
| 2931 | + if (env->cr[4] & CR4_OSFXSR_MASK) { | ||
| 2932 | + /* XXX: finish it, endianness */ | ||
| 2933 | + stl(ptr + 0x18, 0); /* mxcsr */ | ||
| 2934 | + stl(ptr + 0x1c, 0); /* mxcsr_mask */ | ||
| 2935 | + nb_xmm_regs = 8 << data64; | ||
| 2936 | + addr = ptr + 0xa0; | ||
| 2937 | + for(i = 0; i < nb_xmm_regs; i++) { | ||
| 2938 | + stq(addr, env->xmm_regs[i].u.q[0]); | ||
| 2939 | + stq(addr + 8, env->xmm_regs[i].u.q[1]); | ||
| 2940 | + addr += 16; | ||
| 2941 | + } | ||
| 2942 | + } | ||
| 2943 | +} | ||
| 2944 | + | ||
| 2945 | +void helper_fxrstor(target_ulong ptr, int data64) | ||
| 2946 | +{ | ||
| 2947 | + int i, fpus, fptag, nb_xmm_regs; | ||
| 2948 | + CPU86_LDouble tmp; | ||
| 2949 | + target_ulong addr; | ||
| 2950 | + | ||
| 2951 | + env->fpuc = lduw(ptr); | ||
| 2952 | + fpus = lduw(ptr + 2); | ||
| 2953 | + fptag = ldub(ptr + 4); | ||
| 2954 | + env->fpstt = (fpus >> 11) & 7; | ||
| 2955 | + env->fpus = fpus & ~0x3800; | ||
| 2956 | + fptag ^= 0xff; | ||
| 2957 | + for(i = 0;i < 8; i++) { | ||
| 2958 | + env->fptags[(env->fpstt + i) & 7] = ((fptag >> i) & 1); | ||
| 2959 | + } | ||
| 2960 | + | ||
| 2961 | + addr = ptr + 0x20; | ||
| 2962 | + for(i = 0;i < 8; i++) { | ||
| 2963 | + tmp = helper_fldt(addr); | ||
| 2964 | + ST(i) = tmp; | ||
| 2965 | + addr += 16; | ||
| 2966 | + } | ||
| 2967 | + | ||
| 2968 | + if (env->cr[4] & CR4_OSFXSR_MASK) { | ||
| 2969 | + /* XXX: finish it, endianness */ | ||
| 2970 | + //ldl(ptr + 0x18); | ||
| 2971 | + //ldl(ptr + 0x1c); | ||
| 2972 | + nb_xmm_regs = 8 << data64; | ||
| 2973 | + addr = ptr + 0xa0; | ||
| 2974 | + for(i = 0; i < nb_xmm_regs; i++) { | ||
| 2975 | + env->xmm_regs[i].u.q[0] = ldq(addr); | ||
| 2976 | + env->xmm_regs[i].u.q[1] = ldq(addr + 8); | ||
| 2977 | + addr += 16; | ||
| 2978 | + } | ||
| 2979 | + } | ||
| 2980 | +} | ||
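helper_fxsave/helper_fxrstor follow the fxsave memory image: control words at the start, MXCSR at 0x18, the eight FP registers in 16-byte slots from 0x20, and the XMM registers from 0xa0 (8 of them in 32-bit mode, 16 with REX in 64-bit mode, hence nb_xmm_regs = 8 << data64). A reference sketch of that layout (packing assumed; the real instruction also stores FOP/FIP/FDP, which the helper leaves untouched for now):

    struct fxsave_area {
        uint16_t fcw;                /* 0x00 */
        uint16_t fsw;                /* 0x02 */
        uint16_t ftw;                /* 0x04 */
        uint8_t  pad0[0x12];         /* 0x06..0x17: FOP/FIP/FDP, unused here */
        uint32_t mxcsr;              /* 0x18 */
        uint32_t mxcsr_mask;         /* 0x1c */
        uint8_t  st_regs[8][16];     /* 0x20: ST0..ST7, one 16-byte slot each */
        uint8_t  xmm_regs[16][16];   /* 0xa0: XMM0..XMM7 (or ..XMM15 in 64-bit) */
    };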
| 2521 | 2981 | ||
| 2522 | #ifndef USE_X86LDOUBLE | 2982 | #ifndef USE_X86LDOUBLE |
| 2523 | 2983 | ||
| @@ -2575,6 +3035,179 @@ CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper) | @@ -2575,6 +3035,179 @@ CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper) | ||
| 2575 | } | 3035 | } |
| 2576 | #endif | 3036 | #endif |
| 2577 | 3037 | ||
| 3038 | +#ifdef TARGET_X86_64 | ||
| 3039 | + | ||
| 3040 | +//#define DEBUG_MULDIV | ||
| 3041 | + | ||
| 3042 | +static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) | ||
| 3043 | +{ | ||
| 3044 | + *plow += a; | ||
| 3045 | + /* carry test */ | ||
| 3046 | + if (*plow < a) | ||
| 3047 | + (*phigh)++; | ||
| 3048 | + *phigh += b; | ||
| 3049 | +} | ||
| 3050 | + | ||
| 3051 | +static void neg128(uint64_t *plow, uint64_t *phigh) | ||
| 3052 | +{ | ||
| 3053 | + *plow = ~ *plow; | ||
| 3054 | + *phigh = ~ *phigh; | ||
| 3055 | + add128(plow, phigh, 1, 0); | ||
| 3056 | +} | ||
| 3057 | + | ||
| 3058 | +static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) | ||
| 3059 | +{ | ||
| 3060 | + uint32_t a0, a1, b0, b1; | ||
| 3061 | + uint64_t v; | ||
| 3062 | + | ||
| 3063 | + a0 = a; | ||
| 3064 | + a1 = a >> 32; | ||
| 3065 | + | ||
| 3066 | + b0 = b; | ||
| 3067 | + b1 = b >> 32; | ||
| 3068 | + | ||
| 3069 | + v = (uint64_t)a0 * (uint64_t)b0; | ||
| 3070 | + *plow = v; | ||
| 3071 | + *phigh = 0; | ||
| 3072 | + | ||
| 3073 | + v = (uint64_t)a0 * (uint64_t)b1; | ||
| 3074 | + add128(plow, phigh, v << 32, v >> 32); | ||
| 3075 | + | ||
| 3076 | + v = (uint64_t)a1 * (uint64_t)b0; | ||
| 3077 | + add128(plow, phigh, v << 32, v >> 32); | ||
| 3078 | + | ||
| 3079 | + v = (uint64_t)a1 * (uint64_t)b1; | ||
| 3080 | + *phigh += v; | ||
| 3081 | +#ifdef DEBUG_MULDIV | ||
| 3082 | + printf("mul: 0x%016llx * 0x%016llx = 0x%016llx%016llx\n", | ||
| 3083 | + a, b, *phigh, *plow); | ||
| 3084 | +#endif | ||
| 3085 | +} | ||
| 3086 | + | ||
| 3087 | +static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) | ||
| 3088 | +{ | ||
| 3089 | + int sa, sb; | ||
| 3090 | + sa = (a < 0); | ||
| 3091 | + if (sa) | ||
| 3092 | + a = -a; | ||
| 3093 | + sb = (b < 0); | ||
| 3094 | + if (sb) | ||
| 3095 | + b = -b; | ||
| 3096 | + mul64(plow, phigh, a, b); | ||
| 3097 | + if (sa ^ sb) { | ||
| 3098 | + neg128(plow, phigh); | ||
| 3099 | + } | ||
| 3100 | +} | ||
| 3101 | + | ||
| 3102 | +static void div64(uint64_t *plow, uint64_t *phigh, uint64_t b) | ||
| 3103 | +{ | ||
| 3104 | + uint64_t q, r, a1, a0; | ||
| 3105 | + int i, qb; | ||
| 3106 | + | ||
| 3107 | + a0 = *plow; | ||
| 3108 | + a1 = *phigh; | ||
| 3109 | + if (a1 == 0) { | ||
| 3110 | + q = a0 / b; | ||
| 3111 | + r = a0 % b; | ||
| 3112 | + *plow = q; | ||
| 3113 | + *phigh = r; | ||
| 3114 | + } else { | ||
| 3115 | + /* XXX: use a better algorithm */ | ||
| 3116 | + for(i = 0; i < 64; i++) { | ||
| 3117 | + if (a1 >= b) { | ||
| 3118 | + a1 -= b; | ||
| 3119 | + qb = 1; | ||
| 3120 | + } else { | ||
| 3121 | + qb = 0; | ||
| 3122 | + } | ||
| 3123 | + a1 = (a1 << 1) | (a0 >> 63); | ||
| 3124 | + a0 = (a0 << 1) | qb; | ||
| 3125 | + } | ||
| 3126 | +#if defined(DEBUG_MULDIV) | ||
| 3127 | + printf("div: 0x%016llx%016llx / 0x%016llx: q=0x%016llx r=0x%016llx\n", | ||
| 3128 | + *phigh, *plow, b, a0, a1); | ||
| 3129 | +#endif | ||
| 3130 | + *plow = a0; | ||
| 3131 | + *phigh = a1; | ||
| 3132 | + } | ||
| 3133 | +} | ||
| 3134 | + | ||
| 3135 | +static void idiv64(uint64_t *plow, uint64_t *phigh, int64_t b) | ||
| 3136 | +{ | ||
| 3137 | + int sa, sb; | ||
| 3138 | + sa = ((int64_t)*phigh < 0); | ||
| 3139 | + if (sa) | ||
| 3140 | + neg128(plow, phigh); | ||
| 3141 | + sb = (b < 0); | ||
| 3142 | + if (sb) | ||
| 3143 | + b = -b; | ||
| 3144 | + div64(plow, phigh, b); | ||
| 3145 | + if (sa ^ sb) | ||
| 3146 | + *plow = - *plow; | ||
| 3147 | + if (sb) | ||
| 3148 | + *phigh = - *phigh; | ||
| 3149 | +} | ||
| 3150 | + | ||
| 3151 | +void helper_mulq_EAX_T0(void) | ||
| 3152 | +{ | ||
| 3153 | + uint64_t r0, r1; | ||
| 3154 | + | ||
| 3155 | + mul64(&r0, &r1, EAX, T0); | ||
| 3156 | + EAX = r0; | ||
| 3157 | + EDX = r1; | ||
| 3158 | + CC_DST = r0; | ||
| 3159 | + CC_SRC = r1; | ||
| 3160 | +} | ||
| 3161 | + | ||
| 3162 | +void helper_imulq_EAX_T0(void) | ||
| 3163 | +{ | ||
| 3164 | + uint64_t r0, r1; | ||
| 3165 | + | ||
| 3166 | + imul64(&r0, &r1, EAX, T0); | ||
| 3167 | + EAX = r0; | ||
| 3168 | + EDX = r1; | ||
| 3169 | + CC_DST = r0; | ||
| 3170 | + CC_SRC = (r1 != (r0 >> 63)); | ||
| 3171 | +} | ||
| 3172 | + | ||
| 3173 | +void helper_imulq_T0_T1(void) | ||
| 3174 | +{ | ||
| 3175 | + uint64_t r0, r1; | ||
| 3176 | + | ||
| 3177 | + imul64(&r0, &r1, T0, T1); | ||
| 3178 | + T0 = r0; | ||
| 3179 | + CC_DST = r0; | ||
| 3180 | + CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63)); | ||
| 3181 | +} | ||
| 3182 | + | ||
| 3183 | +void helper_divq_EAX_T0(void) | ||
| 3184 | +{ | ||
| 3185 | + uint64_t r0, r1; | ||
| 3186 | + if (T0 == 0) { | ||
| 3187 | + raise_exception(EXCP00_DIVZ); | ||
| 3188 | + } | ||
| 3189 | + r0 = EAX; | ||
| 3190 | + r1 = EDX; | ||
| 3191 | + div64(&r0, &r1, T0); | ||
| 3192 | + EAX = r0; | ||
| 3193 | + EDX = r1; | ||
| 3194 | +} | ||
| 3195 | + | ||
| 3196 | +void helper_idivq_EAX_T0(void) | ||
| 3197 | +{ | ||
| 3198 | + uint64_t r0, r1; | ||
| 3199 | + if (T0 == 0) { | ||
| 3200 | + raise_exception(EXCP00_DIVZ); | ||
| 3201 | + } | ||
| 3202 | + r0 = EAX; | ||
| 3203 | + r1 = EDX; | ||
| 3204 | + idiv64(&r0, &r1, T0); | ||
| 3205 | + EAX = r0; | ||
| 3206 | + EDX = r1; | ||
| 3207 | +} | ||
| 3208 | + | ||
| 3209 | +#endif | ||
| 3210 | + | ||
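mul64/imul64 build the 128-bit product from four 32x32 partial products, and div64/idiv64 fall back to a bit-by-bit restoring division when the high half is non-zero. A quick host-side sanity check of mul64 (a sketch only; it relies on GCC's unsigned __int128 extension on a 64-bit host and would have to live in this file since the helpers are static):

    #include <assert.h>

    static void check_mul64(uint64_t a, uint64_t b)
    {
        uint64_t lo, hi;
        unsigned __int128 ref = (unsigned __int128)a * b;

        mul64(&lo, &hi, a, b);
        assert(lo == (uint64_t)ref);
        assert(hi == (uint64_t)(ref >> 64));
    }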
| 2578 | #if !defined(CONFIG_USER_ONLY) | 3211 | #if !defined(CONFIG_USER_ONLY) |
| 2579 | 3212 | ||
| 2580 | #define MMUSUFFIX _mmu | 3213 | #define MMUSUFFIX _mmu |
| @@ -2598,7 +3231,7 @@ CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper) | @@ -2598,7 +3231,7 @@ CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper) | ||
| 2598 | NULL, it means that the function was called in C code (i.e. not | 3231 | NULL, it means that the function was called in C code (i.e. not |
| 2599 | from generated code or from helper.c) */ | 3232 | from generated code or from helper.c) */ |
| 2600 | /* XXX: fix it to restore all registers */ | 3233 | /* XXX: fix it to restore all registers */ |
| 2601 | -void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr) | 3234 | +void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr) |
| 2602 | { | 3235 | { |
| 2603 | TranslationBlock *tb; | 3236 | TranslationBlock *tb; |
| 2604 | int ret; | 3237 | int ret; |
target-i386/helper2.c
| @@ -77,6 +77,41 @@ CPUX86State *cpu_x86_init(void) | @@ -77,6 +77,41 @@ CPUX86State *cpu_x86_init(void) | ||
| 77 | asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7)); | 77 | asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7)); |
| 78 | } | 78 | } |
| 79 | #endif | 79 | #endif |
| 80 | + { | ||
| 81 | + int family, model, stepping; | ||
| 82 | +#ifdef TARGET_X86_64 | ||
| 83 | + env->cpuid_vendor1 = 0x68747541; /* "Auth" */ | ||
| 84 | + env->cpuid_vendor2 = 0x69746e65; /* "enti" */ | ||
| 85 | + env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */ | ||
| 86 | + family = 6; | ||
| 87 | + model = 2; | ||
| 88 | + stepping = 3; | ||
| 89 | +#else | ||
| 90 | + env->cpuid_vendor1 = 0x756e6547; /* "Genu" */ | ||
| 91 | + env->cpuid_vendor2 = 0x49656e69; /* "ineI" */ | ||
| 92 | + env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */ | ||
| 93 | +#if 0 | ||
| 94 | + /* pentium 75-200 */ | ||
| 95 | + family = 5; | ||
| 96 | + model = 2; | ||
| 97 | + stepping = 11; | ||
| 98 | +#else | ||
| 99 | + /* pentium pro */ | ||
| 100 | + family = 6; | ||
| 101 | + model = 1; | ||
| 102 | + stepping = 3; | ||
| 103 | +#endif | ||
| 104 | +#endif | ||
| 105 | + env->cpuid_version = (family << 8) | (model << 4) | stepping; | ||
| 106 | + env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE | | ||
| 107 | + CPUID_TSC | CPUID_MSR | CPUID_MCE | | ||
| 108 | + CPUID_CX8 | CPUID_PGE | CPUID_CMOV); | ||
| 109 | +#ifdef TARGET_X86_64 | ||
| 110 | + /* currently not enabled for std i386 because not fully tested */ | ||
| 111 | + env->cpuid_features |= CPUID_APIC | CPUID_FXSR | CPUID_PAE | | ||
| 112 | + CPUID_SSE | CPUID_SSE2; | ||
| 113 | +#endif | ||
| 114 | + } | ||
| 80 | cpu_single_env = env; | 115 | cpu_single_env = env; |
| 81 | cpu_reset(env); | 116 | cpu_reset(env); |
| 82 | return env; | 117 | return env; |
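cpuid_version packs family/model/stepping in the same layout CPUID leaf 1 reports them, so helper_cpuid can return it verbatim. Decoding it back (illustration only):

    int family   = (env->cpuid_version >> 8) & 0xf;
    int model    = (env->cpuid_version >> 4) & 0xf;
    int stepping =  env->cpuid_version       & 0xf;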
| @@ -107,12 +142,12 @@ void cpu_reset(CPUX86State *env) | @@ -107,12 +142,12 @@ void cpu_reset(CPUX86State *env) | ||
| 107 | env->tr.limit = 0xffff; | 142 | env->tr.limit = 0xffff; |
| 108 | env->tr.flags = DESC_P_MASK; | 143 | env->tr.flags = DESC_P_MASK; |
| 109 | 144 | ||
| 110 | - cpu_x86_load_seg_cache(env, R_CS, 0xf000, (uint8_t *)0xffff0000, 0xffff, 0); | ||
| 111 | - cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0xffff, 0); | ||
| 112 | - cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0xffff, 0); | ||
| 113 | - cpu_x86_load_seg_cache(env, R_SS, 0, NULL, 0xffff, 0); | ||
| 114 | - cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0xffff, 0); | ||
| 115 | - cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0xffff, 0); | 145 | + cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0); |
| 146 | + cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0); | ||
| 147 | + cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0); | ||
| 148 | + cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0); | ||
| 149 | + cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0); | ||
| 150 | + cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0); | ||
| 116 | 151 | ||
| 117 | env->eip = 0xfff0; | 152 | env->eip = 0xfff0; |
| 118 | env->regs[R_EDX] = 0x600; /* indicate P6 processor */ | 153 | env->regs[R_EDX] = 0x600; /* indicate P6 processor */ |
| @@ -136,36 +171,56 @@ void cpu_x86_close(CPUX86State *env) | @@ -136,36 +171,56 @@ void cpu_x86_close(CPUX86State *env) | ||
| 136 | static const char *cc_op_str[] = { | 171 | static const char *cc_op_str[] = { |
| 137 | "DYNAMIC", | 172 | "DYNAMIC", |
| 138 | "EFLAGS", | 173 | "EFLAGS", |
| 174 | + | ||
| 139 | "MULB", | 175 | "MULB", |
| 140 | "MULW", | 176 | "MULW", |
| 141 | "MULL", | 177 | "MULL", |
| 178 | + "MULQ", | ||
| 179 | + | ||
| 142 | "ADDB", | 180 | "ADDB", |
| 143 | "ADDW", | 181 | "ADDW", |
| 144 | "ADDL", | 182 | "ADDL", |
| 183 | + "ADDQ", | ||
| 184 | + | ||
| 145 | "ADCB", | 185 | "ADCB", |
| 146 | "ADCW", | 186 | "ADCW", |
| 147 | "ADCL", | 187 | "ADCL", |
| 188 | + "ADCQ", | ||
| 189 | + | ||
| 148 | "SUBB", | 190 | "SUBB", |
| 149 | "SUBW", | 191 | "SUBW", |
| 150 | "SUBL", | 192 | "SUBL", |
| 193 | + "SUBQ", | ||
| 194 | + | ||
| 151 | "SBBB", | 195 | "SBBB", |
| 152 | "SBBW", | 196 | "SBBW", |
| 153 | "SBBL", | 197 | "SBBL", |
| 198 | + "SBBQ", | ||
| 199 | + | ||
| 154 | "LOGICB", | 200 | "LOGICB", |
| 155 | "LOGICW", | 201 | "LOGICW", |
| 156 | "LOGICL", | 202 | "LOGICL", |
| 203 | + "LOGICQ", | ||
| 204 | + | ||
| 157 | "INCB", | 205 | "INCB", |
| 158 | "INCW", | 206 | "INCW", |
| 159 | "INCL", | 207 | "INCL", |
| 208 | + "INCQ", | ||
| 209 | + | ||
| 160 | "DECB", | 210 | "DECB", |
| 161 | "DECW", | 211 | "DECW", |
| 162 | "DECL", | 212 | "DECL", |
| 213 | + "DECQ", | ||
| 214 | + | ||
| 163 | "SHLB", | 215 | "SHLB", |
| 164 | "SHLW", | 216 | "SHLW", |
| 165 | "SHLL", | 217 | "SHLL", |
| 218 | + "SHLQ", | ||
| 219 | + | ||
| 166 | "SARB", | 220 | "SARB", |
| 167 | "SARW", | 221 | "SARW", |
| 168 | "SARL", | 222 | "SARL", |
| 223 | + "SARQ", | ||
| 169 | }; | 224 | }; |
| 170 | 225 | ||
| 171 | void cpu_dump_state(CPUState *env, FILE *f, | 226 | void cpu_dump_state(CPUState *env, FILE *f, |
| @@ -177,55 +232,147 @@ void cpu_dump_state(CPUState *env, FILE *f, | @@ -177,55 +232,147 @@ void cpu_dump_state(CPUState *env, FILE *f, | ||
| 177 | static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; | 232 | static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; |
| 178 | 233 | ||
| 179 | eflags = env->eflags; | 234 | eflags = env->eflags; |
| 180 | - cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n" | ||
| 181 | - "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n" | ||
| 182 | - "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n", | ||
| 183 | - env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX], | ||
| 184 | - env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP], | ||
| 185 | - env->eip, eflags, | ||
| 186 | - eflags & DF_MASK ? 'D' : '-', | ||
| 187 | - eflags & CC_O ? 'O' : '-', | ||
| 188 | - eflags & CC_S ? 'S' : '-', | ||
| 189 | - eflags & CC_Z ? 'Z' : '-', | ||
| 190 | - eflags & CC_A ? 'A' : '-', | ||
| 191 | - eflags & CC_P ? 'P' : '-', | ||
| 192 | - eflags & CC_C ? 'C' : '-', | ||
| 193 | - env->hflags & HF_CPL_MASK, | ||
| 194 | - (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, | ||
| 195 | - (env->a20_mask >> 20) & 1); | ||
| 196 | - for(i = 0; i < 6; i++) { | ||
| 197 | - SegmentCache *sc = &env->segs[i]; | ||
| 198 | - cpu_fprintf(f, "%s =%04x %08x %08x %08x\n", | ||
| 199 | - seg_name[i], | ||
| 200 | - sc->selector, | ||
| 201 | - (int)sc->base, | ||
| 202 | - sc->limit, | ||
| 203 | - sc->flags); | 235 | +#ifdef TARGET_X86_64 |
| 236 | + if (env->hflags & HF_CS64_MASK) { | ||
| 237 | + cpu_fprintf(f, | ||
| 238 | + "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n" | ||
| 239 | + "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n" | ||
| 240 | + "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n" | ||
| 241 | + "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n" | ||
| 242 | + "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n", | ||
| 243 | + env->regs[R_EAX], | ||
| 244 | + env->regs[R_EBX], | ||
| 245 | + env->regs[R_ECX], | ||
| 246 | + env->regs[R_EDX], | ||
| 247 | + env->regs[R_ESI], | ||
| 248 | + env->regs[R_EDI], | ||
| 249 | + env->regs[R_EBP], | ||
| 250 | + env->regs[R_ESP], | ||
| 251 | + env->regs[8], | ||
| 252 | + env->regs[9], | ||
| 253 | + env->regs[10], | ||
| 254 | + env->regs[11], | ||
| 255 | + env->regs[12], | ||
| 256 | + env->regs[13], | ||
| 257 | + env->regs[14], | ||
| 258 | + env->regs[15], | ||
| 259 | + env->eip, eflags, | ||
| 260 | + eflags & DF_MASK ? 'D' : '-', | ||
| 261 | + eflags & CC_O ? 'O' : '-', | ||
| 262 | + eflags & CC_S ? 'S' : '-', | ||
| 263 | + eflags & CC_Z ? 'Z' : '-', | ||
| 264 | + eflags & CC_A ? 'A' : '-', | ||
| 265 | + eflags & CC_P ? 'P' : '-', | ||
| 266 | + eflags & CC_C ? 'C' : '-', | ||
| 267 | + env->hflags & HF_CPL_MASK, | ||
| 268 | + (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, | ||
| 269 | + (env->a20_mask >> 20) & 1); | ||
| 270 | + } else | ||
| 271 | +#endif | ||
| 272 | + { | ||
| 273 | + cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n" | ||
| 274 | + "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n" | ||
| 275 | + "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n", | ||
| 276 | + (uint32_t)env->regs[R_EAX], | ||
| 277 | + (uint32_t)env->regs[R_EBX], | ||
| 278 | + (uint32_t)env->regs[R_ECX], | ||
| 279 | + (uint32_t)env->regs[R_EDX], | ||
| 280 | + (uint32_t)env->regs[R_ESI], | ||
| 281 | + (uint32_t)env->regs[R_EDI], | ||
| 282 | + (uint32_t)env->regs[R_EBP], | ||
| 283 | + (uint32_t)env->regs[R_ESP], | ||
| 284 | + (uint32_t)env->eip, eflags, | ||
| 285 | + eflags & DF_MASK ? 'D' : '-', | ||
| 286 | + eflags & CC_O ? 'O' : '-', | ||
| 287 | + eflags & CC_S ? 'S' : '-', | ||
| 288 | + eflags & CC_Z ? 'Z' : '-', | ||
| 289 | + eflags & CC_A ? 'A' : '-', | ||
| 290 | + eflags & CC_P ? 'P' : '-', | ||
| 291 | + eflags & CC_C ? 'C' : '-', | ||
| 292 | + env->hflags & HF_CPL_MASK, | ||
| 293 | + (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, | ||
| 294 | + (env->a20_mask >> 20) & 1); | ||
| 295 | + } | ||
| 296 | + | ||
| 297 | +#ifdef TARGET_X86_64 | ||
| 298 | + if (env->hflags & HF_LMA_MASK) { | ||
| 299 | + for(i = 0; i < 6; i++) { | ||
| 300 | + SegmentCache *sc = &env->segs[i]; | ||
| 301 | + cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n", | ||
| 302 | + seg_name[i], | ||
| 303 | + sc->selector, | ||
| 304 | + sc->base, | ||
| 305 | + sc->limit, | ||
| 306 | + sc->flags); | ||
| 307 | + } | ||
| 308 | + cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n", | ||
| 309 | + env->ldt.selector, | ||
| 310 | + env->ldt.base, | ||
| 311 | + env->ldt.limit, | ||
| 312 | + env->ldt.flags); | ||
| 313 | + cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n", | ||
| 314 | + env->tr.selector, | ||
| 315 | + env->tr.base, | ||
| 316 | + env->tr.limit, | ||
| 317 | + env->tr.flags); | ||
| 318 | + cpu_fprintf(f, "GDT= %016llx %08x\n", | ||
| 319 | + env->gdt.base, env->gdt.limit); | ||
| 320 | + cpu_fprintf(f, "IDT= %016llx %08x\n", | ||
| 321 | + env->idt.base, env->idt.limit); | ||
| 322 | + cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n", | ||
| 323 | + (uint32_t)env->cr[0], | ||
| 324 | + env->cr[2], | ||
| 325 | + env->cr[3], | ||
| 326 | + (uint32_t)env->cr[4]); | ||
| 327 | + } else | ||
| 328 | +#endif | ||
| 329 | + { | ||
| 330 | + for(i = 0; i < 6; i++) { | ||
| 331 | + SegmentCache *sc = &env->segs[i]; | ||
| 332 | + cpu_fprintf(f, "%s =%04x %08x %08x %08x\n", | ||
| 333 | + seg_name[i], | ||
| 334 | + sc->selector, | ||
| 335 | + (uint32_t)sc->base, | ||
| 336 | + sc->limit, | ||
| 337 | + sc->flags); | ||
| 338 | + } | ||
| 339 | + cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n", | ||
| 340 | + env->ldt.selector, | ||
| 341 | + (uint32_t)env->ldt.base, | ||
| 342 | + env->ldt.limit, | ||
| 343 | + env->ldt.flags); | ||
| 344 | + cpu_fprintf(f, "TR =%04x %08x %08x %08x\n", | ||
| 345 | + env->tr.selector, | ||
| 346 | + (uint32_t)env->tr.base, | ||
| 347 | + env->tr.limit, | ||
| 348 | + env->tr.flags); | ||
| 349 | + cpu_fprintf(f, "GDT= %08x %08x\n", | ||
| 350 | + (uint32_t)env->gdt.base, env->gdt.limit); | ||
| 351 | + cpu_fprintf(f, "IDT= %08x %08x\n", | ||
| 352 | + (uint32_t)env->idt.base, env->idt.limit); | ||
| 353 | + cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n", | ||
| 354 | + (uint32_t)env->cr[0], | ||
| 355 | + (uint32_t)env->cr[2], | ||
| 356 | + (uint32_t)env->cr[3], | ||
| 357 | + (uint32_t)env->cr[4]); | ||
| 204 | } | 358 | } |
| 205 | - cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n", | ||
| 206 | - env->ldt.selector, | ||
| 207 | - (int)env->ldt.base, | ||
| 208 | - env->ldt.limit, | ||
| 209 | - env->ldt.flags); | ||
| 210 | - cpu_fprintf(f, "TR =%04x %08x %08x %08x\n", | ||
| 211 | - env->tr.selector, | ||
| 212 | - (int)env->tr.base, | ||
| 213 | - env->tr.limit, | ||
| 214 | - env->tr.flags); | ||
| 215 | - cpu_fprintf(f, "GDT= %08x %08x\n", | ||
| 216 | - (int)env->gdt.base, env->gdt.limit); | ||
| 217 | - cpu_fprintf(f, "IDT= %08x %08x\n", | ||
| 218 | - (int)env->idt.base, env->idt.limit); | ||
| 219 | - cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n", | ||
| 220 | - env->cr[0], env->cr[2], env->cr[3], env->cr[4]); | ||
| 221 | - | ||
| 222 | if (flags & X86_DUMP_CCOP) { | 359 | if (flags & X86_DUMP_CCOP) { |
| 223 | if ((unsigned)env->cc_op < CC_OP_NB) | 360 | if ((unsigned)env->cc_op < CC_OP_NB) |
| 224 | snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]); | 361 | snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]); |
| 225 | else | 362 | else |
| 226 | snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); | 363 | snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); |
| 227 | - cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n", | ||
| 228 | - env->cc_src, env->cc_dst, cc_op_name); | 364 | +#ifdef TARGET_X86_64 |
| 365 | + if (env->hflags & HF_CS64_MASK) { | ||
| 366 | + cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n", | ||
| 367 | + env->cc_src, env->cc_dst, | ||
| 368 | + cc_op_name); | ||
| 369 | + } else | ||
| 370 | +#endif | ||
| 371 | + { | ||
| 372 | + cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n", | ||
| 373 | + (uint32_t)env->cc_src, (uint32_t)env->cc_dst, | ||
| 374 | + cc_op_name); | ||
| 375 | + } | ||
| 229 | } | 376 | } |
| 230 | if (flags & X86_DUMP_FPU) { | 377 | if (flags & X86_DUMP_FPU) { |
| 231 | cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n", | 378 | cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n", |
| @@ -274,6 +421,24 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) | @@ -274,6 +421,24 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) | ||
| 274 | (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) { | 421 | (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) { |
| 275 | tlb_flush(env, 1); | 422 | tlb_flush(env, 1); |
| 276 | } | 423 | } |
| 424 | + | ||
| 425 | +#ifdef TARGET_X86_64 | ||
| 426 | + if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) && | ||
| 427 | + (env->efer & MSR_EFER_LME)) { | ||
| 428 | + /* enter long mode */ | ||
| 429 | + /* XXX: generate an exception */ | ||
| 430 | + if (!(env->cr[4] & CR4_PAE_MASK)) | ||
| 431 | + return; | ||
| 432 | + env->efer |= MSR_EFER_LMA; | ||
| 433 | + env->hflags |= HF_LMA_MASK; | ||
| 434 | + } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) && | ||
| 435 | + (env->efer & MSR_EFER_LMA)) { | ||
| 436 | + /* exit long mode */ | ||
| 437 | + env->efer &= ~MSR_EFER_LMA; | ||
| 438 | + env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK); | ||
| 439 | + env->eip &= 0xffffffff; | ||
| 440 | + } | ||
| 441 | +#endif | ||
| 277 | env->cr[0] = new_cr0 | CR0_ET_MASK; | 442 | env->cr[0] = new_cr0 | CR0_ET_MASK; |
| 278 | 443 | ||
| 279 | /* update PE flag in hidden flags */ | 444 | /* update PE flag in hidden flags */ |
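Note: the hunk above ties CR0 writes to long-mode activation: setting CR0.PG while EFER.LME is set turns LMA on (provided CR4.PAE is already enabled), and clearing CR0.PG leaves long mode again, dropping the 64-bit code segment flag and truncating EIP to 32 bits. A minimal standalone sketch of the activation condition, reusing only masks that appear in this patch (the helper name is illustrative, not part of the change):

    /* Sketch: would writing new_cr0 activate long mode?  Mirrors the
       condition tested in cpu_x86_update_cr0() above. */
    static int would_activate_long_mode(CPUX86State *env, uint32_t new_cr0)
    {
        return !(env->cr[0] & CR0_PG_MASK) &&   /* paging currently off  */
               (new_cr0 & CR0_PG_MASK) &&       /* ... being switched on */
               (env->efer & MSR_EFER_LME) &&    /* EFER.LME set          */
               (env->cr[4] & CR4_PAE_MASK);     /* PAE already enabled   */
    }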
| @@ -286,12 +451,12 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) | @@ -286,12 +451,12 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) | ||
| 286 | ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); | 451 | ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); |
| 287 | } | 452 | } |
| 288 | 453 | ||
| 289 | -void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3) | 454 | +void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3) |
| 290 | { | 455 | { |
| 291 | env->cr[3] = new_cr3; | 456 | env->cr[3] = new_cr3; |
| 292 | if (env->cr[0] & CR0_PG_MASK) { | 457 | if (env->cr[0] & CR0_PG_MASK) { |
| 293 | #if defined(DEBUG_MMU) | 458 | #if defined(DEBUG_MMU) |
| 294 | - printf("CR3 update: CR3=%08x\n", new_cr3); | 459 | + printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3); |
| 295 | #endif | 460 | #endif |
| 296 | tlb_flush(env, 0); | 461 | tlb_flush(env, 0); |
| 297 | } | 462 | } |
| @@ -300,7 +465,7 @@ void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3) | @@ -300,7 +465,7 @@ void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3) | ||
| 300 | void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) | 465 | void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) |
| 301 | { | 466 | { |
| 302 | #if defined(DEBUG_MMU) | 467 | #if defined(DEBUG_MMU) |
| 303 | - printf("CR4 update: CR4=%08x\n", env->cr[4]); | 468 | + printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]); |
| 304 | #endif | 469 | #endif |
| 305 | if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) != | 470 | if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) != |
| 306 | (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) { | 471 | (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) { |
| @@ -315,22 +480,51 @@ void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr) | @@ -315,22 +480,51 @@ void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr) | ||
| 315 | tlb_flush_page(env, addr); | 480 | tlb_flush_page(env, addr); |
| 316 | } | 481 | } |
| 317 | 482 | ||
| 483 | +static inline uint8_t *get_phys_mem_ptr(target_phys_addr_t addr) | ||
| 484 | +{ | ||
| 485 | + /* XXX: incorrect */ | ||
| 486 | + return phys_ram_base + addr; | ||
| 487 | +} | ||
| 488 | + | ||
| 489 | +/* WARNING: addr must be aligned */ | ||
| 490 | +uint32_t ldl_phys_aligned(target_phys_addr_t addr) | ||
| 491 | +{ | ||
| 492 | + uint8_t *ptr; | ||
| 493 | + uint32_t val; | ||
| 494 | + ptr = get_phys_mem_ptr(addr); | ||
| 495 | + if (!ptr) | ||
| 496 | + val = 0; | ||
| 497 | + else | ||
| 498 | + val = ldl_raw(ptr); | ||
| 499 | + return val; | ||
| 500 | +} | ||
| 501 | + | ||
| 502 | +void stl_phys_aligned(target_phys_addr_t addr, uint32_t val) | ||
| 503 | +{ | ||
| 504 | + uint8_t *ptr; | ||
| 505 | + ptr = get_phys_mem_ptr(addr); | ||
| 506 | + if (!ptr) | ||
| 507 | + return; | ||
| 508 | + stl_raw(ptr, val); | ||
| 509 | +} | ||
| 510 | + | ||
| 318 | /* return value: | 511 | /* return value: |
| 319 | -1 = cannot handle fault | 512 | -1 = cannot handle fault |
| 320 | 0 = nothing more to do | 513 | 0 = nothing more to do |
| 321 | 1 = generate PF fault | 514 | 1 = generate PF fault |
| 322 | 2 = soft MMU activation required for this block | 515 | 2 = soft MMU activation required for this block |
| 323 | */ | 516 | */ |
| 324 | -int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, | 517 | +int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, |
| 325 | int is_write, int is_user, int is_softmmu) | 518 | int is_write, int is_user, int is_softmmu) |
| 326 | { | 519 | { |
| 327 | - uint8_t *pde_ptr, *pte_ptr; | ||
| 328 | - uint32_t pde, pte, virt_addr, ptep; | 520 | + uint32_t pdpe_addr, pde_addr, pte_addr; |
| 521 | + uint32_t pde, pte, ptep, pdpe; | ||
| 329 | int error_code, is_dirty, prot, page_size, ret; | 522 | int error_code, is_dirty, prot, page_size, ret; |
| 330 | - unsigned long paddr, vaddr, page_offset; | 523 | + unsigned long paddr, page_offset; |
| 524 | + target_ulong vaddr, virt_addr; | ||
| 331 | 525 | ||
| 332 | #if defined(DEBUG_MMU) | 526 | #if defined(DEBUG_MMU) |
| 333 | - printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n", | 527 | + printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", |
| 334 | addr, is_write, is_user, env->eip); | 528 | addr, is_write, is_user, env->eip); |
| 335 | #endif | 529 | #endif |
| 336 | is_write &= 1; | 530 | is_write &= 1; |
| @@ -349,90 +543,166 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, | @@ -349,90 +543,166 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, | ||
| 349 | goto do_mapping; | 543 | goto do_mapping; |
| 350 | } | 544 | } |
| 351 | 545 | ||
| 352 | - /* page directory entry */ | ||
| 353 | - pde_ptr = phys_ram_base + | ||
| 354 | - (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask); | ||
| 355 | - pde = ldl_raw(pde_ptr); | ||
| 356 | - if (!(pde & PG_PRESENT_MASK)) { | ||
| 357 | - error_code = 0; | ||
| 358 | - goto do_fault; | ||
| 359 | - } | ||
| 360 | - /* if PSE bit is set, then we use a 4MB page */ | ||
| 361 | - if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { | ||
| 362 | - if (is_user) { | ||
| 363 | - if (!(pde & PG_USER_MASK)) | ||
| 364 | - goto do_fault_protect; | ||
| 365 | - if (is_write && !(pde & PG_RW_MASK)) | ||
| 366 | - goto do_fault_protect; | ||
| 367 | - } else { | ||
| 368 | - if ((env->cr[0] & CR0_WP_MASK) && | ||
| 369 | - is_write && !(pde & PG_RW_MASK)) | ||
| 370 | - goto do_fault_protect; | 546 | + |
| 547 | + if (env->cr[4] & CR4_PAE_MASK) { | ||
| 548 | + /* XXX: we only use 32 bit physical addresses */ | ||
| 549 | +#ifdef TARGET_X86_64 | ||
| 550 | + if (env->hflags & HF_LMA_MASK) { | ||
| 551 | + uint32_t pml4e_addr, pml4e; | ||
| 552 | + int32_t sext; | ||
| 553 | + | ||
| 554 | + /* XXX: handle user + rw rights */ | ||
| 555 | + /* XXX: handle NX flag */ | ||
| 556 | + /* test virtual address sign extension */ | ||
| 557 | + sext = (int64_t)addr >> 47; | ||
| 558 | + if (sext != 0 && sext != -1) { | ||
| 559 | + error_code = 0; | ||
| 560 | + goto do_fault; | ||
| 561 | + } | ||
| 562 | + | ||
| 563 | + pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & | ||
| 564 | + env->a20_mask; | ||
| 565 | + pml4e = ldl_phys_aligned(pml4e_addr); | ||
| 566 | + if (!(pml4e & PG_PRESENT_MASK)) { | ||
| 567 | + error_code = 0; | ||
| 568 | + goto do_fault; | ||
| 569 | + } | ||
| 570 | + if (!(pml4e & PG_ACCESSED_MASK)) { | ||
| 571 | + pml4e |= PG_ACCESSED_MASK; | ||
| 572 | + stl_phys_aligned(pml4e_addr, pml4e); | ||
| 573 | + } | ||
| 574 | + | ||
| 575 | + pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & | ||
| 576 | + env->a20_mask; | ||
| 577 | + pdpe = ldl_phys_aligned(pdpe_addr); | ||
| 578 | + if (!(pdpe & PG_PRESENT_MASK)) { | ||
| 579 | + error_code = 0; | ||
| 580 | + goto do_fault; | ||
| 581 | + } | ||
| 582 | + if (!(pdpe & PG_ACCESSED_MASK)) { | ||
| 583 | + pdpe |= PG_ACCESSED_MASK; | ||
| 584 | + stl_phys_aligned(pdpe_addr, pdpe); | ||
| 585 | + } | ||
| 586 | + } else | ||
| 587 | +#endif | ||
| 588 | + { | ||
| 589 | + pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & | ||
| 590 | + env->a20_mask; | ||
| 591 | + pdpe = ldl_phys_aligned(pdpe_addr); | ||
| 592 | + if (!(pdpe & PG_PRESENT_MASK)) { | ||
| 593 | + error_code = 0; | ||
| 594 | + goto do_fault; | ||
| 595 | + } | ||
| 371 | } | 596 | } |
| 372 | - is_dirty = is_write && !(pde & PG_DIRTY_MASK); | ||
| 373 | - if (!(pde & PG_ACCESSED_MASK) || is_dirty) { | ||
| 374 | - pde |= PG_ACCESSED_MASK; | ||
| 375 | - if (is_dirty) | ||
| 376 | - pde |= PG_DIRTY_MASK; | ||
| 377 | - stl_raw(pde_ptr, pde); | 597 | + |
| 598 | + pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) & | ||
| 599 | + env->a20_mask; | ||
| 600 | + pde = ldl_phys_aligned(pde_addr); | ||
| 601 | + if (!(pde & PG_PRESENT_MASK)) { | ||
| 602 | + error_code = 0; | ||
| 603 | + goto do_fault; | ||
| 378 | } | 604 | } |
| 379 | - | ||
| 380 | - pte = pde & ~0x003ff000; /* align to 4MB */ | ||
| 381 | - ptep = pte; | ||
| 382 | - page_size = 4096 * 1024; | ||
| 383 | - virt_addr = addr & ~0x003fffff; | ||
| 384 | - } else { | ||
| 385 | - if (!(pde & PG_ACCESSED_MASK)) { | ||
| 386 | - pde |= PG_ACCESSED_MASK; | ||
| 387 | - stl_raw(pde_ptr, pde); | 605 | + if (pde & PG_PSE_MASK) { |
| 606 | + /* 2 MB page */ | ||
| 607 | + page_size = 2048 * 1024; | ||
| 608 | + goto handle_big_page; | ||
| 609 | + } else { | ||
| 610 | + /* 4 KB page */ | ||
| 611 | + if (!(pde & PG_ACCESSED_MASK)) { | ||
| 612 | + pde |= PG_ACCESSED_MASK; | ||
| 613 | + stl_phys_aligned(pde_addr, pde); | ||
| 614 | + } | ||
| 615 | + pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) & | ||
| 616 | + env->a20_mask; | ||
| 617 | + goto handle_4k_page; | ||
| 388 | } | 618 | } |
| 389 | - | 619 | + } else { |
| 390 | /* page directory entry */ | 620 | /* page directory entry */ |
| 391 | - pte_ptr = phys_ram_base + | ||
| 392 | - (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask); | ||
| 393 | - pte = ldl_raw(pte_ptr); | ||
| 394 | - if (!(pte & PG_PRESENT_MASK)) { | 621 | + pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & |
| 622 | + env->a20_mask; | ||
| 623 | + pde = ldl_phys_aligned(pde_addr); | ||
| 624 | + if (!(pde & PG_PRESENT_MASK)) { | ||
| 395 | error_code = 0; | 625 | error_code = 0; |
| 396 | goto do_fault; | 626 | goto do_fault; |
| 397 | } | 627 | } |
| 398 | - /* combine pde and pte user and rw protections */ | ||
| 399 | - ptep = pte & pde; | ||
| 400 | - if (is_user) { | ||
| 401 | - if (!(ptep & PG_USER_MASK)) | ||
| 402 | - goto do_fault_protect; | ||
| 403 | - if (is_write && !(ptep & PG_RW_MASK)) | ||
| 404 | - goto do_fault_protect; | 628 | + /* if PSE bit is set, then we use a 4MB page */ |
| 629 | + if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { | ||
| 630 | + page_size = 4096 * 1024; | ||
| 631 | + handle_big_page: | ||
| 632 | + if (is_user) { | ||
| 633 | + if (!(pde & PG_USER_MASK)) | ||
| 634 | + goto do_fault_protect; | ||
| 635 | + if (is_write && !(pde & PG_RW_MASK)) | ||
| 636 | + goto do_fault_protect; | ||
| 637 | + } else { | ||
| 638 | + if ((env->cr[0] & CR0_WP_MASK) && | ||
| 639 | + is_write && !(pde & PG_RW_MASK)) | ||
| 640 | + goto do_fault_protect; | ||
| 641 | + } | ||
| 642 | + is_dirty = is_write && !(pde & PG_DIRTY_MASK); | ||
| 643 | + if (!(pde & PG_ACCESSED_MASK) || is_dirty) { | ||
| 644 | + pde |= PG_ACCESSED_MASK; | ||
| 645 | + if (is_dirty) | ||
| 646 | + pde |= PG_DIRTY_MASK; | ||
| 647 | + stl_phys_aligned(pde_addr, pde); | ||
| 648 | + } | ||
| 649 | + | ||
| 650 | + pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */ | ||
| 651 | + ptep = pte; | ||
| 652 | + virt_addr = addr & ~(page_size - 1); | ||
| 405 | } else { | 653 | } else { |
| 406 | - if ((env->cr[0] & CR0_WP_MASK) && | ||
| 407 | - is_write && !(ptep & PG_RW_MASK)) | ||
| 408 | - goto do_fault_protect; | ||
| 409 | - } | ||
| 410 | - is_dirty = is_write && !(pte & PG_DIRTY_MASK); | ||
| 411 | - if (!(pte & PG_ACCESSED_MASK) || is_dirty) { | ||
| 412 | - pte |= PG_ACCESSED_MASK; | ||
| 413 | - if (is_dirty) | ||
| 414 | - pte |= PG_DIRTY_MASK; | ||
| 415 | - stl_raw(pte_ptr, pte); | 654 | + if (!(pde & PG_ACCESSED_MASK)) { |
| 655 | + pde |= PG_ACCESSED_MASK; | ||
| 656 | + stl_phys_aligned(pde_addr, pde); | ||
| 657 | + } | ||
| 658 | + | ||
| 659 | + /* page table entry */ | ||
| 660 | + pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & | ||
| 661 | + env->a20_mask; | ||
| 662 | + handle_4k_page: | ||
| 663 | + pte = ldl_phys_aligned(pte_addr); | ||
| 664 | + if (!(pte & PG_PRESENT_MASK)) { | ||
| 665 | + error_code = 0; | ||
| 666 | + goto do_fault; | ||
| 667 | + } | ||
| 668 | + /* combine pde and pte user and rw protections */ | ||
| 669 | + ptep = pte & pde; | ||
| 670 | + if (is_user) { | ||
| 671 | + if (!(ptep & PG_USER_MASK)) | ||
| 672 | + goto do_fault_protect; | ||
| 673 | + if (is_write && !(ptep & PG_RW_MASK)) | ||
| 674 | + goto do_fault_protect; | ||
| 675 | + } else { | ||
| 676 | + if ((env->cr[0] & CR0_WP_MASK) && | ||
| 677 | + is_write && !(ptep & PG_RW_MASK)) | ||
| 678 | + goto do_fault_protect; | ||
| 679 | + } | ||
| 680 | + is_dirty = is_write && !(pte & PG_DIRTY_MASK); | ||
| 681 | + if (!(pte & PG_ACCESSED_MASK) || is_dirty) { | ||
| 682 | + pte |= PG_ACCESSED_MASK; | ||
| 683 | + if (is_dirty) | ||
| 684 | + pte |= PG_DIRTY_MASK; | ||
| 685 | + stl_phys_aligned(pte_addr, pte); | ||
| 686 | + } | ||
| 687 | + page_size = 4096; | ||
| 688 | + virt_addr = addr & ~0xfff; | ||
| 416 | } | 689 | } |
| 417 | - page_size = 4096; | ||
| 418 | - virt_addr = addr & ~0xfff; | ||
| 419 | - } | ||
| 420 | 690 | ||
| 421 | - /* the page can be put in the TLB */ | ||
| 422 | - prot = PAGE_READ; | ||
| 423 | - if (pte & PG_DIRTY_MASK) { | ||
| 424 | - /* only set write access if already dirty... otherwise wait | ||
| 425 | - for dirty access */ | ||
| 426 | - if (is_user) { | ||
| 427 | - if (ptep & PG_RW_MASK) | ||
| 428 | - prot |= PAGE_WRITE; | ||
| 429 | - } else { | ||
| 430 | - if (!(env->cr[0] & CR0_WP_MASK) || | ||
| 431 | - (ptep & PG_RW_MASK)) | ||
| 432 | - prot |= PAGE_WRITE; | 691 | + /* the page can be put in the TLB */ |
| 692 | + prot = PAGE_READ; | ||
| 693 | + if (pte & PG_DIRTY_MASK) { | ||
| 694 | + /* only set write access if already dirty... otherwise wait | ||
| 695 | + for dirty access */ | ||
| 696 | + if (is_user) { | ||
| 697 | + if (ptep & PG_RW_MASK) | ||
| 698 | + prot |= PAGE_WRITE; | ||
| 699 | + } else { | ||
| 700 | + if (!(env->cr[0] & CR0_WP_MASK) || | ||
| 701 | + (ptep & PG_RW_MASK)) | ||
| 702 | + prot |= PAGE_WRITE; | ||
| 703 | + } | ||
| 433 | } | 704 | } |
| 434 | } | 705 | } |
| 435 | - | ||
| 436 | do_mapping: | 706 | do_mapping: |
| 437 | pte = pte & env->a20_mask; | 707 | pte = pte & env->a20_mask; |
| 438 | 708 |
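Note: in long mode the rewritten page walk first rejects non-canonical addresses (bits 63:47 must be all zeroes or all ones), then indexes four levels of 8-byte entries with successive 9-bit slices of the linear address, reading each entry with ldl_phys_aligned(). A self-contained sketch of the index arithmetic, with an arbitrary canonical address chosen purely for illustration:

    /* Sketch: split a 48-bit linear address the way the walk above does.
       The shifts and masks mirror the pml4e_addr/pdpe_addr/pde_addr/
       pte_addr computations; the printout is only for illustration. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x00007f1234567890ULL;   /* canonical user address */
        unsigned pml4 = (addr >> 39) & 0x1ff;    /* PML4 index  */
        unsigned pdp  = (addr >> 30) & 0x1ff;    /* PDP index   */
        unsigned pd   = (addr >> 21) & 0x1ff;    /* PD index    */
        unsigned pt   = (addr >> 12) & 0x1ff;    /* PT index    */
        unsigned off  = addr & 0xfff;            /* page offset */
        printf("pml4=%u pdp=%u pd=%u pt=%u off=0x%03x\n",
               pml4, pdp, pd, pt, off);
        return 0;
    }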
target-i386/op.c
| @@ -22,7 +22,7 @@ | @@ -22,7 +22,7 @@ | ||
| 22 | #include "exec.h" | 22 | #include "exec.h" |
| 23 | 23 | ||
| 24 | /* n must be a constant to be efficient */ | 24 | /* n must be a constant to be efficient */ |
| 25 | -static inline int lshift(int x, int n) | 25 | +static inline target_long lshift(target_long x, int n) |
| 26 | { | 26 | { |
| 27 | if (n >= 0) | 27 | if (n >= 0) |
| 28 | return x << n; | 28 | return x << n; |
| @@ -80,6 +80,58 @@ static inline int lshift(int x, int n) | @@ -80,6 +80,58 @@ static inline int lshift(int x, int n) | ||
| 80 | #undef REG | 80 | #undef REG |
| 81 | #undef REGNAME | 81 | #undef REGNAME |
| 82 | 82 | ||
| 83 | +#ifdef TARGET_X86_64 | ||
| 84 | + | ||
| 85 | +#define REG (env->regs[8]) | ||
| 86 | +#define REGNAME _R8 | ||
| 87 | +#include "opreg_template.h" | ||
| 88 | +#undef REG | ||
| 89 | +#undef REGNAME | ||
| 90 | + | ||
| 91 | +#define REG (env->regs[9]) | ||
| 92 | +#define REGNAME _R9 | ||
| 93 | +#include "opreg_template.h" | ||
| 94 | +#undef REG | ||
| 95 | +#undef REGNAME | ||
| 96 | + | ||
| 97 | +#define REG (env->regs[10]) | ||
| 98 | +#define REGNAME _R10 | ||
| 99 | +#include "opreg_template.h" | ||
| 100 | +#undef REG | ||
| 101 | +#undef REGNAME | ||
| 102 | + | ||
| 103 | +#define REG (env->regs[11]) | ||
| 104 | +#define REGNAME _R11 | ||
| 105 | +#include "opreg_template.h" | ||
| 106 | +#undef REG | ||
| 107 | +#undef REGNAME | ||
| 108 | + | ||
| 109 | +#define REG (env->regs[12]) | ||
| 110 | +#define REGNAME _R12 | ||
| 111 | +#include "opreg_template.h" | ||
| 112 | +#undef REG | ||
| 113 | +#undef REGNAME | ||
| 114 | + | ||
| 115 | +#define REG (env->regs[13]) | ||
| 116 | +#define REGNAME _R13 | ||
| 117 | +#include "opreg_template.h" | ||
| 118 | +#undef REG | ||
| 119 | +#undef REGNAME | ||
| 120 | + | ||
| 121 | +#define REG (env->regs[14]) | ||
| 122 | +#define REGNAME _R14 | ||
| 123 | +#include "opreg_template.h" | ||
| 124 | +#undef REG | ||
| 125 | +#undef REGNAME | ||
| 126 | + | ||
| 127 | +#define REG (env->regs[15]) | ||
| 128 | +#define REGNAME _R15 | ||
| 129 | +#include "opreg_template.h" | ||
| 130 | +#undef REG | ||
| 131 | +#undef REGNAME | ||
| 132 | + | ||
| 133 | +#endif | ||
| 134 | + | ||
| 83 | /* operations with flags */ | 135 | /* operations with flags */ |
| 84 | 136 | ||
| 85 | /* update flags with T0 and T1 (add/sub case) */ | 137 | /* update flags with T0 and T1 (add/sub case) */ |
| @@ -170,6 +222,13 @@ void OPPROTO op_bswapl_T0(void) | @@ -170,6 +222,13 @@ void OPPROTO op_bswapl_T0(void) | ||
| 170 | T0 = bswap32(T0); | 222 | T0 = bswap32(T0); |
| 171 | } | 223 | } |
| 172 | 224 | ||
| 225 | +#ifdef TARGET_X86_64 | ||
| 226 | +void OPPROTO op_bswapq_T0(void) | ||
| 227 | +{ | ||
| 228 | + T0 = bswap64(T0); | ||
| 229 | +} | ||
| 230 | +#endif | ||
| 231 | + | ||
| 173 | /* multiply/divide */ | 232 | /* multiply/divide */ |
| 174 | 233 | ||
| 175 | /* XXX: add eflags optimizations */ | 234 | /* XXX: add eflags optimizations */ |
| @@ -179,7 +238,7 @@ void OPPROTO op_mulb_AL_T0(void) | @@ -179,7 +238,7 @@ void OPPROTO op_mulb_AL_T0(void) | ||
| 179 | { | 238 | { |
| 180 | unsigned int res; | 239 | unsigned int res; |
| 181 | res = (uint8_t)EAX * (uint8_t)T0; | 240 | res = (uint8_t)EAX * (uint8_t)T0; |
| 182 | - EAX = (EAX & 0xffff0000) | res; | 241 | + EAX = (EAX & ~0xffff) | res; |
| 183 | CC_DST = res; | 242 | CC_DST = res; |
| 184 | CC_SRC = (res & 0xff00); | 243 | CC_SRC = (res & 0xff00); |
| 185 | } | 244 | } |
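Note: with 64-bit guest registers, the old (EAX & 0xffff0000) mask would also clear bits 63:32 on a 16-bit write; (EAX & ~0xffff) keeps everything above AX intact, which is the architecturally required behaviour for sub-register writes. The same change is applied to the other mul/div/movs helpers below. A small sketch of the difference (standalone, not part of the patch):

    /* Sketch: writing only AX must preserve bits 63:16 of the register. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rax = 0x1122334455667788ULL;
        uint16_t res = 0xabcd;
        uint64_t old_mask = (rax & 0xffff0000) | res;   /* clears bits 63:32 */
        uint64_t new_mask = (rax & ~0xffffULL) | res;   /* keeps bits 63:16  */
        printf("old=%016llx new=%016llx\n",
               (unsigned long long)old_mask, (unsigned long long)new_mask);
        return 0;
    }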
| @@ -188,7 +247,7 @@ void OPPROTO op_imulb_AL_T0(void) | @@ -188,7 +247,7 @@ void OPPROTO op_imulb_AL_T0(void) | ||
| 188 | { | 247 | { |
| 189 | int res; | 248 | int res; |
| 190 | res = (int8_t)EAX * (int8_t)T0; | 249 | res = (int8_t)EAX * (int8_t)T0; |
| 191 | - EAX = (EAX & 0xffff0000) | (res & 0xffff); | 250 | + EAX = (EAX & ~0xffff) | (res & 0xffff); |
| 192 | CC_DST = res; | 251 | CC_DST = res; |
| 193 | CC_SRC = (res != (int8_t)res); | 252 | CC_SRC = (res != (int8_t)res); |
| 194 | } | 253 | } |
| @@ -197,8 +256,8 @@ void OPPROTO op_mulw_AX_T0(void) | @@ -197,8 +256,8 @@ void OPPROTO op_mulw_AX_T0(void) | ||
| 197 | { | 256 | { |
| 198 | unsigned int res; | 257 | unsigned int res; |
| 199 | res = (uint16_t)EAX * (uint16_t)T0; | 258 | res = (uint16_t)EAX * (uint16_t)T0; |
| 200 | - EAX = (EAX & 0xffff0000) | (res & 0xffff); | ||
| 201 | - EDX = (EDX & 0xffff0000) | ((res >> 16) & 0xffff); | 259 | + EAX = (EAX & ~0xffff) | (res & 0xffff); |
| 260 | + EDX = (EDX & ~0xffff) | ((res >> 16) & 0xffff); | ||
| 202 | CC_DST = res; | 261 | CC_DST = res; |
| 203 | CC_SRC = res >> 16; | 262 | CC_SRC = res >> 16; |
| 204 | } | 263 | } |
| @@ -207,8 +266,8 @@ void OPPROTO op_imulw_AX_T0(void) | @@ -207,8 +266,8 @@ void OPPROTO op_imulw_AX_T0(void) | ||
| 207 | { | 266 | { |
| 208 | int res; | 267 | int res; |
| 209 | res = (int16_t)EAX * (int16_t)T0; | 268 | res = (int16_t)EAX * (int16_t)T0; |
| 210 | - EAX = (EAX & 0xffff0000) | (res & 0xffff); | ||
| 211 | - EDX = (EDX & 0xffff0000) | ((res >> 16) & 0xffff); | 269 | + EAX = (EAX & ~0xffff) | (res & 0xffff); |
| 270 | + EDX = (EDX & ~0xffff) | ((res >> 16) & 0xffff); | ||
| 212 | CC_DST = res; | 271 | CC_DST = res; |
| 213 | CC_SRC = (res != (int16_t)res); | 272 | CC_SRC = (res != (int16_t)res); |
| 214 | } | 273 | } |
| @@ -217,10 +276,10 @@ void OPPROTO op_mull_EAX_T0(void) | @@ -217,10 +276,10 @@ void OPPROTO op_mull_EAX_T0(void) | ||
| 217 | { | 276 | { |
| 218 | uint64_t res; | 277 | uint64_t res; |
| 219 | res = (uint64_t)((uint32_t)EAX) * (uint64_t)((uint32_t)T0); | 278 | res = (uint64_t)((uint32_t)EAX) * (uint64_t)((uint32_t)T0); |
| 220 | - EAX = res; | ||
| 221 | - EDX = res >> 32; | ||
| 222 | - CC_DST = res; | ||
| 223 | - CC_SRC = res >> 32; | 279 | + EAX = (uint32_t)res; |
| 280 | + EDX = (uint32_t)(res >> 32); | ||
| 281 | + CC_DST = (uint32_t)res; | ||
| 282 | + CC_SRC = (uint32_t)(res >> 32); | ||
| 224 | } | 283 | } |
| 225 | 284 | ||
| 226 | void OPPROTO op_imull_EAX_T0(void) | 285 | void OPPROTO op_imull_EAX_T0(void) |
| @@ -251,6 +310,23 @@ void OPPROTO op_imull_T0_T1(void) | @@ -251,6 +310,23 @@ void OPPROTO op_imull_T0_T1(void) | ||
| 251 | CC_SRC = (res != (int32_t)res); | 310 | CC_SRC = (res != (int32_t)res); |
| 252 | } | 311 | } |
| 253 | 312 | ||
| 313 | +#ifdef TARGET_X86_64 | ||
| 314 | +void OPPROTO op_mulq_EAX_T0(void) | ||
| 315 | +{ | ||
| 316 | + helper_mulq_EAX_T0(); | ||
| 317 | +} | ||
| 318 | + | ||
| 319 | +void OPPROTO op_imulq_EAX_T0(void) | ||
| 320 | +{ | ||
| 321 | + helper_imulq_EAX_T0(); | ||
| 322 | +} | ||
| 323 | + | ||
| 324 | +void OPPROTO op_imulq_T0_T1(void) | ||
| 325 | +{ | ||
| 326 | + helper_imulq_T0_T1(); | ||
| 327 | +} | ||
| 328 | +#endif | ||
| 329 | + | ||
| 254 | /* division, flags are undefined */ | 330 | /* division, flags are undefined */ |
| 255 | /* XXX: add exceptions for overflow */ | 331 | /* XXX: add exceptions for overflow */ |
| 256 | 332 | ||
| @@ -261,12 +337,11 @@ void OPPROTO op_divb_AL_T0(void) | @@ -261,12 +337,11 @@ void OPPROTO op_divb_AL_T0(void) | ||
| 261 | num = (EAX & 0xffff); | 337 | num = (EAX & 0xffff); |
| 262 | den = (T0 & 0xff); | 338 | den = (T0 & 0xff); |
| 263 | if (den == 0) { | 339 | if (den == 0) { |
| 264 | - EIP = PARAM1; | ||
| 265 | raise_exception(EXCP00_DIVZ); | 340 | raise_exception(EXCP00_DIVZ); |
| 266 | } | 341 | } |
| 267 | q = (num / den) & 0xff; | 342 | q = (num / den) & 0xff; |
| 268 | r = (num % den) & 0xff; | 343 | r = (num % den) & 0xff; |
| 269 | - EAX = (EAX & 0xffff0000) | (r << 8) | q; | 344 | + EAX = (EAX & ~0xffff) | (r << 8) | q; |
| 270 | } | 345 | } |
| 271 | 346 | ||
| 272 | void OPPROTO op_idivb_AL_T0(void) | 347 | void OPPROTO op_idivb_AL_T0(void) |
| @@ -276,12 +351,11 @@ void OPPROTO op_idivb_AL_T0(void) | @@ -276,12 +351,11 @@ void OPPROTO op_idivb_AL_T0(void) | ||
| 276 | num = (int16_t)EAX; | 351 | num = (int16_t)EAX; |
| 277 | den = (int8_t)T0; | 352 | den = (int8_t)T0; |
| 278 | if (den == 0) { | 353 | if (den == 0) { |
| 279 | - EIP = PARAM1; | ||
| 280 | raise_exception(EXCP00_DIVZ); | 354 | raise_exception(EXCP00_DIVZ); |
| 281 | } | 355 | } |
| 282 | q = (num / den) & 0xff; | 356 | q = (num / den) & 0xff; |
| 283 | r = (num % den) & 0xff; | 357 | r = (num % den) & 0xff; |
| 284 | - EAX = (EAX & 0xffff0000) | (r << 8) | q; | 358 | + EAX = (EAX & ~0xffff) | (r << 8) | q; |
| 285 | } | 359 | } |
| 286 | 360 | ||
| 287 | void OPPROTO op_divw_AX_T0(void) | 361 | void OPPROTO op_divw_AX_T0(void) |
| @@ -291,13 +365,12 @@ void OPPROTO op_divw_AX_T0(void) | @@ -291,13 +365,12 @@ void OPPROTO op_divw_AX_T0(void) | ||
| 291 | num = (EAX & 0xffff) | ((EDX & 0xffff) << 16); | 365 | num = (EAX & 0xffff) | ((EDX & 0xffff) << 16); |
| 292 | den = (T0 & 0xffff); | 366 | den = (T0 & 0xffff); |
| 293 | if (den == 0) { | 367 | if (den == 0) { |
| 294 | - EIP = PARAM1; | ||
| 295 | raise_exception(EXCP00_DIVZ); | 368 | raise_exception(EXCP00_DIVZ); |
| 296 | } | 369 | } |
| 297 | q = (num / den) & 0xffff; | 370 | q = (num / den) & 0xffff; |
| 298 | r = (num % den) & 0xffff; | 371 | r = (num % den) & 0xffff; |
| 299 | - EAX = (EAX & 0xffff0000) | q; | ||
| 300 | - EDX = (EDX & 0xffff0000) | r; | 372 | + EAX = (EAX & ~0xffff) | q; |
| 373 | + EDX = (EDX & ~0xffff) | r; | ||
| 301 | } | 374 | } |
| 302 | 375 | ||
| 303 | void OPPROTO op_idivw_AX_T0(void) | 376 | void OPPROTO op_idivw_AX_T0(void) |
| @@ -307,30 +380,47 @@ void OPPROTO op_idivw_AX_T0(void) | @@ -307,30 +380,47 @@ void OPPROTO op_idivw_AX_T0(void) | ||
| 307 | num = (EAX & 0xffff) | ((EDX & 0xffff) << 16); | 380 | num = (EAX & 0xffff) | ((EDX & 0xffff) << 16); |
| 308 | den = (int16_t)T0; | 381 | den = (int16_t)T0; |
| 309 | if (den == 0) { | 382 | if (den == 0) { |
| 310 | - EIP = PARAM1; | ||
| 311 | raise_exception(EXCP00_DIVZ); | 383 | raise_exception(EXCP00_DIVZ); |
| 312 | } | 384 | } |
| 313 | q = (num / den) & 0xffff; | 385 | q = (num / den) & 0xffff; |
| 314 | r = (num % den) & 0xffff; | 386 | r = (num % den) & 0xffff; |
| 315 | - EAX = (EAX & 0xffff0000) | q; | ||
| 316 | - EDX = (EDX & 0xffff0000) | r; | 387 | + EAX = (EAX & ~0xffff) | q; |
| 388 | + EDX = (EDX & ~0xffff) | r; | ||
| 317 | } | 389 | } |
| 318 | 390 | ||
| 319 | void OPPROTO op_divl_EAX_T0(void) | 391 | void OPPROTO op_divl_EAX_T0(void) |
| 320 | { | 392 | { |
| 321 | - helper_divl_EAX_T0(PARAM1); | 393 | + helper_divl_EAX_T0(); |
| 322 | } | 394 | } |
| 323 | 395 | ||
| 324 | void OPPROTO op_idivl_EAX_T0(void) | 396 | void OPPROTO op_idivl_EAX_T0(void) |
| 325 | { | 397 | { |
| 326 | - helper_idivl_EAX_T0(PARAM1); | 398 | + helper_idivl_EAX_T0(); |
| 327 | } | 399 | } |
| 328 | 400 | ||
| 401 | +#ifdef TARGET_X86_64 | ||
| 402 | +void OPPROTO op_divq_EAX_T0(void) | ||
| 403 | +{ | ||
| 404 | + helper_divq_EAX_T0(); | ||
| 405 | +} | ||
| 406 | + | ||
| 407 | +void OPPROTO op_idivq_EAX_T0(void) | ||
| 408 | +{ | ||
| 409 | + helper_idivq_EAX_T0(); | ||
| 410 | +} | ||
| 411 | +#endif | ||
| 412 | + | ||
| 329 | /* constant load & misc op */ | 413 | /* constant load & misc op */ |
| 330 | 414 | ||
| 415 | +/* XXX: consistent names */ | ||
| 416 | +void OPPROTO op_movl_T0_imu(void) | ||
| 417 | +{ | ||
| 418 | + T0 = (uint32_t)PARAM1; | ||
| 419 | +} | ||
| 420 | + | ||
| 331 | void OPPROTO op_movl_T0_im(void) | 421 | void OPPROTO op_movl_T0_im(void) |
| 332 | { | 422 | { |
| 333 | - T0 = PARAM1; | 423 | + T0 = (int32_t)PARAM1; |
| 334 | } | 424 | } |
| 335 | 425 | ||
| 336 | void OPPROTO op_addl_T0_im(void) | 426 | void OPPROTO op_addl_T0_im(void) |
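Note: op_movl_T0_im now sign-extends its 32-bit parameter, and the new op_movl_T0_imu zero-extends it; once T0 is 64 bits wide the two are no longer interchangeable. A two-function illustration (standalone, the names are ours):

    /* Sketch: the same 32-bit immediate widens differently to 64 bits. */
    #include <stdint.h>

    static uint64_t load_im(int32_t p)   { return p; }  /* like op_movl_T0_im  */
    static uint64_t load_imu(uint32_t p) { return p; }  /* like op_movl_T0_imu */

    /* load_im((int32_t)0x80000000) == 0xffffffff80000000 (sign-extended) */
    /* load_imu(0x80000000U)        == 0x0000000080000000 (zero-extended) */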
| @@ -353,9 +443,14 @@ void OPPROTO op_movl_T0_T1(void) | @@ -353,9 +443,14 @@ void OPPROTO op_movl_T0_T1(void) | ||
| 353 | T0 = T1; | 443 | T0 = T1; |
| 354 | } | 444 | } |
| 355 | 445 | ||
| 446 | +void OPPROTO op_movl_T1_imu(void) | ||
| 447 | +{ | ||
| 448 | + T1 = (uint32_t)PARAM1; | ||
| 449 | +} | ||
| 450 | + | ||
| 356 | void OPPROTO op_movl_T1_im(void) | 451 | void OPPROTO op_movl_T1_im(void) |
| 357 | { | 452 | { |
| 358 | - T1 = PARAM1; | 453 | + T1 = (int32_t)PARAM1; |
| 359 | } | 454 | } |
| 360 | 455 | ||
| 361 | void OPPROTO op_addl_T1_im(void) | 456 | void OPPROTO op_addl_T1_im(void) |
| @@ -370,19 +465,95 @@ void OPPROTO op_movl_T1_A0(void) | @@ -370,19 +465,95 @@ void OPPROTO op_movl_T1_A0(void) | ||
| 370 | 465 | ||
| 371 | void OPPROTO op_movl_A0_im(void) | 466 | void OPPROTO op_movl_A0_im(void) |
| 372 | { | 467 | { |
| 373 | - A0 = PARAM1; | 468 | + A0 = (uint32_t)PARAM1; |
| 374 | } | 469 | } |
| 375 | 470 | ||
| 376 | void OPPROTO op_addl_A0_im(void) | 471 | void OPPROTO op_addl_A0_im(void) |
| 377 | { | 472 | { |
| 378 | - A0 += PARAM1; | 473 | + A0 = (uint32_t)(A0 + PARAM1); |
| 474 | +} | ||
| 475 | + | ||
| 476 | +void OPPROTO op_movl_A0_seg(void) | ||
| 477 | +{ | ||
| 478 | + A0 = (uint32_t)*(target_ulong *)((char *)env + PARAM1); | ||
| 479 | +} | ||
| 480 | + | ||
| 481 | +void OPPROTO op_addl_A0_seg(void) | ||
| 482 | +{ | ||
| 483 | + A0 = (uint32_t)(A0 + *(target_ulong *)((char *)env + PARAM1)); | ||
| 379 | } | 484 | } |
| 380 | 485 | ||
| 381 | void OPPROTO op_addl_A0_AL(void) | 486 | void OPPROTO op_addl_A0_AL(void) |
| 382 | { | 487 | { |
| 383 | - A0 += (EAX & 0xff); | 488 | + A0 = (uint32_t)(A0 + (EAX & 0xff)); |
| 489 | +} | ||
| 490 | + | ||
| 491 | +#ifdef WORDS_BIGENDIAN | ||
| 492 | +typedef union UREG64 { | ||
| 493 | + struct { uint16_t v3, v2, v1, v0; } w; | ||
| 494 | + struct { uint32_t v1, v0; } l; | ||
| 495 | + uint64_t q; | ||
| 496 | +} UREG64; | ||
| 497 | +#else | ||
| 498 | +typedef union UREG64 { | ||
| 499 | + struct { uint16_t v0, v1, v2, v3; } w; | ||
| 500 | + struct { uint32_t v0, v1; } l; | ||
| 501 | + uint64_t q; | ||
| 502 | +} UREG64; | ||
| 503 | +#endif | ||
| 504 | + | ||
| 505 | +#ifdef TARGET_X86_64 | ||
| 506 | + | ||
| 507 | +#define PARAMQ1 \ | ||
| 508 | +({\ | ||
| 509 | + UREG64 __p;\ | ||
| 510 | + __p.l.v1 = PARAM1;\ | ||
| 511 | + __p.l.v0 = PARAM2;\ | ||
| 512 | + __p.q;\ | ||
| 513 | +}) | ||
| 514 | + | ||
| 515 | +void OPPROTO op_movq_T0_im64(void) | ||
| 516 | +{ | ||
| 517 | + T0 = PARAMQ1; | ||
| 384 | } | 518 | } |
| 385 | 519 | ||
| 520 | +void OPPROTO op_movq_A0_im(void) | ||
| 521 | +{ | ||
| 522 | + A0 = (int32_t)PARAM1; | ||
| 523 | +} | ||
| 524 | + | ||
| 525 | +void OPPROTO op_movq_A0_im64(void) | ||
| 526 | +{ | ||
| 527 | + A0 = PARAMQ1; | ||
| 528 | +} | ||
| 529 | + | ||
| 530 | +void OPPROTO op_addq_A0_im(void) | ||
| 531 | +{ | ||
| 532 | + A0 = (A0 + (int32_t)PARAM1); | ||
| 533 | +} | ||
| 534 | + | ||
| 535 | +void OPPROTO op_addq_A0_im64(void) | ||
| 536 | +{ | ||
| 537 | + A0 = (A0 + PARAMQ1); | ||
| 538 | +} | ||
| 539 | + | ||
| 540 | +void OPPROTO op_movq_A0_seg(void) | ||
| 541 | +{ | ||
| 542 | + A0 = *(target_ulong *)((char *)env + PARAM1); | ||
| 543 | +} | ||
| 544 | + | ||
| 545 | +void OPPROTO op_addq_A0_seg(void) | ||
| 546 | +{ | ||
| 547 | + A0 += *(target_ulong *)((char *)env + PARAM1); | ||
| 548 | +} | ||
| 549 | + | ||
| 550 | +void OPPROTO op_addq_A0_AL(void) | ||
| 551 | +{ | ||
| 552 | + A0 = (A0 + (EAX & 0xff)); | ||
| 553 | +} | ||
| 554 | + | ||
| 555 | +#endif | ||
| 556 | + | ||
| 386 | void OPPROTO op_andl_A0_ffff(void) | 557 | void OPPROTO op_andl_A0_ffff(void) |
| 387 | { | 558 | { |
| 388 | A0 = A0 & 0xffff; | 559 | A0 = A0 & 0xffff; |
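Note: the 64-bit immediate ops above carry their constant as two 32-bit operation parameters; PARAMQ1 reassembles them through the UREG64 union so that v1 is the high word and v0 the low word on both little- and big-endian hosts. A plain-arithmetic sketch of the same reassembly (hypothetical helper, not in the patch):

    /* Sketch: rebuild the 64-bit immediate exactly as PARAMQ1 does,
       with PARAM1 supplying the high half and PARAM2 the low half. */
    #include <stdint.h>

    static uint64_t make_imm64(uint32_t param1, uint32_t param2)
    {
        return ((uint64_t)param1 << 32) | param2;   /* v1:v0 */
    }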
| @@ -401,29 +572,29 @@ void OPPROTO op_andl_A0_ffff(void) | @@ -401,29 +572,29 @@ void OPPROTO op_andl_A0_ffff(void) | ||
| 401 | #include "ops_mem.h" | 572 | #include "ops_mem.h" |
| 402 | #endif | 573 | #endif |
| 403 | 574 | ||
| 404 | -/* used for bit operations */ | 575 | +/* indirect jump */ |
| 405 | 576 | ||
| 406 | -void OPPROTO op_add_bitw_A0_T1(void) | 577 | +void OPPROTO op_jmp_T0(void) |
| 407 | { | 578 | { |
| 408 | - A0 += ((int16_t)T1 >> 4) << 1; | 579 | + EIP = T0; |
| 409 | } | 580 | } |
| 410 | 581 | ||
| 411 | -void OPPROTO op_add_bitl_A0_T1(void) | 582 | +void OPPROTO op_movl_eip_im(void) |
| 412 | { | 583 | { |
| 413 | - A0 += ((int32_t)T1 >> 5) << 2; | 584 | + EIP = (uint32_t)PARAM1; |
| 414 | } | 585 | } |
| 415 | 586 | ||
| 416 | -/* indirect jump */ | ||
| 417 | - | ||
| 418 | -void OPPROTO op_jmp_T0(void) | 587 | +#ifdef TARGET_X86_64 |
| 588 | +void OPPROTO op_movq_eip_im(void) | ||
| 419 | { | 589 | { |
| 420 | - EIP = T0; | 590 | + EIP = (int32_t)PARAM1; |
| 421 | } | 591 | } |
| 422 | 592 | ||
| 423 | -void OPPROTO op_jmp_im(void) | 593 | +void OPPROTO op_movq_eip_im64(void) |
| 424 | { | 594 | { |
| 425 | - EIP = PARAM1; | 595 | + EIP = PARAMQ1; |
| 426 | } | 596 | } |
| 597 | +#endif | ||
| 427 | 598 | ||
| 428 | void OPPROTO op_hlt(void) | 599 | void OPPROTO op_hlt(void) |
| 429 | { | 600 | { |
| @@ -505,11 +676,10 @@ void OPPROTO op_sti_vm(void) | @@ -505,11 +676,10 @@ void OPPROTO op_sti_vm(void) | ||
| 505 | void OPPROTO op_boundw(void) | 676 | void OPPROTO op_boundw(void) |
| 506 | { | 677 | { |
| 507 | int low, high, v; | 678 | int low, high, v; |
| 508 | - low = ldsw((uint8_t *)A0); | ||
| 509 | - high = ldsw((uint8_t *)A0 + 2); | 679 | + low = ldsw(A0); |
| 680 | + high = ldsw(A0 + 2); | ||
| 510 | v = (int16_t)T0; | 681 | v = (int16_t)T0; |
| 511 | if (v < low || v > high) { | 682 | if (v < low || v > high) { |
| 512 | - EIP = PARAM1; | ||
| 513 | raise_exception(EXCP05_BOUND); | 683 | raise_exception(EXCP05_BOUND); |
| 514 | } | 684 | } |
| 515 | FORCE_RET(); | 685 | FORCE_RET(); |
| @@ -518,11 +688,10 @@ void OPPROTO op_boundw(void) | @@ -518,11 +688,10 @@ void OPPROTO op_boundw(void) | ||
| 518 | void OPPROTO op_boundl(void) | 688 | void OPPROTO op_boundl(void) |
| 519 | { | 689 | { |
| 520 | int low, high, v; | 690 | int low, high, v; |
| 521 | - low = ldl((uint8_t *)A0); | ||
| 522 | - high = ldl((uint8_t *)A0 + 4); | 691 | + low = ldl(A0); |
| 692 | + high = ldl(A0 + 4); | ||
| 523 | v = T0; | 693 | v = T0; |
| 524 | if (v < low || v > high) { | 694 | if (v < low || v > high) { |
| 525 | - EIP = PARAM1; | ||
| 526 | raise_exception(EXCP05_BOUND); | 695 | raise_exception(EXCP05_BOUND); |
| 527 | } | 696 | } |
| 528 | FORCE_RET(); | 697 | FORCE_RET(); |
| @@ -533,11 +702,6 @@ void OPPROTO op_cmpxchg8b(void) | @@ -533,11 +702,6 @@ void OPPROTO op_cmpxchg8b(void) | ||
| 533 | helper_cmpxchg8b(); | 702 | helper_cmpxchg8b(); |
| 534 | } | 703 | } |
| 535 | 704 | ||
| 536 | -void OPPROTO op_jmp(void) | ||
| 537 | -{ | ||
| 538 | - JUMP_TB(op_jmp, PARAM1, 0, PARAM2); | ||
| 539 | -} | ||
| 540 | - | ||
| 541 | void OPPROTO op_movl_T0_0(void) | 705 | void OPPROTO op_movl_T0_0(void) |
| 542 | { | 706 | { |
| 543 | T0 = 0; | 707 | T0 = 0; |
| @@ -564,6 +728,14 @@ void OPPROTO op_exit_tb(void) | @@ -564,6 +728,14 @@ void OPPROTO op_exit_tb(void) | ||
| 564 | #include "ops_template.h" | 728 | #include "ops_template.h" |
| 565 | #undef SHIFT | 729 | #undef SHIFT |
| 566 | 730 | ||
| 731 | +#ifdef TARGET_X86_64 | ||
| 732 | + | ||
| 733 | +#define SHIFT 3 | ||
| 734 | +#include "ops_template.h" | ||
| 735 | +#undef SHIFT | ||
| 736 | + | ||
| 737 | +#endif | ||
| 738 | + | ||
| 567 | /* sign extend */ | 739 | /* sign extend */ |
| 568 | 740 | ||
| 569 | void OPPROTO op_movsbl_T0_T0(void) | 741 | void OPPROTO op_movsbl_T0_T0(void) |
| @@ -581,6 +753,11 @@ void OPPROTO op_movswl_T0_T0(void) | @@ -581,6 +753,11 @@ void OPPROTO op_movswl_T0_T0(void) | ||
| 581 | T0 = (int16_t)T0; | 753 | T0 = (int16_t)T0; |
| 582 | } | 754 | } |
| 583 | 755 | ||
| 756 | +void OPPROTO op_movslq_T0_T0(void) | ||
| 757 | +{ | ||
| 758 | + T0 = (int32_t)T0; | ||
| 759 | +} | ||
| 760 | + | ||
| 584 | void OPPROTO op_movzwl_T0_T0(void) | 761 | void OPPROTO op_movzwl_T0_T0(void) |
| 585 | { | 762 | { |
| 586 | T0 = (uint16_t)T0; | 763 | T0 = (uint16_t)T0; |
| @@ -591,9 +768,16 @@ void OPPROTO op_movswl_EAX_AX(void) | @@ -591,9 +768,16 @@ void OPPROTO op_movswl_EAX_AX(void) | ||
| 591 | EAX = (int16_t)EAX; | 768 | EAX = (int16_t)EAX; |
| 592 | } | 769 | } |
| 593 | 770 | ||
| 771 | +#ifdef TARGET_X86_64 | ||
| 772 | +void OPPROTO op_movslq_RAX_EAX(void) | ||
| 773 | +{ | ||
| 774 | + EAX = (int32_t)EAX; | ||
| 775 | +} | ||
| 776 | +#endif | ||
| 777 | + | ||
| 594 | void OPPROTO op_movsbw_AX_AL(void) | 778 | void OPPROTO op_movsbw_AX_AL(void) |
| 595 | { | 779 | { |
| 596 | - EAX = (EAX & 0xffff0000) | ((int8_t)EAX & 0xffff); | 780 | + EAX = (EAX & ~0xffff) | ((int8_t)EAX & 0xffff); |
| 597 | } | 781 | } |
| 598 | 782 | ||
| 599 | void OPPROTO op_movslq_EDX_EAX(void) | 783 | void OPPROTO op_movslq_EDX_EAX(void) |
| @@ -603,14 +787,21 @@ void OPPROTO op_movslq_EDX_EAX(void) | @@ -603,14 +787,21 @@ void OPPROTO op_movslq_EDX_EAX(void) | ||
| 603 | 787 | ||
| 604 | void OPPROTO op_movswl_DX_AX(void) | 788 | void OPPROTO op_movswl_DX_AX(void) |
| 605 | { | 789 | { |
| 606 | - EDX = (EDX & 0xffff0000) | (((int16_t)EAX >> 15) & 0xffff); | 790 | + EDX = (EDX & ~0xffff) | (((int16_t)EAX >> 15) & 0xffff); |
| 791 | +} | ||
| 792 | + | ||
| 793 | +#ifdef TARGET_X86_64 | ||
| 794 | +void OPPROTO op_movsqo_RDX_RAX(void) | ||
| 795 | +{ | ||
| 796 | + EDX = (int64_t)EAX >> 63; | ||
| 607 | } | 797 | } |
| 798 | +#endif | ||
| 608 | 799 | ||
| 609 | /* string ops helpers */ | 800 | /* string ops helpers */ |
| 610 | 801 | ||
| 611 | void OPPROTO op_addl_ESI_T0(void) | 802 | void OPPROTO op_addl_ESI_T0(void) |
| 612 | { | 803 | { |
| 613 | - ESI += T0; | 804 | + ESI = (uint32_t)(ESI + T0); |
| 614 | } | 805 | } |
| 615 | 806 | ||
| 616 | void OPPROTO op_addw_ESI_T0(void) | 807 | void OPPROTO op_addw_ESI_T0(void) |
| @@ -620,7 +811,7 @@ void OPPROTO op_addw_ESI_T0(void) | @@ -620,7 +811,7 @@ void OPPROTO op_addw_ESI_T0(void) | ||
| 620 | 811 | ||
| 621 | void OPPROTO op_addl_EDI_T0(void) | 812 | void OPPROTO op_addl_EDI_T0(void) |
| 622 | { | 813 | { |
| 623 | - EDI += T0; | 814 | + EDI = (uint32_t)(EDI + T0); |
| 624 | } | 815 | } |
| 625 | 816 | ||
| 626 | void OPPROTO op_addw_EDI_T0(void) | 817 | void OPPROTO op_addw_EDI_T0(void) |
| @@ -630,7 +821,7 @@ void OPPROTO op_addw_EDI_T0(void) | @@ -630,7 +821,7 @@ void OPPROTO op_addw_EDI_T0(void) | ||
| 630 | 821 | ||
| 631 | void OPPROTO op_decl_ECX(void) | 822 | void OPPROTO op_decl_ECX(void) |
| 632 | { | 823 | { |
| 633 | - ECX--; | 824 | + ECX = (uint32_t)(ECX - 1); |
| 634 | } | 825 | } |
| 635 | 826 | ||
| 636 | void OPPROTO op_decw_ECX(void) | 827 | void OPPROTO op_decw_ECX(void) |
| @@ -638,6 +829,23 @@ void OPPROTO op_decw_ECX(void) | @@ -638,6 +829,23 @@ void OPPROTO op_decw_ECX(void) | ||
| 638 | ECX = (ECX & ~0xffff) | ((ECX - 1) & 0xffff); | 829 | ECX = (ECX & ~0xffff) | ((ECX - 1) & 0xffff); |
| 639 | } | 830 | } |
| 640 | 831 | ||
| 832 | +#ifdef TARGET_X86_64 | ||
| 833 | +void OPPROTO op_addq_ESI_T0(void) | ||
| 834 | +{ | ||
| 835 | + ESI = (ESI + T0); | ||
| 836 | +} | ||
| 837 | + | ||
| 838 | +void OPPROTO op_addq_EDI_T0(void) | ||
| 839 | +{ | ||
| 840 | + EDI = (EDI + T0); | ||
| 841 | +} | ||
| 842 | + | ||
| 843 | +void OPPROTO op_decq_ECX(void) | ||
| 844 | +{ | ||
| 845 | + ECX--; | ||
| 846 | +} | ||
| 847 | +#endif | ||
| 848 | + | ||
| 641 | /* push/pop utils */ | 849 | /* push/pop utils */ |
| 642 | 850 | ||
| 643 | void op_addl_A0_SS(void) | 851 | void op_addl_A0_SS(void) |
| @@ -647,22 +855,22 @@ void op_addl_A0_SS(void) | @@ -647,22 +855,22 @@ void op_addl_A0_SS(void) | ||
| 647 | 855 | ||
| 648 | void op_subl_A0_2(void) | 856 | void op_subl_A0_2(void) |
| 649 | { | 857 | { |
| 650 | - A0 -= 2; | 858 | + A0 = (uint32_t)(A0 - 2); |
| 651 | } | 859 | } |
| 652 | 860 | ||
| 653 | void op_subl_A0_4(void) | 861 | void op_subl_A0_4(void) |
| 654 | { | 862 | { |
| 655 | - A0 -= 4; | 863 | + A0 = (uint32_t)(A0 - 4); |
| 656 | } | 864 | } |
| 657 | 865 | ||
| 658 | void op_addl_ESP_4(void) | 866 | void op_addl_ESP_4(void) |
| 659 | { | 867 | { |
| 660 | - ESP += 4; | 868 | + ESP = (uint32_t)(ESP + 4); |
| 661 | } | 869 | } |
| 662 | 870 | ||
| 663 | void op_addl_ESP_2(void) | 871 | void op_addl_ESP_2(void) |
| 664 | { | 872 | { |
| 665 | - ESP += 2; | 873 | + ESP = (uint32_t)(ESP + 2); |
| 666 | } | 874 | } |
| 667 | 875 | ||
| 668 | void op_addw_ESP_4(void) | 876 | void op_addw_ESP_4(void) |
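Note: the 32-bit stack helpers now write ESP and A0 back through (uint32_t) casts: with 64-bit registers a plain ESP += 4 near the 4 GB boundary would carry into bits 63:32 instead of wrapping as a 32-bit CPU would. A short illustration:

    /* Sketch: 32-bit stack arithmetic must wrap at 4 GB even though the
       underlying register is now 64 bits wide. */
    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t esp = 0xfffffffeULL;
        uint64_t no_wrap = esp + 4;               /* 0x100000002: leaks into bit 32 */
        uint64_t wrapped = (uint32_t)(esp + 4);   /* 0x00000002: wraps correctly    */
        assert(no_wrap == 0x100000002ULL);
        assert(wrapped == 0x2ULL);
        return 0;
    }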
| @@ -677,7 +885,7 @@ void op_addw_ESP_2(void) | @@ -677,7 +885,7 @@ void op_addw_ESP_2(void) | ||
| 677 | 885 | ||
| 678 | void op_addl_ESP_im(void) | 886 | void op_addl_ESP_im(void) |
| 679 | { | 887 | { |
| 680 | - ESP += PARAM1; | 888 | + ESP = (uint32_t)(ESP + PARAM1); |
| 681 | } | 889 | } |
| 682 | 890 | ||
| 683 | void op_addw_ESP_im(void) | 891 | void op_addw_ESP_im(void) |
| @@ -685,6 +893,23 @@ void op_addw_ESP_im(void) | @@ -685,6 +893,23 @@ void op_addw_ESP_im(void) | ||
| 685 | ESP = (ESP & ~0xffff) | ((ESP + PARAM1) & 0xffff); | 893 | ESP = (ESP & ~0xffff) | ((ESP + PARAM1) & 0xffff); |
| 686 | } | 894 | } |
| 687 | 895 | ||
| 896 | +#ifdef TARGET_X86_64 | ||
| 897 | +void op_subq_A0_8(void) | ||
| 898 | +{ | ||
| 899 | + A0 -= 8; | ||
| 900 | +} | ||
| 901 | + | ||
| 902 | +void op_addq_ESP_8(void) | ||
| 903 | +{ | ||
| 904 | + ESP += 8; | ||
| 905 | +} | ||
| 906 | + | ||
| 907 | +void op_addq_ESP_im(void) | ||
| 908 | +{ | ||
| 909 | + ESP += PARAM1; | ||
| 910 | +} | ||
| 911 | +#endif | ||
| 912 | + | ||
| 688 | void OPPROTO op_rdtsc(void) | 913 | void OPPROTO op_rdtsc(void) |
| 689 | { | 914 | { |
| 690 | helper_rdtsc(); | 915 | helper_rdtsc(); |
| @@ -710,6 +935,18 @@ void OPPROTO op_sysexit(void) | @@ -710,6 +935,18 @@ void OPPROTO op_sysexit(void) | ||
| 710 | helper_sysexit(); | 935 | helper_sysexit(); |
| 711 | } | 936 | } |
| 712 | 937 | ||
| 938 | +#ifdef TARGET_X86_64 | ||
| 939 | +void OPPROTO op_syscall(void) | ||
| 940 | +{ | ||
| 941 | + helper_syscall(); | ||
| 942 | +} | ||
| 943 | + | ||
| 944 | +void OPPROTO op_sysret(void) | ||
| 945 | +{ | ||
| 946 | + helper_sysret(PARAM1); | ||
| 947 | +} | ||
| 948 | +#endif | ||
| 949 | + | ||
| 713 | void OPPROTO op_rdmsr(void) | 950 | void OPPROTO op_rdmsr(void) |
| 714 | { | 951 | { |
| 715 | helper_rdmsr(); | 952 | helper_rdmsr(); |
| @@ -868,7 +1105,7 @@ void OPPROTO op_movl_seg_T0_vm(void) | @@ -868,7 +1105,7 @@ void OPPROTO op_movl_seg_T0_vm(void) | ||
| 868 | /* env->segs[] access */ | 1105 | /* env->segs[] access */ |
| 869 | sc = (SegmentCache *)((char *)env + PARAM1); | 1106 | sc = (SegmentCache *)((char *)env + PARAM1); |
| 870 | sc->selector = selector; | 1107 | sc->selector = selector; |
| 871 | - sc->base = (void *)(selector << 4); | 1108 | + sc->base = (selector << 4); |
| 872 | } | 1109 | } |
| 873 | 1110 | ||
| 874 | void OPPROTO op_movl_T0_seg(void) | 1111 | void OPPROTO op_movl_T0_seg(void) |
| @@ -876,16 +1113,6 @@ void OPPROTO op_movl_T0_seg(void) | @@ -876,16 +1113,6 @@ void OPPROTO op_movl_T0_seg(void) | ||
| 876 | T0 = env->segs[PARAM1].selector; | 1113 | T0 = env->segs[PARAM1].selector; |
| 877 | } | 1114 | } |
| 878 | 1115 | ||
| 879 | -void OPPROTO op_movl_A0_seg(void) | ||
| 880 | -{ | ||
| 881 | - A0 = *(unsigned long *)((char *)env + PARAM1); | ||
| 882 | -} | ||
| 883 | - | ||
| 884 | -void OPPROTO op_addl_A0_seg(void) | ||
| 885 | -{ | ||
| 886 | - A0 += *(unsigned long *)((char *)env + PARAM1); | ||
| 887 | -} | ||
| 888 | - | ||
| 889 | void OPPROTO op_lsl(void) | 1116 | void OPPROTO op_lsl(void) |
| 890 | { | 1117 | { |
| 891 | helper_lsl(); | 1118 | helper_lsl(); |
| @@ -1006,6 +1233,26 @@ void OPPROTO op_movl_env_T1(void) | @@ -1006,6 +1233,26 @@ void OPPROTO op_movl_env_T1(void) | ||
| 1006 | *(uint32_t *)((char *)env + PARAM1) = T1; | 1233 | *(uint32_t *)((char *)env + PARAM1) = T1; |
| 1007 | } | 1234 | } |
| 1008 | 1235 | ||
| 1236 | +void OPPROTO op_movtl_T0_env(void) | ||
| 1237 | +{ | ||
| 1238 | + T0 = *(target_ulong *)((char *)env + PARAM1); | ||
| 1239 | +} | ||
| 1240 | + | ||
| 1241 | +void OPPROTO op_movtl_env_T0(void) | ||
| 1242 | +{ | ||
| 1243 | + *(target_ulong *)((char *)env + PARAM1) = T0; | ||
| 1244 | +} | ||
| 1245 | + | ||
| 1246 | +void OPPROTO op_movtl_T1_env(void) | ||
| 1247 | +{ | ||
| 1248 | + T1 = *(target_ulong *)((char *)env + PARAM1); | ||
| 1249 | +} | ||
| 1250 | + | ||
| 1251 | +void OPPROTO op_movtl_env_T1(void) | ||
| 1252 | +{ | ||
| 1253 | + *(target_ulong *)((char *)env + PARAM1) = T1; | ||
| 1254 | +} | ||
| 1255 | + | ||
| 1009 | void OPPROTO op_clts(void) | 1256 | void OPPROTO op_clts(void) |
| 1010 | { | 1257 | { |
| 1011 | env->cr[0] &= ~CR0_TS_MASK; | 1258 | env->cr[0] &= ~CR0_TS_MASK; |
| @@ -1014,25 +1261,31 @@ void OPPROTO op_clts(void) | @@ -1014,25 +1261,31 @@ void OPPROTO op_clts(void) | ||
| 1014 | 1261 | ||
| 1015 | /* flags handling */ | 1262 | /* flags handling */ |
| 1016 | 1263 | ||
| 1017 | -/* slow jumps cases : in order to avoid calling a function with a | ||
| 1018 | - pointer (which can generate a stack frame on PowerPC), we use | ||
| 1019 | - op_setcc to set T0 and then call op_jcc. */ | ||
| 1020 | -void OPPROTO op_jcc(void) | 1264 | +void OPPROTO op_goto_tb0(void) |
| 1021 | { | 1265 | { |
| 1022 | - if (T0) | ||
| 1023 | - JUMP_TB(op_jcc, PARAM1, 0, PARAM2); | ||
| 1024 | - else | ||
| 1025 | - JUMP_TB(op_jcc, PARAM1, 1, PARAM3); | ||
| 1026 | - FORCE_RET(); | 1266 | + GOTO_TB(op_goto_tb0, 0); |
| 1267 | +} | ||
| 1268 | + | ||
| 1269 | +void OPPROTO op_goto_tb1(void) | ||
| 1270 | +{ | ||
| 1271 | + GOTO_TB(op_goto_tb1, 1); | ||
| 1272 | +} | ||
| 1273 | + | ||
| 1274 | +void OPPROTO op_jmp_label(void) | ||
| 1275 | +{ | ||
| 1276 | + GOTO_LABEL_PARAM(1); | ||
| 1027 | } | 1277 | } |
| 1028 | 1278 | ||
| 1029 | -void OPPROTO op_jcc_im(void) | 1279 | +void OPPROTO op_jnz_T0_label(void) |
| 1030 | { | 1280 | { |
| 1031 | if (T0) | 1281 | if (T0) |
| 1032 | - EIP = PARAM1; | ||
| 1033 | - else | ||
| 1034 | - EIP = PARAM2; | ||
| 1035 | - FORCE_RET(); | 1282 | + GOTO_LABEL_PARAM(1); |
| 1283 | +} | ||
| 1284 | + | ||
| 1285 | +void OPPROTO op_jz_T0_label(void) | ||
| 1286 | +{ | ||
| 1287 | + if (!T0) | ||
| 1288 | + GOTO_LABEL_PARAM(1); | ||
| 1036 | } | 1289 | } |
| 1037 | 1290 | ||
| 1038 | /* slow set cases (compute x86 flags) */ | 1291 | /* slow set cases (compute x86 flags) */ |
| @@ -1299,6 +1552,28 @@ CCTable cc_table[CC_OP_NB] = { | @@ -1299,6 +1552,28 @@ CCTable cc_table[CC_OP_NB] = { | ||
| 1299 | [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl }, | 1552 | [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl }, |
| 1300 | [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl }, | 1553 | [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl }, |
| 1301 | [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl }, | 1554 | [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl }, |
| 1555 | + | ||
| 1556 | +#ifdef TARGET_X86_64 | ||
| 1557 | + [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull }, | ||
| 1558 | + | ||
| 1559 | + [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq }, | ||
| 1560 | + | ||
| 1561 | + [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq }, | ||
| 1562 | + | ||
| 1563 | + [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq }, | ||
| 1564 | + | ||
| 1565 | + [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq }, | ||
| 1566 | + | ||
| 1567 | + [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq }, | ||
| 1568 | + | ||
| 1569 | + [CC_OP_INCQ] = { compute_all_incq, compute_c_incl }, | ||
| 1570 | + | ||
| 1571 | + [CC_OP_DECQ] = { compute_all_decq, compute_c_incl }, | ||
| 1572 | + | ||
| 1573 | + [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq }, | ||
| 1574 | + | ||
| 1575 | + [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl }, | ||
| 1576 | +#endif | ||
| 1302 | }; | 1577 | }; |
| 1303 | 1578 | ||
| 1304 | /* floating point support. Some of the code for complicated x87 | 1579 | /* floating point support. Some of the code for complicated x87 |
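Note: the CC_OP_*Q entries extend the lazy condition-code scheme to 64-bit results: the translator records only the operation kind plus CC_SRC/CC_DST, and whoever needs EFLAGS later calls through cc_table to recover either all flags or just the carry. A minimal sketch of that dispatch, assuming the CCTable fields are named compute_all and compute_c as elsewhere in the i386 target:

    /* Sketch: materialise EFLAGS on demand from the lazily recorded state.
       The compute_all/compute_c member names are assumed, not shown here. */
    static int lazy_compute_eflags(CPUX86State *env)
    {
        if ((unsigned)env->cc_op < CC_OP_NB)
            return cc_table[env->cc_op].compute_all();
        return 0;   /* no flag information recorded */
    }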
| @@ -1330,20 +1605,20 @@ double qemu_rint(double x) | @@ -1330,20 +1605,20 @@ double qemu_rint(double x) | ||
| 1330 | void OPPROTO op_flds_FT0_A0(void) | 1605 | void OPPROTO op_flds_FT0_A0(void) |
| 1331 | { | 1606 | { |
| 1332 | #ifdef USE_FP_CONVERT | 1607 | #ifdef USE_FP_CONVERT |
| 1333 | - FP_CONVERT.i32 = ldl((void *)A0); | 1608 | + FP_CONVERT.i32 = ldl(A0); |
| 1334 | FT0 = FP_CONVERT.f; | 1609 | FT0 = FP_CONVERT.f; |
| 1335 | #else | 1610 | #else |
| 1336 | - FT0 = ldfl((void *)A0); | 1611 | + FT0 = ldfl(A0); |
| 1337 | #endif | 1612 | #endif |
| 1338 | } | 1613 | } |
| 1339 | 1614 | ||
| 1340 | void OPPROTO op_fldl_FT0_A0(void) | 1615 | void OPPROTO op_fldl_FT0_A0(void) |
| 1341 | { | 1616 | { |
| 1342 | #ifdef USE_FP_CONVERT | 1617 | #ifdef USE_FP_CONVERT |
| 1343 | - FP_CONVERT.i64 = ldq((void *)A0); | 1618 | + FP_CONVERT.i64 = ldq(A0); |
| 1344 | FT0 = FP_CONVERT.d; | 1619 | FT0 = FP_CONVERT.d; |
| 1345 | #else | 1620 | #else |
| 1346 | - FT0 = ldfq((void *)A0); | 1621 | + FT0 = ldfq(A0); |
| 1347 | #endif | 1622 | #endif |
| 1348 | } | 1623 | } |
| 1349 | 1624 | ||
| @@ -1352,17 +1627,17 @@ void OPPROTO op_fldl_FT0_A0(void) | @@ -1352,17 +1627,17 @@ void OPPROTO op_fldl_FT0_A0(void) | ||
| 1352 | 1627 | ||
| 1353 | void helper_fild_FT0_A0(void) | 1628 | void helper_fild_FT0_A0(void) |
| 1354 | { | 1629 | { |
| 1355 | - FT0 = (CPU86_LDouble)ldsw((void *)A0); | 1630 | + FT0 = (CPU86_LDouble)ldsw(A0); |
| 1356 | } | 1631 | } |
| 1357 | 1632 | ||
| 1358 | void helper_fildl_FT0_A0(void) | 1633 | void helper_fildl_FT0_A0(void) |
| 1359 | { | 1634 | { |
| 1360 | - FT0 = (CPU86_LDouble)((int32_t)ldl((void *)A0)); | 1635 | + FT0 = (CPU86_LDouble)((int32_t)ldl(A0)); |
| 1361 | } | 1636 | } |
| 1362 | 1637 | ||
| 1363 | void helper_fildll_FT0_A0(void) | 1638 | void helper_fildll_FT0_A0(void) |
| 1364 | { | 1639 | { |
| 1365 | - FT0 = (CPU86_LDouble)((int64_t)ldq((void *)A0)); | 1640 | + FT0 = (CPU86_LDouble)((int64_t)ldq(A0)); |
| 1366 | } | 1641 | } |
| 1367 | 1642 | ||
| 1368 | void OPPROTO op_fild_FT0_A0(void) | 1643 | void OPPROTO op_fild_FT0_A0(void) |
| @@ -1385,30 +1660,30 @@ void OPPROTO op_fildll_FT0_A0(void) | @@ -1385,30 +1660,30 @@ void OPPROTO op_fildll_FT0_A0(void) | ||
| 1385 | void OPPROTO op_fild_FT0_A0(void) | 1660 | void OPPROTO op_fild_FT0_A0(void) |
| 1386 | { | 1661 | { |
| 1387 | #ifdef USE_FP_CONVERT | 1662 | #ifdef USE_FP_CONVERT |
| 1388 | - FP_CONVERT.i32 = ldsw((void *)A0); | 1663 | + FP_CONVERT.i32 = ldsw(A0); |
| 1389 | FT0 = (CPU86_LDouble)FP_CONVERT.i32; | 1664 | FT0 = (CPU86_LDouble)FP_CONVERT.i32; |
| 1390 | #else | 1665 | #else |
| 1391 | - FT0 = (CPU86_LDouble)ldsw((void *)A0); | 1666 | + FT0 = (CPU86_LDouble)ldsw(A0); |
| 1392 | #endif | 1667 | #endif |
| 1393 | } | 1668 | } |
| 1394 | 1669 | ||
| 1395 | void OPPROTO op_fildl_FT0_A0(void) | 1670 | void OPPROTO op_fildl_FT0_A0(void) |
| 1396 | { | 1671 | { |
| 1397 | #ifdef USE_FP_CONVERT | 1672 | #ifdef USE_FP_CONVERT |
| 1398 | - FP_CONVERT.i32 = (int32_t) ldl((void *)A0); | 1673 | + FP_CONVERT.i32 = (int32_t) ldl(A0); |
| 1399 | FT0 = (CPU86_LDouble)FP_CONVERT.i32; | 1674 | FT0 = (CPU86_LDouble)FP_CONVERT.i32; |
| 1400 | #else | 1675 | #else |
| 1401 | - FT0 = (CPU86_LDouble)((int32_t)ldl((void *)A0)); | 1676 | + FT0 = (CPU86_LDouble)((int32_t)ldl(A0)); |
| 1402 | #endif | 1677 | #endif |
| 1403 | } | 1678 | } |
| 1404 | 1679 | ||
| 1405 | void OPPROTO op_fildll_FT0_A0(void) | 1680 | void OPPROTO op_fildll_FT0_A0(void) |
| 1406 | { | 1681 | { |
| 1407 | #ifdef USE_FP_CONVERT | 1682 | #ifdef USE_FP_CONVERT |
| 1408 | - FP_CONVERT.i64 = (int64_t) ldq((void *)A0); | 1683 | + FP_CONVERT.i64 = (int64_t) ldq(A0); |
| 1409 | FT0 = (CPU86_LDouble)FP_CONVERT.i64; | 1684 | FT0 = (CPU86_LDouble)FP_CONVERT.i64; |
| 1410 | #else | 1685 | #else |
| 1411 | - FT0 = (CPU86_LDouble)((int64_t)ldq((void *)A0)); | 1686 | + FT0 = (CPU86_LDouble)((int64_t)ldq(A0)); |
| 1412 | #endif | 1687 | #endif |
| 1413 | } | 1688 | } |
| 1414 | #endif | 1689 | #endif |
| @@ -1420,10 +1695,10 @@ void OPPROTO op_flds_ST0_A0(void) | @@ -1420,10 +1695,10 @@ void OPPROTO op_flds_ST0_A0(void) | ||
| 1420 | int new_fpstt; | 1695 | int new_fpstt; |
| 1421 | new_fpstt = (env->fpstt - 1) & 7; | 1696 | new_fpstt = (env->fpstt - 1) & 7; |
| 1422 | #ifdef USE_FP_CONVERT | 1697 | #ifdef USE_FP_CONVERT |
| 1423 | - FP_CONVERT.i32 = ldl((void *)A0); | 1698 | + FP_CONVERT.i32 = ldl(A0); |
| 1424 | env->fpregs[new_fpstt] = FP_CONVERT.f; | 1699 | env->fpregs[new_fpstt] = FP_CONVERT.f; |
| 1425 | #else | 1700 | #else |
| 1426 | - env->fpregs[new_fpstt] = ldfl((void *)A0); | 1701 | + env->fpregs[new_fpstt] = ldfl(A0); |
| 1427 | #endif | 1702 | #endif |
| 1428 | env->fpstt = new_fpstt; | 1703 | env->fpstt = new_fpstt; |
| 1429 | env->fptags[new_fpstt] = 0; /* validate stack entry */ | 1704 | env->fptags[new_fpstt] = 0; /* validate stack entry */ |
| @@ -1434,10 +1709,10 @@ void OPPROTO op_fldl_ST0_A0(void) | @@ -1434,10 +1709,10 @@ void OPPROTO op_fldl_ST0_A0(void) | ||
| 1434 | int new_fpstt; | 1709 | int new_fpstt; |
| 1435 | new_fpstt = (env->fpstt - 1) & 7; | 1710 | new_fpstt = (env->fpstt - 1) & 7; |
| 1436 | #ifdef USE_FP_CONVERT | 1711 | #ifdef USE_FP_CONVERT |
| 1437 | - FP_CONVERT.i64 = ldq((void *)A0); | 1712 | + FP_CONVERT.i64 = ldq(A0); |
| 1438 | env->fpregs[new_fpstt] = FP_CONVERT.d; | 1713 | env->fpregs[new_fpstt] = FP_CONVERT.d; |
| 1439 | #else | 1714 | #else |
| 1440 | - env->fpregs[new_fpstt] = ldfq((void *)A0); | 1715 | + env->fpregs[new_fpstt] = ldfq(A0); |
| 1441 | #endif | 1716 | #endif |
| 1442 | env->fpstt = new_fpstt; | 1717 | env->fpstt = new_fpstt; |
| 1443 | env->fptags[new_fpstt] = 0; /* validate stack entry */ | 1718 | env->fptags[new_fpstt] = 0; /* validate stack entry */ |
| @@ -1455,7 +1730,7 @@ void helper_fild_ST0_A0(void) | @@ -1455,7 +1730,7 @@ void helper_fild_ST0_A0(void) | ||
| 1455 | { | 1730 | { |
| 1456 | int new_fpstt; | 1731 | int new_fpstt; |
| 1457 | new_fpstt = (env->fpstt - 1) & 7; | 1732 | new_fpstt = (env->fpstt - 1) & 7; |
| 1458 | - env->fpregs[new_fpstt] = (CPU86_LDouble)ldsw((void *)A0); | 1733 | + env->fpregs[new_fpstt] = (CPU86_LDouble)ldsw(A0); |
| 1459 | env->fpstt = new_fpstt; | 1734 | env->fpstt = new_fpstt; |
| 1460 | env->fptags[new_fpstt] = 0; /* validate stack entry */ | 1735 | env->fptags[new_fpstt] = 0; /* validate stack entry */ |
| 1461 | } | 1736 | } |
| @@ -1464,7 +1739,7 @@ void helper_fildl_ST0_A0(void) | @@ -1464,7 +1739,7 @@ void helper_fildl_ST0_A0(void) | ||
| 1464 | { | 1739 | { |
| 1465 | int new_fpstt; | 1740 | int new_fpstt; |
| 1466 | new_fpstt = (env->fpstt - 1) & 7; | 1741 | new_fpstt = (env->fpstt - 1) & 7; |
| 1467 | - env->fpregs[new_fpstt] = (CPU86_LDouble)((int32_t)ldl((void *)A0)); | 1742 | + env->fpregs[new_fpstt] = (CPU86_LDouble)((int32_t)ldl(A0)); |
| 1468 | env->fpstt = new_fpstt; | 1743 | env->fpstt = new_fpstt; |
| 1469 | env->fptags[new_fpstt] = 0; /* validate stack entry */ | 1744 | env->fptags[new_fpstt] = 0; /* validate stack entry */ |
| 1470 | } | 1745 | } |
| @@ -1473,7 +1748,7 @@ void helper_fildll_ST0_A0(void) | @@ -1473,7 +1748,7 @@ void helper_fildll_ST0_A0(void) | ||
| 1473 | { | 1748 | { |
| 1474 | int new_fpstt; | 1749 | int new_fpstt; |
| 1475 | new_fpstt = (env->fpstt - 1) & 7; | 1750 | new_fpstt = (env->fpstt - 1) & 7; |
| 1476 | - env->fpregs[new_fpstt] = (CPU86_LDouble)((int64_t)ldq((void *)A0)); | 1751 | + env->fpregs[new_fpstt] = (CPU86_LDouble)((int64_t)ldq(A0)); |
| 1477 | env->fpstt = new_fpstt; | 1752 | env->fpstt = new_fpstt; |
| 1478 | env->fptags[new_fpstt] = 0; /* validate stack entry */ | 1753 | env->fptags[new_fpstt] = 0; /* validate stack entry */ |
| 1479 | } | 1754 | } |
| @@ -1500,10 +1775,10 @@ void OPPROTO op_fild_ST0_A0(void) | @@ -1500,10 +1775,10 @@ void OPPROTO op_fild_ST0_A0(void) | ||
| 1500 | int new_fpstt; | 1775 | int new_fpstt; |
| 1501 | new_fpstt = (env->fpstt - 1) & 7; | 1776 | new_fpstt = (env->fpstt - 1) & 7; |
| 1502 | #ifdef USE_FP_CONVERT | 1777 | #ifdef USE_FP_CONVERT |
| 1503 | - FP_CONVERT.i32 = ldsw((void *)A0); | 1778 | + FP_CONVERT.i32 = ldsw(A0); |
| 1504 | env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i32; | 1779 | env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i32; |
| 1505 | #else | 1780 | #else |
| 1506 | - env->fpregs[new_fpstt] = (CPU86_LDouble)ldsw((void *)A0); | 1781 | + env->fpregs[new_fpstt] = (CPU86_LDouble)ldsw(A0); |
| 1507 | #endif | 1782 | #endif |
| 1508 | env->fpstt = new_fpstt; | 1783 | env->fpstt = new_fpstt; |
| 1509 | env->fptags[new_fpstt] = 0; /* validate stack entry */ | 1784 | env->fptags[new_fpstt] = 0; /* validate stack entry */ |
| @@ -1514,10 +1789,10 @@ void OPPROTO op_fildl_ST0_A0(void) | @@ -1514,10 +1789,10 @@ void OPPROTO op_fildl_ST0_A0(void) | ||
| 1514 | int new_fpstt; | 1789 | int new_fpstt; |
| 1515 | new_fpstt = (env->fpstt - 1) & 7; | 1790 | new_fpstt = (env->fpstt - 1) & 7; |
| 1516 | #ifdef USE_FP_CONVERT | 1791 | #ifdef USE_FP_CONVERT |
| 1517 | - FP_CONVERT.i32 = (int32_t) ldl((void *)A0); | 1792 | + FP_CONVERT.i32 = (int32_t) ldl(A0); |
| 1518 | env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i32; | 1793 | env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i32; |
| 1519 | #else | 1794 | #else |
| 1520 | - env->fpregs[new_fpstt] = (CPU86_LDouble)((int32_t)ldl((void *)A0)); | 1795 | + env->fpregs[new_fpstt] = (CPU86_LDouble)((int32_t)ldl(A0)); |
| 1521 | #endif | 1796 | #endif |
| 1522 | env->fpstt = new_fpstt; | 1797 | env->fpstt = new_fpstt; |
| 1523 | env->fptags[new_fpstt] = 0; /* validate stack entry */ | 1798 | env->fptags[new_fpstt] = 0; /* validate stack entry */ |
| @@ -1528,10 +1803,10 @@ void OPPROTO op_fildll_ST0_A0(void) | @@ -1528,10 +1803,10 @@ void OPPROTO op_fildll_ST0_A0(void) | ||
| 1528 | int new_fpstt; | 1803 | int new_fpstt; |
| 1529 | new_fpstt = (env->fpstt - 1) & 7; | 1804 | new_fpstt = (env->fpstt - 1) & 7; |
| 1530 | #ifdef USE_FP_CONVERT | 1805 | #ifdef USE_FP_CONVERT |
| 1531 | - FP_CONVERT.i64 = (int64_t) ldq((void *)A0); | 1806 | + FP_CONVERT.i64 = (int64_t) ldq(A0); |
| 1532 | env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i64; | 1807 | env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i64; |
| 1533 | #else | 1808 | #else |
| 1534 | - env->fpregs[new_fpstt] = (CPU86_LDouble)((int64_t)ldq((void *)A0)); | 1809 | + env->fpregs[new_fpstt] = (CPU86_LDouble)((int64_t)ldq(A0)); |
| 1535 | #endif | 1810 | #endif |
| 1536 | env->fpstt = new_fpstt; | 1811 | env->fpstt = new_fpstt; |
| 1537 | env->fptags[new_fpstt] = 0; /* validate stack entry */ | 1812 | env->fptags[new_fpstt] = 0; /* validate stack entry */ |
| @@ -1545,15 +1820,15 @@ void OPPROTO op_fsts_ST0_A0(void) | @@ -1545,15 +1820,15 @@ void OPPROTO op_fsts_ST0_A0(void) | ||
| 1545 | { | 1820 | { |
| 1546 | #ifdef USE_FP_CONVERT | 1821 | #ifdef USE_FP_CONVERT |
| 1547 | FP_CONVERT.f = (float)ST0; | 1822 | FP_CONVERT.f = (float)ST0; |
| 1548 | - stfl((void *)A0, FP_CONVERT.f); | 1823 | + stfl(A0, FP_CONVERT.f); |
| 1549 | #else | 1824 | #else |
| 1550 | - stfl((void *)A0, (float)ST0); | 1825 | + stfl(A0, (float)ST0); |
| 1551 | #endif | 1826 | #endif |
| 1552 | } | 1827 | } |
| 1553 | 1828 | ||
| 1554 | void OPPROTO op_fstl_ST0_A0(void) | 1829 | void OPPROTO op_fstl_ST0_A0(void) |
| 1555 | { | 1830 | { |
| 1556 | - stfq((void *)A0, (double)ST0); | 1831 | + stfq(A0, (double)ST0); |
| 1557 | } | 1832 | } |
| 1558 | 1833 | ||
| 1559 | void OPPROTO op_fstt_ST0_A0(void) | 1834 | void OPPROTO op_fstt_ST0_A0(void) |
| @@ -1574,7 +1849,7 @@ void OPPROTO op_fist_ST0_A0(void) | @@ -1574,7 +1849,7 @@ void OPPROTO op_fist_ST0_A0(void) | ||
| 1574 | val = lrint(d); | 1849 | val = lrint(d); |
| 1575 | if (val != (int16_t)val) | 1850 | if (val != (int16_t)val) |
| 1576 | val = -32768; | 1851 | val = -32768; |
| 1577 | - stw((void *)A0, val); | 1852 | + stw(A0, val); |
| 1578 | } | 1853 | } |
| 1579 | 1854 | ||
| 1580 | void OPPROTO op_fistl_ST0_A0(void) | 1855 | void OPPROTO op_fistl_ST0_A0(void) |
| @@ -1588,7 +1863,7 @@ void OPPROTO op_fistl_ST0_A0(void) | @@ -1588,7 +1863,7 @@ void OPPROTO op_fistl_ST0_A0(void) | ||
| 1588 | 1863 | ||
| 1589 | d = ST0; | 1864 | d = ST0; |
| 1590 | val = lrint(d); | 1865 | val = lrint(d); |
| 1591 | - stl((void *)A0, val); | 1866 | + stl(A0, val); |
| 1592 | } | 1867 | } |
| 1593 | 1868 | ||
| 1594 | void OPPROTO op_fistll_ST0_A0(void) | 1869 | void OPPROTO op_fistll_ST0_A0(void) |
| @@ -1602,7 +1877,7 @@ void OPPROTO op_fistll_ST0_A0(void) | @@ -1602,7 +1877,7 @@ void OPPROTO op_fistll_ST0_A0(void) | ||
| 1602 | 1877 | ||
| 1603 | d = ST0; | 1878 | d = ST0; |
| 1604 | val = llrint(d); | 1879 | val = llrint(d); |
| 1605 | - stq((void *)A0, val); | 1880 | + stq(A0, val); |
| 1606 | } | 1881 | } |
| 1607 | 1882 | ||
| 1608 | void OPPROTO op_fbld_ST0_A0(void) | 1883 | void OPPROTO op_fbld_ST0_A0(void) |
| @@ -1934,25 +2209,25 @@ void OPPROTO op_fnstsw_A0(void) | @@ -1934,25 +2209,25 @@ void OPPROTO op_fnstsw_A0(void) | ||
| 1934 | { | 2209 | { |
| 1935 | int fpus; | 2210 | int fpus; |
| 1936 | fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; | 2211 | fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; |
| 1937 | - stw((void *)A0, fpus); | 2212 | + stw(A0, fpus); |
| 1938 | } | 2213 | } |
| 1939 | 2214 | ||
| 1940 | void OPPROTO op_fnstsw_EAX(void) | 2215 | void OPPROTO op_fnstsw_EAX(void) |
| 1941 | { | 2216 | { |
| 1942 | int fpus; | 2217 | int fpus; |
| 1943 | fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; | 2218 | fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; |
| 1944 | - EAX = (EAX & 0xffff0000) | fpus; | 2219 | + EAX = (EAX & ~0xffff) | fpus; |
| 1945 | } | 2220 | } |
| 1946 | 2221 | ||
| 1947 | void OPPROTO op_fnstcw_A0(void) | 2222 | void OPPROTO op_fnstcw_A0(void) |
| 1948 | { | 2223 | { |
| 1949 | - stw((void *)A0, env->fpuc); | 2224 | + stw(A0, env->fpuc); |
| 1950 | } | 2225 | } |
| 1951 | 2226 | ||
| 1952 | void OPPROTO op_fldcw_A0(void) | 2227 | void OPPROTO op_fldcw_A0(void) |
| 1953 | { | 2228 | { |
| 1954 | int rnd_type; | 2229 | int rnd_type; |
| 1955 | - env->fpuc = lduw((void *)A0); | 2230 | + env->fpuc = lduw(A0); |
| 1956 | /* set rounding mode */ | 2231 | /* set rounding mode */ |
| 1957 | switch(env->fpuc & RC_MASK) { | 2232 | switch(env->fpuc & RC_MASK) { |
| 1958 | default: | 2233 | default: |
| @@ -2001,22 +2276,22 @@ void OPPROTO op_fninit(void) | @@ -2001,22 +2276,22 @@ void OPPROTO op_fninit(void) | ||
| 2001 | 2276 | ||
| 2002 | void OPPROTO op_fnstenv_A0(void) | 2277 | void OPPROTO op_fnstenv_A0(void) |
| 2003 | { | 2278 | { |
| 2004 | - helper_fstenv((uint8_t *)A0, PARAM1); | 2279 | + helper_fstenv(A0, PARAM1); |
| 2005 | } | 2280 | } |
| 2006 | 2281 | ||
| 2007 | void OPPROTO op_fldenv_A0(void) | 2282 | void OPPROTO op_fldenv_A0(void) |
| 2008 | { | 2283 | { |
| 2009 | - helper_fldenv((uint8_t *)A0, PARAM1); | 2284 | + helper_fldenv(A0, PARAM1); |
| 2010 | } | 2285 | } |
| 2011 | 2286 | ||
| 2012 | void OPPROTO op_fnsave_A0(void) | 2287 | void OPPROTO op_fnsave_A0(void) |
| 2013 | { | 2288 | { |
| 2014 | - helper_fsave((uint8_t *)A0, PARAM1); | 2289 | + helper_fsave(A0, PARAM1); |
| 2015 | } | 2290 | } |
| 2016 | 2291 | ||
| 2017 | void OPPROTO op_frstor_A0(void) | 2292 | void OPPROTO op_frstor_A0(void) |
| 2018 | { | 2293 | { |
| 2019 | - helper_frstor((uint8_t *)A0, PARAM1); | 2294 | + helper_frstor(A0, PARAM1); |
| 2020 | } | 2295 | } |
| 2021 | 2296 | ||
| 2022 | /* threading support */ | 2297 | /* threading support */ |
| @@ -2030,3 +2305,30 @@ void OPPROTO op_unlock(void) | @@ -2030,3 +2305,30 @@ void OPPROTO op_unlock(void) | ||
| 2030 | cpu_unlock(); | 2305 | cpu_unlock(); |
| 2031 | } | 2306 | } |
| 2032 | 2307 | ||
| 2308 | +/* SSE support */ | ||
| 2309 | +static inline void memcpy16(void *d, void *s) | ||
| 2310 | +{ | ||
| 2311 | + ((uint32_t *)d)[0] = ((uint32_t *)s)[0]; | ||
| 2312 | + ((uint32_t *)d)[1] = ((uint32_t *)s)[1]; | ||
| 2313 | + ((uint32_t *)d)[2] = ((uint32_t *)s)[2]; | ||
| 2314 | + ((uint32_t *)d)[3] = ((uint32_t *)s)[3]; | ||
| 2315 | +} | ||
| 2316 | + | ||
| 2317 | +void OPPROTO op_movo(void) | ||
| 2318 | +{ | ||
| 2319 | + /* XXX: badly generated code */ | ||
| 2320 | + XMMReg *d, *s; | ||
| 2321 | + d = (XMMReg *)((char *)env + PARAM1); | ||
| 2322 | + s = (XMMReg *)((char *)env + PARAM2); | ||
| 2323 | + memcpy16(d, s); | ||
| 2324 | +} | ||
| 2325 | + | ||
| 2326 | +void OPPROTO op_fxsave_A0(void) | ||
| 2327 | +{ | ||
| 2328 | + helper_fxsave(A0, PARAM1); | ||
| 2329 | +} | ||
| 2330 | + | ||
| 2331 | +void OPPROTO op_fxrstor_A0(void) | ||
| 2332 | +{ | ||
| 2333 | + helper_fxrstor(A0, PARAM1); | ||
| 2334 | +} | ||
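Throughout the op.c hunks above, the memory helpers stop taking a cast host pointer and are handed A0 directly, so the same micro-ops work whether the address register is 32 or 64 bits wide. The new SSE glue then moves whole 16-byte XMM values between slots in the CPU state with memcpy16(). A minimal stand-alone sketch of that copy pattern (not part of the commit; xmm_t is a hypothetical stand-in for QEMU's XMMReg):

#include <stdint.h>
#include <stdio.h>

typedef union {
    uint8_t  b[16];
    uint32_t l[4];
    uint64_t q[2];
} xmm_t;                                  /* hypothetical stand-in for XMMReg */

/* copy 16 bytes as four 32-bit moves, in the style of memcpy16() above */
static inline void copy16(xmm_t *d, const xmm_t *s)
{
    d->l[0] = s->l[0];
    d->l[1] = s->l[1];
    d->l[2] = s->l[2];
    d->l[3] = s->l[3];
}

int main(void)
{
    xmm_t a = { .q = { 0x1122334455667788ULL, 0x99aabbccddeeff00ULL } };
    xmm_t b;
    copy16(&b, &a);
    printf("%016llx %016llx\n",
           (unsigned long long)b.q[1], (unsigned long long)b.q[0]);
    return 0;
}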
target-i386/opreg_template.h
| @@ -20,29 +20,56 @@ | @@ -20,29 +20,56 @@ | ||
| 20 | */ | 20 | */ |
| 21 | void OPPROTO glue(op_movl_A0,REGNAME)(void) | 21 | void OPPROTO glue(op_movl_A0,REGNAME)(void) |
| 22 | { | 22 | { |
| 23 | - A0 = REG; | 23 | + A0 = (uint32_t)REG; |
| 24 | } | 24 | } |
| 25 | 25 | ||
| 26 | void OPPROTO glue(op_addl_A0,REGNAME)(void) | 26 | void OPPROTO glue(op_addl_A0,REGNAME)(void) |
| 27 | { | 27 | { |
| 28 | - A0 += REG; | 28 | + A0 = (uint32_t)(A0 + REG); |
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | void OPPROTO glue(glue(op_addl_A0,REGNAME),_s1)(void) | 31 | void OPPROTO glue(glue(op_addl_A0,REGNAME),_s1)(void) |
| 32 | { | 32 | { |
| 33 | - A0 += REG << 1; | 33 | + A0 = (uint32_t)(A0 + (REG << 1)); |
| 34 | } | 34 | } |
| 35 | 35 | ||
| 36 | void OPPROTO glue(glue(op_addl_A0,REGNAME),_s2)(void) | 36 | void OPPROTO glue(glue(op_addl_A0,REGNAME),_s2)(void) |
| 37 | { | 37 | { |
| 38 | - A0 += REG << 2; | 38 | + A0 = (uint32_t)(A0 + (REG << 2)); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | void OPPROTO glue(glue(op_addl_A0,REGNAME),_s3)(void) | 41 | void OPPROTO glue(glue(op_addl_A0,REGNAME),_s3)(void) |
| 42 | { | 42 | { |
| 43 | - A0 += REG << 3; | 43 | + A0 = (uint32_t)(A0 + (REG << 3)); |
| 44 | +} | ||
| 45 | + | ||
| 46 | +#ifdef TARGET_X86_64 | ||
| 47 | +void OPPROTO glue(op_movq_A0,REGNAME)(void) | ||
| 48 | +{ | ||
| 49 | + A0 = REG; | ||
| 50 | +} | ||
| 51 | + | ||
| 52 | +void OPPROTO glue(op_addq_A0,REGNAME)(void) | ||
| 53 | +{ | ||
| 54 | + A0 = (A0 + REG); | ||
| 55 | +} | ||
| 56 | + | ||
| 57 | +void OPPROTO glue(glue(op_addq_A0,REGNAME),_s1)(void) | ||
| 58 | +{ | ||
| 59 | + A0 = (A0 + (REG << 1)); | ||
| 60 | +} | ||
| 61 | + | ||
| 62 | +void OPPROTO glue(glue(op_addq_A0,REGNAME),_s2)(void) | ||
| 63 | +{ | ||
| 64 | + A0 = (A0 + (REG << 2)); | ||
| 44 | } | 65 | } |
| 45 | 66 | ||
| 67 | +void OPPROTO glue(glue(op_addq_A0,REGNAME),_s3)(void) | ||
| 68 | +{ | ||
| 69 | + A0 = (A0 + (REG << 3)); | ||
| 70 | +} | ||
| 71 | +#endif | ||
| 72 | + | ||
| 46 | void OPPROTO glue(op_movl_T0,REGNAME)(void) | 73 | void OPPROTO glue(op_movl_T0,REGNAME)(void) |
| 47 | { | 74 | { |
| 48 | T0 = REG; | 75 | T0 = REG; |
| @@ -65,72 +92,99 @@ void OPPROTO glue(op_movh_T1,REGNAME)(void) | @@ -65,72 +92,99 @@ void OPPROTO glue(op_movh_T1,REGNAME)(void) | ||
| 65 | 92 | ||
| 66 | void OPPROTO glue(glue(op_movl,REGNAME),_T0)(void) | 93 | void OPPROTO glue(glue(op_movl,REGNAME),_T0)(void) |
| 67 | { | 94 | { |
| 68 | - REG = T0; | 95 | + REG = (uint32_t)T0; |
| 69 | } | 96 | } |
| 70 | 97 | ||
| 71 | void OPPROTO glue(glue(op_movl,REGNAME),_T1)(void) | 98 | void OPPROTO glue(glue(op_movl,REGNAME),_T1)(void) |
| 72 | { | 99 | { |
| 73 | - REG = T1; | 100 | + REG = (uint32_t)T1; |
| 74 | } | 101 | } |
| 75 | 102 | ||
| 76 | void OPPROTO glue(glue(op_movl,REGNAME),_A0)(void) | 103 | void OPPROTO glue(glue(op_movl,REGNAME),_A0)(void) |
| 77 | { | 104 | { |
| 105 | + REG = (uint32_t)A0; | ||
| 106 | +} | ||
| 107 | + | ||
| 108 | +#ifdef TARGET_X86_64 | ||
| 109 | +void OPPROTO glue(glue(op_movq,REGNAME),_T0)(void) | ||
| 110 | +{ | ||
| 111 | + REG = T0; | ||
| 112 | +} | ||
| 113 | + | ||
| 114 | +void OPPROTO glue(glue(op_movq,REGNAME),_T1)(void) | ||
| 115 | +{ | ||
| 116 | + REG = T1; | ||
| 117 | +} | ||
| 118 | + | ||
| 119 | +void OPPROTO glue(glue(op_movq,REGNAME),_A0)(void) | ||
| 120 | +{ | ||
| 78 | REG = A0; | 121 | REG = A0; |
| 79 | } | 122 | } |
| 123 | +#endif | ||
| 80 | 124 | ||
| 81 | /* mov T1 to REG if T0 is true */ | 125 | /* mov T1 to REG if T0 is true */ |
| 82 | void OPPROTO glue(glue(op_cmovw,REGNAME),_T1_T0)(void) | 126 | void OPPROTO glue(glue(op_cmovw,REGNAME),_T1_T0)(void) |
| 83 | { | 127 | { |
| 84 | if (T0) | 128 | if (T0) |
| 85 | - REG = (REG & 0xffff0000) | (T1 & 0xffff); | 129 | + REG = (REG & ~0xffff) | (T1 & 0xffff); |
| 86 | FORCE_RET(); | 130 | FORCE_RET(); |
| 87 | } | 131 | } |
| 88 | 132 | ||
| 89 | void OPPROTO glue(glue(op_cmovl,REGNAME),_T1_T0)(void) | 133 | void OPPROTO glue(glue(op_cmovl,REGNAME),_T1_T0)(void) |
| 90 | { | 134 | { |
| 91 | if (T0) | 135 | if (T0) |
| 136 | + REG = (uint32_t)T1; | ||
| 137 | + FORCE_RET(); | ||
| 138 | +} | ||
| 139 | + | ||
| 140 | +#ifdef TARGET_X86_64 | ||
| 141 | +void OPPROTO glue(glue(op_cmovq,REGNAME),_T1_T0)(void) | ||
| 142 | +{ | ||
| 143 | + if (T0) | ||
| 92 | REG = T1; | 144 | REG = T1; |
| 93 | FORCE_RET(); | 145 | FORCE_RET(); |
| 94 | } | 146 | } |
| 147 | +#endif | ||
| 95 | 148 | ||
| 96 | /* NOTE: T0 high order bits are ignored */ | 149 | /* NOTE: T0 high order bits are ignored */ |
| 97 | void OPPROTO glue(glue(op_movw,REGNAME),_T0)(void) | 150 | void OPPROTO glue(glue(op_movw,REGNAME),_T0)(void) |
| 98 | { | 151 | { |
| 99 | - REG = (REG & 0xffff0000) | (T0 & 0xffff); | 152 | + REG = (REG & ~0xffff) | (T0 & 0xffff); |
| 100 | } | 153 | } |
| 101 | 154 | ||
| 102 | /* NOTE: T0 high order bits are ignored */ | 155 | /* NOTE: T0 high order bits are ignored */ |
| 103 | void OPPROTO glue(glue(op_movw,REGNAME),_T1)(void) | 156 | void OPPROTO glue(glue(op_movw,REGNAME),_T1)(void) |
| 104 | { | 157 | { |
| 105 | - REG = (REG & 0xffff0000) | (T1 & 0xffff); | 158 | + REG = (REG & ~0xffff) | (T1 & 0xffff); |
| 106 | } | 159 | } |
| 107 | 160 | ||
| 108 | /* NOTE: A0 high order bits are ignored */ | 161 | /* NOTE: A0 high order bits are ignored */ |
| 109 | void OPPROTO glue(glue(op_movw,REGNAME),_A0)(void) | 162 | void OPPROTO glue(glue(op_movw,REGNAME),_A0)(void) |
| 110 | { | 163 | { |
| 111 | - REG = (REG & 0xffff0000) | (A0 & 0xffff); | 164 | + REG = (REG & ~0xffff) | (A0 & 0xffff); |
| 112 | } | 165 | } |
| 113 | 166 | ||
| 114 | /* NOTE: T0 high order bits are ignored */ | 167 | /* NOTE: T0 high order bits are ignored */ |
| 115 | void OPPROTO glue(glue(op_movb,REGNAME),_T0)(void) | 168 | void OPPROTO glue(glue(op_movb,REGNAME),_T0)(void) |
| 116 | { | 169 | { |
| 117 | - REG = (REG & 0xffffff00) | (T0 & 0xff); | 170 | + REG = (REG & ~0xff) | (T0 & 0xff); |
| 118 | } | 171 | } |
| 119 | 172 | ||
| 120 | /* NOTE: T0 high order bits are ignored */ | 173 | /* NOTE: T0 high order bits are ignored */ |
| 121 | void OPPROTO glue(glue(op_movh,REGNAME),_T0)(void) | 174 | void OPPROTO glue(glue(op_movh,REGNAME),_T0)(void) |
| 122 | { | 175 | { |
| 123 | - REG = (REG & 0xffff00ff) | ((T0 & 0xff) << 8); | 176 | + REG = (REG & ~0xff00) | ((T0 & 0xff) << 8); |
| 124 | } | 177 | } |
| 125 | 178 | ||
| 126 | /* NOTE: T1 high order bits are ignored */ | 179 | /* NOTE: T1 high order bits are ignored */ |
| 127 | void OPPROTO glue(glue(op_movb,REGNAME),_T1)(void) | 180 | void OPPROTO glue(glue(op_movb,REGNAME),_T1)(void) |
| 128 | { | 181 | { |
| 129 | - REG = (REG & 0xffffff00) | (T1 & 0xff); | 182 | + REG = (REG & ~0xff) | (T1 & 0xff); |
| 130 | } | 183 | } |
| 131 | 184 | ||
| 132 | /* NOTE: T1 high order bits are ignored */ | 185 | /* NOTE: T1 high order bits are ignored */ |
| 133 | void OPPROTO glue(glue(op_movh,REGNAME),_T1)(void) | 186 | void OPPROTO glue(glue(op_movh,REGNAME),_T1)(void) |
| 134 | { | 187 | { |
| 135 | - REG = (REG & 0xffff00ff) | ((T1 & 0xff) << 8); | 188 | + REG = (REG & ~0xff00) | ((T1 & 0xff) << 8); |
| 136 | } | 189 | } |
| 190 | + |
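The opreg_template.h changes encode the x86_64 register-write rules: a 32-bit move now stores (uint32_t)T0, which zero-extends into the upper half of a 64-bit register, while the byte and word moves switch from hard-coded 32-bit masks (0xffff0000, 0xffffff00) to ~0xffff and ~0xff so every bit above the written field is preserved regardless of register width; full 64-bit variants are added under TARGET_X86_64. A small illustration of those masking rules (plain C on a 64-bit value, not the commit's code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t reg = 0x1122334455667788ULL;

    uint64_t movl = (uint32_t)0xdeadbeef;            /* zero-extends: upper 32 bits cleared */
    uint64_t movw = (reg & ~0xffffULL) | 0xbeef;     /* keeps bits 63..16 */
    uint64_t movb = (reg & ~0xffULL) | 0xef;         /* keeps bits 63..8  */

    printf("movl: %016llx\n", (unsigned long long)movl); /* 00000000deadbeef */
    printf("movw: %016llx\n", (unsigned long long)movw); /* 112233445566beef */
    printf("movb: %016llx\n", (unsigned long long)movb); /* 11223344556677ef */
    return 0;
}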
target-i386/ops_mem.h
| 1 | void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T0_A0)(void) | 1 | void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T0_A0)(void) |
| 2 | { | 2 | { |
| 3 | - T0 = glue(ldub, MEMSUFFIX)((uint8_t *)A0); | 3 | + T0 = glue(ldub, MEMSUFFIX)(A0); |
| 4 | } | 4 | } |
| 5 | 5 | ||
| 6 | void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T0_A0)(void) | 6 | void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T0_A0)(void) |
| 7 | { | 7 | { |
| 8 | - T0 = glue(ldsb, MEMSUFFIX)((int8_t *)A0); | 8 | + T0 = glue(ldsb, MEMSUFFIX)(A0); |
| 9 | } | 9 | } |
| 10 | 10 | ||
| 11 | void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T0_A0)(void) | 11 | void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T0_A0)(void) |
| 12 | { | 12 | { |
| 13 | - T0 = glue(lduw, MEMSUFFIX)((uint8_t *)A0); | 13 | + T0 = glue(lduw, MEMSUFFIX)(A0); |
| 14 | } | 14 | } |
| 15 | 15 | ||
| 16 | void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T0_A0)(void) | 16 | void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T0_A0)(void) |
| 17 | { | 17 | { |
| 18 | - T0 = glue(ldsw, MEMSUFFIX)((int8_t *)A0); | 18 | + T0 = glue(ldsw, MEMSUFFIX)(A0); |
| 19 | } | 19 | } |
| 20 | 20 | ||
| 21 | void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T0_A0)(void) | 21 | void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T0_A0)(void) |
| 22 | { | 22 | { |
| 23 | - T0 = glue(ldl, MEMSUFFIX)((uint8_t *)A0); | 23 | + T0 = (uint32_t)glue(ldl, MEMSUFFIX)(A0); |
| 24 | } | 24 | } |
| 25 | 25 | ||
| 26 | void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T1_A0)(void) | 26 | void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T1_A0)(void) |
| 27 | { | 27 | { |
| 28 | - T1 = glue(ldub, MEMSUFFIX)((uint8_t *)A0); | 28 | + T1 = glue(ldub, MEMSUFFIX)(A0); |
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T1_A0)(void) | 31 | void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T1_A0)(void) |
| 32 | { | 32 | { |
| 33 | - T1 = glue(ldsb, MEMSUFFIX)((int8_t *)A0); | 33 | + T1 = glue(ldsb, MEMSUFFIX)(A0); |
| 34 | } | 34 | } |
| 35 | 35 | ||
| 36 | void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T1_A0)(void) | 36 | void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T1_A0)(void) |
| 37 | { | 37 | { |
| 38 | - T1 = glue(lduw, MEMSUFFIX)((uint8_t *)A0); | 38 | + T1 = glue(lduw, MEMSUFFIX)(A0); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T1_A0)(void) | 41 | void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T1_A0)(void) |
| 42 | { | 42 | { |
| 43 | - T1 = glue(ldsw, MEMSUFFIX)((int8_t *)A0); | 43 | + T1 = glue(ldsw, MEMSUFFIX)(A0); |
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T1_A0)(void) | 46 | void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T1_A0)(void) |
| 47 | { | 47 | { |
| 48 | - T1 = glue(ldl, MEMSUFFIX)((uint8_t *)A0); | 48 | + T1 = glue(ldl, MEMSUFFIX)(A0); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | void OPPROTO glue(glue(op_stb, MEMSUFFIX), _T0_A0)(void) | 51 | void OPPROTO glue(glue(op_stb, MEMSUFFIX), _T0_A0)(void) |
| 52 | { | 52 | { |
| 53 | - glue(stb, MEMSUFFIX)((uint8_t *)A0, T0); | 53 | + glue(stb, MEMSUFFIX)(A0, T0); |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | void OPPROTO glue(glue(op_stw, MEMSUFFIX), _T0_A0)(void) | 56 | void OPPROTO glue(glue(op_stw, MEMSUFFIX), _T0_A0)(void) |
| 57 | { | 57 | { |
| 58 | - glue(stw, MEMSUFFIX)((uint8_t *)A0, T0); | 58 | + glue(stw, MEMSUFFIX)(A0, T0); |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | void OPPROTO glue(glue(op_stl, MEMSUFFIX), _T0_A0)(void) | 61 | void OPPROTO glue(glue(op_stl, MEMSUFFIX), _T0_A0)(void) |
| 62 | { | 62 | { |
| 63 | - glue(stl, MEMSUFFIX)((uint8_t *)A0, T0); | 63 | + glue(stl, MEMSUFFIX)(A0, T0); |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | #if 0 | 66 | #if 0 |
| 67 | void OPPROTO glue(glue(op_stb, MEMSUFFIX), _T1_A0)(void) | 67 | void OPPROTO glue(glue(op_stb, MEMSUFFIX), _T1_A0)(void) |
| 68 | { | 68 | { |
| 69 | - glue(stb, MEMSUFFIX)((uint8_t *)A0, T1); | 69 | + glue(stb, MEMSUFFIX)(A0, T1); |
| 70 | } | 70 | } |
| 71 | #endif | 71 | #endif |
| 72 | 72 | ||
| 73 | void OPPROTO glue(glue(op_stw, MEMSUFFIX), _T1_A0)(void) | 73 | void OPPROTO glue(glue(op_stw, MEMSUFFIX), _T1_A0)(void) |
| 74 | { | 74 | { |
| 75 | - glue(stw, MEMSUFFIX)((uint8_t *)A0, T1); | 75 | + glue(stw, MEMSUFFIX)(A0, T1); |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | void OPPROTO glue(glue(op_stl, MEMSUFFIX), _T1_A0)(void) | 78 | void OPPROTO glue(glue(op_stl, MEMSUFFIX), _T1_A0)(void) |
| 79 | { | 79 | { |
| 80 | - glue(stl, MEMSUFFIX)((uint8_t *)A0, T1); | 80 | + glue(stl, MEMSUFFIX)(A0, T1); |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | +/* SSE support */ | ||
| 84 | +void OPPROTO glue(glue(op_ldo, MEMSUFFIX), _env_A0)(void) | ||
| 85 | +{ | ||
| 86 | + XMMReg *p; | ||
| 87 | + p = (XMMReg *)((char *)env + PARAM1); | ||
| 88 | + /* XXX: host endianness ? */ | ||
| 89 | + p->u.q[0] = glue(ldq, MEMSUFFIX)(A0); | ||
| 90 | + p->u.q[1] = glue(ldq, MEMSUFFIX)(A0 + 8); | ||
| 91 | +} | ||
| 92 | + | ||
| 93 | +void OPPROTO glue(glue(op_sto, MEMSUFFIX), _env_A0)(void) | ||
| 94 | +{ | ||
| 95 | + XMMReg *p; | ||
| 96 | + p = (XMMReg *)((char *)env + PARAM1); | ||
| 97 | + /* XXX: host endianness ? */ | ||
| 98 | + glue(stq, MEMSUFFIX)(A0, p->u.q[0]); | ||
| 99 | + glue(stq, MEMSUFFIX)(A0 + 8, p->u.q[1]); | ||
| 100 | +} | ||
| 101 | + | ||
| 102 | +#ifdef TARGET_X86_64 | ||
| 103 | +void OPPROTO glue(glue(op_ldsl, MEMSUFFIX), _T0_A0)(void) | ||
| 104 | +{ | ||
| 105 | + T0 = (int32_t)glue(ldl, MEMSUFFIX)(A0); | ||
| 106 | +} | ||
| 107 | + | ||
| 108 | +void OPPROTO glue(glue(op_ldsl, MEMSUFFIX), _T1_A0)(void) | ||
| 109 | +{ | ||
| 110 | + T1 = (int32_t)glue(ldl, MEMSUFFIX)(A0); | ||
| 111 | +} | ||
| 112 | + | ||
| 113 | +void OPPROTO glue(glue(op_ldq, MEMSUFFIX), _T0_A0)(void) | ||
| 114 | +{ | ||
| 115 | + T0 = glue(ldq, MEMSUFFIX)(A0); | ||
| 116 | +} | ||
| 117 | + | ||
| 118 | +void OPPROTO glue(glue(op_ldq, MEMSUFFIX), _T1_A0)(void) | ||
| 119 | +{ | ||
| 120 | + T1 = glue(ldq, MEMSUFFIX)(A0); | ||
| 121 | +} | ||
| 122 | + | ||
| 123 | +void OPPROTO glue(glue(op_stq, MEMSUFFIX), _T0_A0)(void) | ||
| 124 | +{ | ||
| 125 | + glue(stq, MEMSUFFIX)(A0, T0); | ||
| 126 | +} | ||
| 127 | + | ||
| 128 | +void OPPROTO glue(glue(op_stq, MEMSUFFIX), _T1_A0)(void) | ||
| 129 | +{ | ||
| 130 | + glue(stq, MEMSUFFIX)(A0, T1); | ||
| 131 | +} | ||
| 132 | +#endif | ||
| 133 | + | ||
| 83 | #undef MEMSUFFIX | 134 | #undef MEMSUFFIX |
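The new op_ldo/op_sto micro-ops above assemble a 16-byte SSE operand from two 64-bit accesses at A0 and A0 + 8 (the "host endianness" comment flags that the q[0]/q[1] split is only checked for little-endian layouts). A stand-alone sketch of that composition, assuming a little-endian host and a hypothetical ldq_le() helper:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint64_t ldq_le(const uint8_t *p)   /* hypothetical little-endian 64-bit load */
{
    uint64_t v;
    memcpy(&v, p, 8);                      /* assumes a little-endian host */
    return v;
}

int main(void)
{
    uint8_t mem[16];
    int i;
    for (i = 0; i < 16; i++)
        mem[i] = (uint8_t)i;

    uint64_t lo = ldq_le(mem);             /* bytes 0..7  -> q[0] */
    uint64_t hi = ldq_le(mem + 8);         /* bytes 8..15 -> q[1] */
    printf("q[0]=%016llx q[1]=%016llx\n",
           (unsigned long long)lo, (unsigned long long)hi);
    return 0;
}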
target-i386/ops_template.h
| @@ -20,7 +20,12 @@ | @@ -20,7 +20,12 @@ | ||
| 20 | */ | 20 | */ |
| 21 | #define DATA_BITS (1 << (3 + SHIFT)) | 21 | #define DATA_BITS (1 << (3 + SHIFT)) |
| 22 | #define SHIFT_MASK (DATA_BITS - 1) | 22 | #define SHIFT_MASK (DATA_BITS - 1) |
| 23 | -#define SIGN_MASK (1 << (DATA_BITS - 1)) | 23 | +#define SIGN_MASK (((target_ulong)1) << (DATA_BITS - 1)) |
| 24 | +#if DATA_BITS <= 32 | ||
| 25 | +#define SHIFT1_MASK 0x1f | ||
| 26 | +#else | ||
| 27 | +#define SHIFT1_MASK 0x3f | ||
| 28 | +#endif | ||
| 24 | 29 | ||
| 25 | #if DATA_BITS == 8 | 30 | #if DATA_BITS == 8 |
| 26 | #define SUFFIX b | 31 | #define SUFFIX b |
| @@ -37,6 +42,11 @@ | @@ -37,6 +42,11 @@ | ||
| 37 | #define DATA_TYPE uint32_t | 42 | #define DATA_TYPE uint32_t |
| 38 | #define DATA_STYPE int32_t | 43 | #define DATA_STYPE int32_t |
| 39 | #define DATA_MASK 0xffffffff | 44 | #define DATA_MASK 0xffffffff |
| 45 | +#elif DATA_BITS == 64 | ||
| 46 | +#define SUFFIX q | ||
| 47 | +#define DATA_TYPE uint64_t | ||
| 48 | +#define DATA_STYPE int64_t | ||
| 49 | +#define DATA_MASK 0xffffffffffffffff | ||
| 40 | #else | 50 | #else |
| 41 | #error unhandled operand size | 51 | #error unhandled operand size |
| 42 | #endif | 52 | #endif |
| @@ -46,7 +56,7 @@ | @@ -46,7 +56,7 @@ | ||
| 46 | static int glue(compute_all_add, SUFFIX)(void) | 56 | static int glue(compute_all_add, SUFFIX)(void) |
| 47 | { | 57 | { |
| 48 | int cf, pf, af, zf, sf, of; | 58 | int cf, pf, af, zf, sf, of; |
| 49 | - int src1, src2; | 59 | + target_long src1, src2; |
| 50 | src1 = CC_SRC; | 60 | src1 = CC_SRC; |
| 51 | src2 = CC_DST - CC_SRC; | 61 | src2 = CC_DST - CC_SRC; |
| 52 | cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1; | 62 | cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1; |
| @@ -60,7 +70,8 @@ static int glue(compute_all_add, SUFFIX)(void) | @@ -60,7 +70,8 @@ static int glue(compute_all_add, SUFFIX)(void) | ||
| 60 | 70 | ||
| 61 | static int glue(compute_c_add, SUFFIX)(void) | 71 | static int glue(compute_c_add, SUFFIX)(void) |
| 62 | { | 72 | { |
| 63 | - int src1, cf; | 73 | + int cf; |
| 74 | + target_long src1; | ||
| 64 | src1 = CC_SRC; | 75 | src1 = CC_SRC; |
| 65 | cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1; | 76 | cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1; |
| 66 | return cf; | 77 | return cf; |
| @@ -69,7 +80,7 @@ static int glue(compute_c_add, SUFFIX)(void) | @@ -69,7 +80,7 @@ static int glue(compute_c_add, SUFFIX)(void) | ||
| 69 | static int glue(compute_all_adc, SUFFIX)(void) | 80 | static int glue(compute_all_adc, SUFFIX)(void) |
| 70 | { | 81 | { |
| 71 | int cf, pf, af, zf, sf, of; | 82 | int cf, pf, af, zf, sf, of; |
| 72 | - int src1, src2; | 83 | + target_long src1, src2; |
| 73 | src1 = CC_SRC; | 84 | src1 = CC_SRC; |
| 74 | src2 = CC_DST - CC_SRC - 1; | 85 | src2 = CC_DST - CC_SRC - 1; |
| 75 | cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1; | 86 | cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1; |
| @@ -83,7 +94,8 @@ static int glue(compute_all_adc, SUFFIX)(void) | @@ -83,7 +94,8 @@ static int glue(compute_all_adc, SUFFIX)(void) | ||
| 83 | 94 | ||
| 84 | static int glue(compute_c_adc, SUFFIX)(void) | 95 | static int glue(compute_c_adc, SUFFIX)(void) |
| 85 | { | 96 | { |
| 86 | - int src1, cf; | 97 | + int cf; |
| 98 | + target_long src1; | ||
| 87 | src1 = CC_SRC; | 99 | src1 = CC_SRC; |
| 88 | cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1; | 100 | cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1; |
| 89 | return cf; | 101 | return cf; |
| @@ -92,7 +104,7 @@ static int glue(compute_c_adc, SUFFIX)(void) | @@ -92,7 +104,7 @@ static int glue(compute_c_adc, SUFFIX)(void) | ||
| 92 | static int glue(compute_all_sub, SUFFIX)(void) | 104 | static int glue(compute_all_sub, SUFFIX)(void) |
| 93 | { | 105 | { |
| 94 | int cf, pf, af, zf, sf, of; | 106 | int cf, pf, af, zf, sf, of; |
| 95 | - int src1, src2; | 107 | + target_long src1, src2; |
| 96 | src1 = CC_DST + CC_SRC; | 108 | src1 = CC_DST + CC_SRC; |
| 97 | src2 = CC_SRC; | 109 | src2 = CC_SRC; |
| 98 | cf = (DATA_TYPE)src1 < (DATA_TYPE)src2; | 110 | cf = (DATA_TYPE)src1 < (DATA_TYPE)src2; |
| @@ -106,7 +118,8 @@ static int glue(compute_all_sub, SUFFIX)(void) | @@ -106,7 +118,8 @@ static int glue(compute_all_sub, SUFFIX)(void) | ||
| 106 | 118 | ||
| 107 | static int glue(compute_c_sub, SUFFIX)(void) | 119 | static int glue(compute_c_sub, SUFFIX)(void) |
| 108 | { | 120 | { |
| 109 | - int src1, src2, cf; | 121 | + int cf; |
| 122 | + target_long src1, src2; | ||
| 110 | src1 = CC_DST + CC_SRC; | 123 | src1 = CC_DST + CC_SRC; |
| 111 | src2 = CC_SRC; | 124 | src2 = CC_SRC; |
| 112 | cf = (DATA_TYPE)src1 < (DATA_TYPE)src2; | 125 | cf = (DATA_TYPE)src1 < (DATA_TYPE)src2; |
| @@ -116,7 +129,7 @@ static int glue(compute_c_sub, SUFFIX)(void) | @@ -116,7 +129,7 @@ static int glue(compute_c_sub, SUFFIX)(void) | ||
| 116 | static int glue(compute_all_sbb, SUFFIX)(void) | 129 | static int glue(compute_all_sbb, SUFFIX)(void) |
| 117 | { | 130 | { |
| 118 | int cf, pf, af, zf, sf, of; | 131 | int cf, pf, af, zf, sf, of; |
| 119 | - int src1, src2; | 132 | + target_long src1, src2; |
| 120 | src1 = CC_DST + CC_SRC + 1; | 133 | src1 = CC_DST + CC_SRC + 1; |
| 121 | src2 = CC_SRC; | 134 | src2 = CC_SRC; |
| 122 | cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2; | 135 | cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2; |
| @@ -130,7 +143,8 @@ static int glue(compute_all_sbb, SUFFIX)(void) | @@ -130,7 +143,8 @@ static int glue(compute_all_sbb, SUFFIX)(void) | ||
| 130 | 143 | ||
| 131 | static int glue(compute_c_sbb, SUFFIX)(void) | 144 | static int glue(compute_c_sbb, SUFFIX)(void) |
| 132 | { | 145 | { |
| 133 | - int src1, src2, cf; | 146 | + int cf; |
| 147 | + target_long src1, src2; | ||
| 134 | src1 = CC_DST + CC_SRC + 1; | 148 | src1 = CC_DST + CC_SRC + 1; |
| 135 | src2 = CC_SRC; | 149 | src2 = CC_SRC; |
| 136 | cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2; | 150 | cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2; |
| @@ -157,7 +171,7 @@ static int glue(compute_c_logic, SUFFIX)(void) | @@ -157,7 +171,7 @@ static int glue(compute_c_logic, SUFFIX)(void) | ||
| 157 | static int glue(compute_all_inc, SUFFIX)(void) | 171 | static int glue(compute_all_inc, SUFFIX)(void) |
| 158 | { | 172 | { |
| 159 | int cf, pf, af, zf, sf, of; | 173 | int cf, pf, af, zf, sf, of; |
| 160 | - int src1, src2; | 174 | + target_long src1, src2; |
| 161 | src1 = CC_DST - 1; | 175 | src1 = CC_DST - 1; |
| 162 | src2 = 1; | 176 | src2 = 1; |
| 163 | cf = CC_SRC; | 177 | cf = CC_SRC; |
| @@ -179,7 +193,7 @@ static int glue(compute_c_inc, SUFFIX)(void) | @@ -179,7 +193,7 @@ static int glue(compute_c_inc, SUFFIX)(void) | ||
| 179 | static int glue(compute_all_dec, SUFFIX)(void) | 193 | static int glue(compute_all_dec, SUFFIX)(void) |
| 180 | { | 194 | { |
| 181 | int cf, pf, af, zf, sf, of; | 195 | int cf, pf, af, zf, sf, of; |
| 182 | - int src1, src2; | 196 | + target_long src1, src2; |
| 183 | src1 = CC_DST + 1; | 197 | src1 = CC_DST + 1; |
| 184 | src2 = 1; | 198 | src2 = 1; |
| 185 | cf = CC_SRC; | 199 | cf = CC_SRC; |
| @@ -187,7 +201,7 @@ static int glue(compute_all_dec, SUFFIX)(void) | @@ -187,7 +201,7 @@ static int glue(compute_all_dec, SUFFIX)(void) | ||
| 187 | af = (CC_DST ^ src1 ^ src2) & 0x10; | 201 | af = (CC_DST ^ src1 ^ src2) & 0x10; |
| 188 | zf = ((DATA_TYPE)CC_DST == 0) << 6; | 202 | zf = ((DATA_TYPE)CC_DST == 0) << 6; |
| 189 | sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80; | 203 | sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80; |
| 190 | - of = ((CC_DST & DATA_MASK) == ((uint32_t)SIGN_MASK - 1)) << 11; | 204 | + of = ((CC_DST & DATA_MASK) == ((target_ulong)SIGN_MASK - 1)) << 11; |
| 191 | return cf | pf | af | zf | sf | of; | 205 | return cf | pf | af | zf | sf | of; |
| 192 | } | 206 | } |
| 193 | 207 | ||
| @@ -256,71 +270,66 @@ static int glue(compute_all_mul, SUFFIX)(void) | @@ -256,71 +270,66 @@ static int glue(compute_all_mul, SUFFIX)(void) | ||
| 256 | 270 | ||
| 257 | void OPPROTO glue(op_jb_sub, SUFFIX)(void) | 271 | void OPPROTO glue(op_jb_sub, SUFFIX)(void) |
| 258 | { | 272 | { |
| 259 | - int src1, src2; | 273 | + target_long src1, src2; |
| 260 | src1 = CC_DST + CC_SRC; | 274 | src1 = CC_DST + CC_SRC; |
| 261 | src2 = CC_SRC; | 275 | src2 = CC_SRC; |
| 262 | 276 | ||
| 263 | if ((DATA_TYPE)src1 < (DATA_TYPE)src2) | 277 | if ((DATA_TYPE)src1 < (DATA_TYPE)src2) |
| 264 | - JUMP_TB(glue(op_jb_sub, SUFFIX), PARAM1, 0, PARAM2); | ||
| 265 | - else | ||
| 266 | - JUMP_TB(glue(op_jb_sub, SUFFIX), PARAM1, 1, PARAM3); | 278 | + GOTO_LABEL_PARAM(1); |
| 267 | FORCE_RET(); | 279 | FORCE_RET(); |
| 268 | } | 280 | } |
| 269 | 281 | ||
| 270 | void OPPROTO glue(op_jz_sub, SUFFIX)(void) | 282 | void OPPROTO glue(op_jz_sub, SUFFIX)(void) |
| 271 | { | 283 | { |
| 272 | if ((DATA_TYPE)CC_DST == 0) | 284 | if ((DATA_TYPE)CC_DST == 0) |
| 273 | - JUMP_TB(glue(op_jz_sub, SUFFIX), PARAM1, 0, PARAM2); | ||
| 274 | - else | ||
| 275 | - JUMP_TB(glue(op_jz_sub, SUFFIX), PARAM1, 1, PARAM3); | 285 | + GOTO_LABEL_PARAM(1); |
| 286 | + FORCE_RET(); | ||
| 287 | +} | ||
| 288 | + | ||
| 289 | +void OPPROTO glue(op_jnz_sub, SUFFIX)(void) | ||
| 290 | +{ | ||
| 291 | + if ((DATA_TYPE)CC_DST != 0) | ||
| 292 | + GOTO_LABEL_PARAM(1); | ||
| 276 | FORCE_RET(); | 293 | FORCE_RET(); |
| 277 | } | 294 | } |
| 278 | 295 | ||
| 279 | void OPPROTO glue(op_jbe_sub, SUFFIX)(void) | 296 | void OPPROTO glue(op_jbe_sub, SUFFIX)(void) |
| 280 | { | 297 | { |
| 281 | - int src1, src2; | 298 | + target_long src1, src2; |
| 282 | src1 = CC_DST + CC_SRC; | 299 | src1 = CC_DST + CC_SRC; |
| 283 | src2 = CC_SRC; | 300 | src2 = CC_SRC; |
| 284 | 301 | ||
| 285 | if ((DATA_TYPE)src1 <= (DATA_TYPE)src2) | 302 | if ((DATA_TYPE)src1 <= (DATA_TYPE)src2) |
| 286 | - JUMP_TB(glue(op_jbe_sub, SUFFIX), PARAM1, 0, PARAM2); | ||
| 287 | - else | ||
| 288 | - JUMP_TB(glue(op_jbe_sub, SUFFIX), PARAM1, 1, PARAM3); | 303 | + GOTO_LABEL_PARAM(1); |
| 289 | FORCE_RET(); | 304 | FORCE_RET(); |
| 290 | } | 305 | } |
| 291 | 306 | ||
| 292 | void OPPROTO glue(op_js_sub, SUFFIX)(void) | 307 | void OPPROTO glue(op_js_sub, SUFFIX)(void) |
| 293 | { | 308 | { |
| 294 | if (CC_DST & SIGN_MASK) | 309 | if (CC_DST & SIGN_MASK) |
| 295 | - JUMP_TB(glue(op_js_sub, SUFFIX), PARAM1, 0, PARAM2); | ||
| 296 | - else | ||
| 297 | - JUMP_TB(glue(op_js_sub, SUFFIX), PARAM1, 1, PARAM3); | 310 | + GOTO_LABEL_PARAM(1); |
| 298 | FORCE_RET(); | 311 | FORCE_RET(); |
| 299 | } | 312 | } |
| 300 | 313 | ||
| 301 | void OPPROTO glue(op_jl_sub, SUFFIX)(void) | 314 | void OPPROTO glue(op_jl_sub, SUFFIX)(void) |
| 302 | { | 315 | { |
| 303 | - int src1, src2; | 316 | + target_long src1, src2; |
| 304 | src1 = CC_DST + CC_SRC; | 317 | src1 = CC_DST + CC_SRC; |
| 305 | src2 = CC_SRC; | 318 | src2 = CC_SRC; |
| 306 | 319 | ||
| 307 | if ((DATA_STYPE)src1 < (DATA_STYPE)src2) | 320 | if ((DATA_STYPE)src1 < (DATA_STYPE)src2) |
| 308 | - JUMP_TB(glue(op_jl_sub, SUFFIX), PARAM1, 0, PARAM2); | ||
| 309 | - else | ||
| 310 | - JUMP_TB(glue(op_jl_sub, SUFFIX), PARAM1, 1, PARAM3); | 321 | + GOTO_LABEL_PARAM(1); |
| 311 | FORCE_RET(); | 322 | FORCE_RET(); |
| 312 | } | 323 | } |
| 313 | 324 | ||
| 314 | void OPPROTO glue(op_jle_sub, SUFFIX)(void) | 325 | void OPPROTO glue(op_jle_sub, SUFFIX)(void) |
| 315 | { | 326 | { |
| 316 | - int src1, src2; | 327 | + target_long src1, src2; |
| 317 | src1 = CC_DST + CC_SRC; | 328 | src1 = CC_DST + CC_SRC; |
| 318 | src2 = CC_SRC; | 329 | src2 = CC_SRC; |
| 319 | 330 | ||
| 320 | if ((DATA_STYPE)src1 <= (DATA_STYPE)src2) | 331 | if ((DATA_STYPE)src1 <= (DATA_STYPE)src2) |
| 321 | - JUMP_TB(glue(op_jle_sub, SUFFIX), PARAM1, 0, PARAM2); | ||
| 322 | - else | ||
| 323 | - JUMP_TB(glue(op_jle_sub, SUFFIX), PARAM1, 1, PARAM3); | 332 | + GOTO_LABEL_PARAM(1); |
| 324 | FORCE_RET(); | 333 | FORCE_RET(); |
| 325 | } | 334 | } |
| 326 | 335 | ||
| @@ -330,50 +339,33 @@ void OPPROTO glue(op_jle_sub, SUFFIX)(void) | @@ -330,50 +339,33 @@ void OPPROTO glue(op_jle_sub, SUFFIX)(void) | ||
| 330 | 339 | ||
| 331 | void OPPROTO glue(op_loopnz, SUFFIX)(void) | 340 | void OPPROTO glue(op_loopnz, SUFFIX)(void) |
| 332 | { | 341 | { |
| 333 | - unsigned int tmp; | ||
| 334 | int eflags; | 342 | int eflags; |
| 335 | eflags = cc_table[CC_OP].compute_all(); | 343 | eflags = cc_table[CC_OP].compute_all(); |
| 336 | - tmp = (ECX - 1) & DATA_MASK; | ||
| 337 | - ECX = (ECX & ~DATA_MASK) | tmp; | ||
| 338 | - if (tmp != 0 && !(eflags & CC_Z)) | ||
| 339 | - EIP = PARAM1; | ||
| 340 | - else | ||
| 341 | - EIP = PARAM2; | 344 | + if ((DATA_TYPE)ECX != 0 && !(eflags & CC_Z)) |
| 345 | + GOTO_LABEL_PARAM(1); | ||
| 342 | FORCE_RET(); | 346 | FORCE_RET(); |
| 343 | } | 347 | } |
| 344 | 348 | ||
| 345 | void OPPROTO glue(op_loopz, SUFFIX)(void) | 349 | void OPPROTO glue(op_loopz, SUFFIX)(void) |
| 346 | { | 350 | { |
| 347 | - unsigned int tmp; | ||
| 348 | int eflags; | 351 | int eflags; |
| 349 | eflags = cc_table[CC_OP].compute_all(); | 352 | eflags = cc_table[CC_OP].compute_all(); |
| 350 | - tmp = (ECX - 1) & DATA_MASK; | ||
| 351 | - ECX = (ECX & ~DATA_MASK) | tmp; | ||
| 352 | - if (tmp != 0 && (eflags & CC_Z)) | ||
| 353 | - EIP = PARAM1; | ||
| 354 | - else | ||
| 355 | - EIP = PARAM2; | 353 | + if ((DATA_TYPE)ECX != 0 && (eflags & CC_Z)) |
| 354 | + GOTO_LABEL_PARAM(1); | ||
| 356 | FORCE_RET(); | 355 | FORCE_RET(); |
| 357 | } | 356 | } |
| 358 | 357 | ||
| 359 | -void OPPROTO glue(op_loop, SUFFIX)(void) | 358 | +void OPPROTO glue(op_jz_ecx, SUFFIX)(void) |
| 360 | { | 359 | { |
| 361 | - unsigned int tmp; | ||
| 362 | - tmp = (ECX - 1) & DATA_MASK; | ||
| 363 | - ECX = (ECX & ~DATA_MASK) | tmp; | ||
| 364 | - if (tmp != 0) | ||
| 365 | - EIP = PARAM1; | ||
| 366 | - else | ||
| 367 | - EIP = PARAM2; | 360 | + if ((DATA_TYPE)ECX == 0) |
| 361 | + GOTO_LABEL_PARAM(1); | ||
| 368 | FORCE_RET(); | 362 | FORCE_RET(); |
| 369 | } | 363 | } |
| 370 | 364 | ||
| 371 | -void OPPROTO glue(op_jecxz, SUFFIX)(void) | 365 | +void OPPROTO glue(op_jnz_ecx, SUFFIX)(void) |
| 372 | { | 366 | { |
| 373 | - if ((DATA_TYPE)ECX == 0) | ||
| 374 | - EIP = PARAM1; | ||
| 375 | - else | ||
| 376 | - EIP = PARAM2; | 367 | + if ((DATA_TYPE)ECX != 0) |
| 368 | + GOTO_LABEL_PARAM(1); | ||
| 377 | FORCE_RET(); | 369 | FORCE_RET(); |
| 378 | } | 370 | } |
| 379 | 371 | ||
| @@ -383,7 +375,7 @@ void OPPROTO glue(op_jecxz, SUFFIX)(void) | @@ -383,7 +375,7 @@ void OPPROTO glue(op_jecxz, SUFFIX)(void) | ||
| 383 | 375 | ||
| 384 | void OPPROTO glue(op_setb_T0_sub, SUFFIX)(void) | 376 | void OPPROTO glue(op_setb_T0_sub, SUFFIX)(void) |
| 385 | { | 377 | { |
| 386 | - int src1, src2; | 378 | + target_long src1, src2; |
| 387 | src1 = CC_DST + CC_SRC; | 379 | src1 = CC_DST + CC_SRC; |
| 388 | src2 = CC_SRC; | 380 | src2 = CC_SRC; |
| 389 | 381 | ||
| @@ -397,7 +389,7 @@ void OPPROTO glue(op_setz_T0_sub, SUFFIX)(void) | @@ -397,7 +389,7 @@ void OPPROTO glue(op_setz_T0_sub, SUFFIX)(void) | ||
| 397 | 389 | ||
| 398 | void OPPROTO glue(op_setbe_T0_sub, SUFFIX)(void) | 390 | void OPPROTO glue(op_setbe_T0_sub, SUFFIX)(void) |
| 399 | { | 391 | { |
| 400 | - int src1, src2; | 392 | + target_long src1, src2; |
| 401 | src1 = CC_DST + CC_SRC; | 393 | src1 = CC_DST + CC_SRC; |
| 402 | src2 = CC_SRC; | 394 | src2 = CC_SRC; |
| 403 | 395 | ||
| @@ -411,7 +403,7 @@ void OPPROTO glue(op_sets_T0_sub, SUFFIX)(void) | @@ -411,7 +403,7 @@ void OPPROTO glue(op_sets_T0_sub, SUFFIX)(void) | ||
| 411 | 403 | ||
| 412 | void OPPROTO glue(op_setl_T0_sub, SUFFIX)(void) | 404 | void OPPROTO glue(op_setl_T0_sub, SUFFIX)(void) |
| 413 | { | 405 | { |
| 414 | - int src1, src2; | 406 | + target_long src1, src2; |
| 415 | src1 = CC_DST + CC_SRC; | 407 | src1 = CC_DST + CC_SRC; |
| 416 | src2 = CC_SRC; | 408 | src2 = CC_SRC; |
| 417 | 409 | ||
| @@ -420,7 +412,7 @@ void OPPROTO glue(op_setl_T0_sub, SUFFIX)(void) | @@ -420,7 +412,7 @@ void OPPROTO glue(op_setl_T0_sub, SUFFIX)(void) | ||
| 420 | 412 | ||
| 421 | void OPPROTO glue(op_setle_T0_sub, SUFFIX)(void) | 413 | void OPPROTO glue(op_setle_T0_sub, SUFFIX)(void) |
| 422 | { | 414 | { |
| 423 | - int src1, src2; | 415 | + target_long src1, src2; |
| 424 | src1 = CC_DST + CC_SRC; | 416 | src1 = CC_DST + CC_SRC; |
| 425 | src2 = CC_SRC; | 417 | src2 = CC_SRC; |
| 426 | 418 | ||
| @@ -432,7 +424,7 @@ void OPPROTO glue(op_setle_T0_sub, SUFFIX)(void) | @@ -432,7 +424,7 @@ void OPPROTO glue(op_setle_T0_sub, SUFFIX)(void) | ||
| 432 | void OPPROTO glue(glue(op_shl, SUFFIX), _T0_T1)(void) | 424 | void OPPROTO glue(glue(op_shl, SUFFIX), _T0_T1)(void) |
| 433 | { | 425 | { |
| 434 | int count; | 426 | int count; |
| 435 | - count = T1 & 0x1f; | 427 | + count = T1 & SHIFT1_MASK; |
| 436 | T0 = T0 << count; | 428 | T0 = T0 << count; |
| 437 | FORCE_RET(); | 429 | FORCE_RET(); |
| 438 | } | 430 | } |
| @@ -440,7 +432,7 @@ void OPPROTO glue(glue(op_shl, SUFFIX), _T0_T1)(void) | @@ -440,7 +432,7 @@ void OPPROTO glue(glue(op_shl, SUFFIX), _T0_T1)(void) | ||
| 440 | void OPPROTO glue(glue(op_shr, SUFFIX), _T0_T1)(void) | 432 | void OPPROTO glue(glue(op_shr, SUFFIX), _T0_T1)(void) |
| 441 | { | 433 | { |
| 442 | int count; | 434 | int count; |
| 443 | - count = T1 & 0x1f; | 435 | + count = T1 & SHIFT1_MASK; |
| 444 | T0 &= DATA_MASK; | 436 | T0 &= DATA_MASK; |
| 445 | T0 = T0 >> count; | 437 | T0 = T0 >> count; |
| 446 | FORCE_RET(); | 438 | FORCE_RET(); |
| @@ -448,8 +440,10 @@ void OPPROTO glue(glue(op_shr, SUFFIX), _T0_T1)(void) | @@ -448,8 +440,10 @@ void OPPROTO glue(glue(op_shr, SUFFIX), _T0_T1)(void) | ||
| 448 | 440 | ||
| 449 | void OPPROTO glue(glue(op_sar, SUFFIX), _T0_T1)(void) | 441 | void OPPROTO glue(glue(op_sar, SUFFIX), _T0_T1)(void) |
| 450 | { | 442 | { |
| 451 | - int count, src; | ||
| 452 | - count = T1 & 0x1f; | 443 | + int count; |
| 444 | + target_long src; | ||
| 445 | + | ||
| 446 | + count = T1 & SHIFT1_MASK; | ||
| 453 | src = (DATA_STYPE)T0; | 447 | src = (DATA_STYPE)T0; |
| 454 | T0 = src >> count; | 448 | T0 = src >> count; |
| 455 | FORCE_RET(); | 449 | FORCE_RET(); |
| @@ -484,7 +478,7 @@ void OPPROTO glue(glue(op_bts, SUFFIX), _T0_T1_cc)(void) | @@ -484,7 +478,7 @@ void OPPROTO glue(glue(op_bts, SUFFIX), _T0_T1_cc)(void) | ||
| 484 | int count; | 478 | int count; |
| 485 | count = T1 & SHIFT_MASK; | 479 | count = T1 & SHIFT_MASK; |
| 486 | T1 = T0 >> count; | 480 | T1 = T0 >> count; |
| 487 | - T0 |= (1 << count); | 481 | + T0 |= (((target_long)1) << count); |
| 488 | } | 482 | } |
| 489 | 483 | ||
| 490 | void OPPROTO glue(glue(op_btr, SUFFIX), _T0_T1_cc)(void) | 484 | void OPPROTO glue(glue(op_btr, SUFFIX), _T0_T1_cc)(void) |
| @@ -492,7 +486,7 @@ void OPPROTO glue(glue(op_btr, SUFFIX), _T0_T1_cc)(void) | @@ -492,7 +486,7 @@ void OPPROTO glue(glue(op_btr, SUFFIX), _T0_T1_cc)(void) | ||
| 492 | int count; | 486 | int count; |
| 493 | count = T1 & SHIFT_MASK; | 487 | count = T1 & SHIFT_MASK; |
| 494 | T1 = T0 >> count; | 488 | T1 = T0 >> count; |
| 495 | - T0 &= ~(1 << count); | 489 | + T0 &= ~(((target_long)1) << count); |
| 496 | } | 490 | } |
| 497 | 491 | ||
| 498 | void OPPROTO glue(glue(op_btc, SUFFIX), _T0_T1_cc)(void) | 492 | void OPPROTO glue(glue(op_btc, SUFFIX), _T0_T1_cc)(void) |
| @@ -500,12 +494,19 @@ void OPPROTO glue(glue(op_btc, SUFFIX), _T0_T1_cc)(void) | @@ -500,12 +494,19 @@ void OPPROTO glue(glue(op_btc, SUFFIX), _T0_T1_cc)(void) | ||
| 500 | int count; | 494 | int count; |
| 501 | count = T1 & SHIFT_MASK; | 495 | count = T1 & SHIFT_MASK; |
| 502 | T1 = T0 >> count; | 496 | T1 = T0 >> count; |
| 503 | - T0 ^= (1 << count); | 497 | + T0 ^= (((target_long)1) << count); |
| 498 | +} | ||
| 499 | + | ||
| 500 | +void OPPROTO glue(glue(op_add_bit, SUFFIX), _A0_T1)(void) | ||
| 501 | +{ | ||
| 502 | + A0 += ((DATA_STYPE)T1 >> (3 + SHIFT)) << SHIFT; | ||
| 504 | } | 503 | } |
| 505 | 504 | ||
| 506 | void OPPROTO glue(glue(op_bsf, SUFFIX), _T0_cc)(void) | 505 | void OPPROTO glue(glue(op_bsf, SUFFIX), _T0_cc)(void) |
| 507 | { | 506 | { |
| 508 | - int res, count; | 507 | + int count; |
| 508 | + target_long res; | ||
| 509 | + | ||
| 509 | res = T0 & DATA_MASK; | 510 | res = T0 & DATA_MASK; |
| 510 | if (res != 0) { | 511 | if (res != 0) { |
| 511 | count = 0; | 512 | count = 0; |
| @@ -523,7 +524,9 @@ void OPPROTO glue(glue(op_bsf, SUFFIX), _T0_cc)(void) | @@ -523,7 +524,9 @@ void OPPROTO glue(glue(op_bsf, SUFFIX), _T0_cc)(void) | ||
| 523 | 524 | ||
| 524 | void OPPROTO glue(glue(op_bsr, SUFFIX), _T0_cc)(void) | 525 | void OPPROTO glue(glue(op_bsr, SUFFIX), _T0_cc)(void) |
| 525 | { | 526 | { |
| 526 | - int res, count; | 527 | + int count; |
| 528 | + target_long res; | ||
| 529 | + | ||
| 527 | res = T0 & DATA_MASK; | 530 | res = T0 & DATA_MASK; |
| 528 | if (res != 0) { | 531 | if (res != 0) { |
| 529 | count = DATA_BITS - 1; | 532 | count = DATA_BITS - 1; |
| @@ -555,70 +558,8 @@ void OPPROTO glue(op_movl_T0_Dshift, SUFFIX)(void) | @@ -555,70 +558,8 @@ void OPPROTO glue(op_movl_T0_Dshift, SUFFIX)(void) | ||
| 555 | T0 = DF << SHIFT; | 558 | T0 = DF << SHIFT; |
| 556 | } | 559 | } |
| 557 | 560 | ||
| 558 | -void OPPROTO glue(op_string_jz_sub, SUFFIX)(void) | ||
| 559 | -{ | ||
| 560 | - if ((DATA_TYPE)CC_DST == 0) | ||
| 561 | - JUMP_TB2(glue(op_string_jz_sub, SUFFIX), PARAM1, 3); | ||
| 562 | - FORCE_RET(); | ||
| 563 | -} | ||
| 564 | - | ||
| 565 | -void OPPROTO glue(op_string_jnz_sub, SUFFIX)(void) | ||
| 566 | -{ | ||
| 567 | - if ((DATA_TYPE)CC_DST != 0) | ||
| 568 | - JUMP_TB2(glue(op_string_jnz_sub, SUFFIX), PARAM1, 3); | ||
| 569 | - FORCE_RET(); | ||
| 570 | -} | ||
| 571 | - | ||
| 572 | -void OPPROTO glue(glue(op_string_jz_sub, SUFFIX), _im)(void) | ||
| 573 | -{ | ||
| 574 | - if ((DATA_TYPE)CC_DST == 0) { | ||
| 575 | - EIP = PARAM1; | ||
| 576 | - if (env->eflags & TF_MASK) { | ||
| 577 | - raise_exception(EXCP01_SSTP); | ||
| 578 | - } | ||
| 579 | - T0 = 0; | ||
| 580 | - EXIT_TB(); | ||
| 581 | - } | ||
| 582 | - FORCE_RET(); | ||
| 583 | -} | ||
| 584 | - | ||
| 585 | -void OPPROTO glue(glue(op_string_jnz_sub, SUFFIX), _im)(void) | ||
| 586 | -{ | ||
| 587 | - if ((DATA_TYPE)CC_DST != 0) { | ||
| 588 | - EIP = PARAM1; | ||
| 589 | - if (env->eflags & TF_MASK) { | ||
| 590 | - raise_exception(EXCP01_SSTP); | ||
| 591 | - } | ||
| 592 | - T0 = 0; | ||
| 593 | - EXIT_TB(); | ||
| 594 | - } | ||
| 595 | - FORCE_RET(); | ||
| 596 | -} | ||
| 597 | - | ||
| 598 | -#if DATA_BITS >= 16 | ||
| 599 | -void OPPROTO glue(op_jz_ecx, SUFFIX)(void) | ||
| 600 | -{ | ||
| 601 | - if ((DATA_TYPE)ECX == 0) | ||
| 602 | - JUMP_TB(glue(op_jz_ecx, SUFFIX), PARAM1, 1, PARAM2); | ||
| 603 | - FORCE_RET(); | ||
| 604 | -} | ||
| 605 | - | ||
| 606 | -void OPPROTO glue(glue(op_jz_ecx, SUFFIX), _im)(void) | ||
| 607 | -{ | ||
| 608 | - if ((DATA_TYPE)ECX == 0) { | ||
| 609 | - EIP = PARAM1; | ||
| 610 | - if (env->eflags & TF_MASK) { | ||
| 611 | - raise_exception(EXCP01_SSTP); | ||
| 612 | - } | ||
| 613 | - T0 = 0; | ||
| 614 | - EXIT_TB(); | ||
| 615 | - } | ||
| 616 | - FORCE_RET(); | ||
| 617 | -} | ||
| 618 | -#endif | ||
| 619 | - | ||
| 620 | /* port I/O */ | 561 | /* port I/O */ |
| 621 | - | 562 | +#if DATA_BITS <= 32 |
| 622 | void OPPROTO glue(glue(op_out, SUFFIX), _T0_T1)(void) | 563 | void OPPROTO glue(glue(op_out, SUFFIX), _T0_T1)(void) |
| 623 | { | 564 | { |
| 624 | glue(cpu_out, SUFFIX)(env, T0, T1 & DATA_MASK); | 565 | glue(cpu_out, SUFFIX)(env, T0, T1 & DATA_MASK); |
| @@ -648,9 +589,11 @@ void OPPROTO glue(glue(op_check_io, SUFFIX), _DX)(void) | @@ -648,9 +589,11 @@ void OPPROTO glue(glue(op_check_io, SUFFIX), _DX)(void) | ||
| 648 | { | 589 | { |
| 649 | glue(glue(check_io, SUFFIX), _DX)(); | 590 | glue(glue(check_io, SUFFIX), _DX)(); |
| 650 | } | 591 | } |
| 592 | +#endif | ||
| 651 | 593 | ||
| 652 | #undef DATA_BITS | 594 | #undef DATA_BITS |
| 653 | #undef SHIFT_MASK | 595 | #undef SHIFT_MASK |
| 596 | +#undef SHIFT1_MASK | ||
| 654 | #undef SIGN_MASK | 597 | #undef SIGN_MASK |
| 655 | #undef DATA_TYPE | 598 | #undef DATA_TYPE |
| 656 | #undef DATA_STYPE | 599 | #undef DATA_STYPE |
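In ops_template.h the flag helpers move their intermediates to target_long/target_ulong so they still hold 64-bit values, the conditional jumps switch from direct TB jumps (JUMP_TB) to local labels (GOTO_LABEL_PARAM), and the shift ops replace the hard-coded "& 0x1f" with SHIFT1_MASK, since x86 truncates shift counts to 5 bits for 8/16/32-bit operands but to 6 bits for 64-bit ones. A tiny sketch of that count-masking rule (illustrative only):

#include <stdio.h>

/* mirrors what SHIFT1_MASK selects per operand size */
static unsigned mask_count(unsigned count, int data_bits)
{
    return count & (data_bits == 64 ? 0x3f : 0x1f);
}

int main(void)
{
    printf("count 33 on a 32-bit operand -> %u\n", mask_count(33, 32)); /* 1  */
    printf("count 33 on a 64-bit operand -> %u\n", mask_count(33, 64)); /* 33 */
    return 0;
}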
target-i386/ops_template_mem.h
| @@ -28,6 +28,8 @@ | @@ -28,6 +28,8 @@ | ||
| 28 | #define MEM_SUFFIX w_raw | 28 | #define MEM_SUFFIX w_raw |
| 29 | #elif DATA_BITS == 32 | 29 | #elif DATA_BITS == 32 |
| 30 | #define MEM_SUFFIX l_raw | 30 | #define MEM_SUFFIX l_raw |
| 31 | +#elif DATA_BITS == 64 | ||
| 32 | +#define MEM_SUFFIX q_raw | ||
| 31 | #endif | 33 | #endif |
| 32 | 34 | ||
| 33 | #elif MEM_WRITE == 1 | 35 | #elif MEM_WRITE == 1 |
| @@ -38,6 +40,8 @@ | @@ -38,6 +40,8 @@ | ||
| 38 | #define MEM_SUFFIX w_kernel | 40 | #define MEM_SUFFIX w_kernel |
| 39 | #elif DATA_BITS == 32 | 41 | #elif DATA_BITS == 32 |
| 40 | #define MEM_SUFFIX l_kernel | 42 | #define MEM_SUFFIX l_kernel |
| 43 | +#elif DATA_BITS == 64 | ||
| 44 | +#define MEM_SUFFIX q_kernel | ||
| 41 | #endif | 45 | #endif |
| 42 | 46 | ||
| 43 | #elif MEM_WRITE == 2 | 47 | #elif MEM_WRITE == 2 |
| @@ -48,6 +52,8 @@ | @@ -48,6 +52,8 @@ | ||
| 48 | #define MEM_SUFFIX w_user | 52 | #define MEM_SUFFIX w_user |
| 49 | #elif DATA_BITS == 32 | 53 | #elif DATA_BITS == 32 |
| 50 | #define MEM_SUFFIX l_user | 54 | #define MEM_SUFFIX l_user |
| 55 | +#elif DATA_BITS == 64 | ||
| 56 | +#define MEM_SUFFIX q_user | ||
| 51 | #endif | 57 | #endif |
| 52 | 58 | ||
| 53 | #else | 59 | #else |
| @@ -64,14 +70,16 @@ | @@ -64,14 +70,16 @@ | ||
| 64 | 70 | ||
| 65 | void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1_cc)(void) | 71 | void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1_cc)(void) |
| 66 | { | 72 | { |
| 67 | - int count, src; | 73 | + int count; |
| 74 | + target_long src; | ||
| 75 | + | ||
| 68 | count = T1 & SHIFT_MASK; | 76 | count = T1 & SHIFT_MASK; |
| 69 | if (count) { | 77 | if (count) { |
| 70 | src = T0; | 78 | src = T0; |
| 71 | T0 &= DATA_MASK; | 79 | T0 &= DATA_MASK; |
| 72 | T0 = (T0 << count) | (T0 >> (DATA_BITS - count)); | 80 | T0 = (T0 << count) | (T0 >> (DATA_BITS - count)); |
| 73 | #ifdef MEM_WRITE | 81 | #ifdef MEM_WRITE |
| 74 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 82 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 75 | #else | 83 | #else |
| 76 | /* gcc 3.2 workaround. This is really a bug in gcc. */ | 84 | /* gcc 3.2 workaround. This is really a bug in gcc. */ |
| 77 | asm volatile("" : : "r" (T0)); | 85 | asm volatile("" : : "r" (T0)); |
| @@ -86,14 +94,16 @@ void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1_cc)(void) | @@ -86,14 +94,16 @@ void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1_cc)(void) | ||
| 86 | 94 | ||
| 87 | void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1_cc)(void) | 95 | void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1_cc)(void) |
| 88 | { | 96 | { |
| 89 | - int count, src; | 97 | + int count; |
| 98 | + target_long src; | ||
| 99 | + | ||
| 90 | count = T1 & SHIFT_MASK; | 100 | count = T1 & SHIFT_MASK; |
| 91 | if (count) { | 101 | if (count) { |
| 92 | src = T0; | 102 | src = T0; |
| 93 | T0 &= DATA_MASK; | 103 | T0 &= DATA_MASK; |
| 94 | T0 = (T0 >> count) | (T0 << (DATA_BITS - count)); | 104 | T0 = (T0 >> count) | (T0 << (DATA_BITS - count)); |
| 95 | #ifdef MEM_WRITE | 105 | #ifdef MEM_WRITE |
| 96 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 106 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 97 | #else | 107 | #else |
| 98 | /* gcc 3.2 workaround. This is really a bug in gcc. */ | 108 | /* gcc 3.2 workaround. This is really a bug in gcc. */ |
| 99 | asm volatile("" : : "r" (T0)); | 109 | asm volatile("" : : "r" (T0)); |
| @@ -114,7 +124,7 @@ void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1)(void) | @@ -114,7 +124,7 @@ void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1)(void) | ||
| 114 | T0 &= DATA_MASK; | 124 | T0 &= DATA_MASK; |
| 115 | T0 = (T0 << count) | (T0 >> (DATA_BITS - count)); | 125 | T0 = (T0 << count) | (T0 >> (DATA_BITS - count)); |
| 116 | #ifdef MEM_WRITE | 126 | #ifdef MEM_WRITE |
| 117 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 127 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 118 | #endif | 128 | #endif |
| 119 | } | 129 | } |
| 120 | FORCE_RET(); | 130 | FORCE_RET(); |
| @@ -128,7 +138,7 @@ void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1)(void) | @@ -128,7 +138,7 @@ void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1)(void) | ||
| 128 | T0 &= DATA_MASK; | 138 | T0 &= DATA_MASK; |
| 129 | T0 = (T0 >> count) | (T0 << (DATA_BITS - count)); | 139 | T0 = (T0 >> count) | (T0 << (DATA_BITS - count)); |
| 130 | #ifdef MEM_WRITE | 140 | #ifdef MEM_WRITE |
| 131 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 141 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 132 | #endif | 142 | #endif |
| 133 | } | 143 | } |
| 134 | FORCE_RET(); | 144 | FORCE_RET(); |
| @@ -136,10 +146,11 @@ void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1)(void) | @@ -136,10 +146,11 @@ void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1)(void) | ||
| 136 | 146 | ||
| 137 | void OPPROTO glue(glue(op_rcl, MEM_SUFFIX), _T0_T1_cc)(void) | 147 | void OPPROTO glue(glue(op_rcl, MEM_SUFFIX), _T0_T1_cc)(void) |
| 138 | { | 148 | { |
| 139 | - int count, res, eflags; | ||
| 140 | - unsigned int src; | 149 | + int count, eflags; |
| 150 | + target_ulong src; | ||
| 151 | + target_long res; | ||
| 141 | 152 | ||
| 142 | - count = T1 & 0x1f; | 153 | + count = T1 & SHIFT1_MASK; |
| 143 | #if DATA_BITS == 16 | 154 | #if DATA_BITS == 16 |
| 144 | count = rclw_table[count]; | 155 | count = rclw_table[count]; |
| 145 | #elif DATA_BITS == 8 | 156 | #elif DATA_BITS == 8 |
| @@ -154,7 +165,7 @@ void OPPROTO glue(glue(op_rcl, MEM_SUFFIX), _T0_T1_cc)(void) | @@ -154,7 +165,7 @@ void OPPROTO glue(glue(op_rcl, MEM_SUFFIX), _T0_T1_cc)(void) | ||
| 154 | res |= T0 >> (DATA_BITS + 1 - count); | 165 | res |= T0 >> (DATA_BITS + 1 - count); |
| 155 | T0 = res; | 166 | T0 = res; |
| 156 | #ifdef MEM_WRITE | 167 | #ifdef MEM_WRITE |
| 157 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 168 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 158 | #endif | 169 | #endif |
| 159 | CC_SRC = (eflags & ~(CC_C | CC_O)) | | 170 | CC_SRC = (eflags & ~(CC_C | CC_O)) | |
| 160 | (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) | | 171 | (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) | |
| @@ -166,10 +177,11 @@ void OPPROTO glue(glue(op_rcl, MEM_SUFFIX), _T0_T1_cc)(void) | @@ -166,10 +177,11 @@ void OPPROTO glue(glue(op_rcl, MEM_SUFFIX), _T0_T1_cc)(void) | ||
| 166 | 177 | ||
| 167 | void OPPROTO glue(glue(op_rcr, MEM_SUFFIX), _T0_T1_cc)(void) | 178 | void OPPROTO glue(glue(op_rcr, MEM_SUFFIX), _T0_T1_cc)(void) |
| 168 | { | 179 | { |
| 169 | - int count, res, eflags; | ||
| 170 | - unsigned int src; | 180 | + int count, eflags; |
| 181 | + target_ulong src; | ||
| 182 | + target_long res; | ||
| 171 | 183 | ||
| 172 | - count = T1 & 0x1f; | 184 | + count = T1 & SHIFT1_MASK; |
| 173 | #if DATA_BITS == 16 | 185 | #if DATA_BITS == 16 |
| 174 | count = rclw_table[count]; | 186 | count = rclw_table[count]; |
| 175 | #elif DATA_BITS == 8 | 187 | #elif DATA_BITS == 8 |
| @@ -184,7 +196,7 @@ void OPPROTO glue(glue(op_rcr, MEM_SUFFIX), _T0_T1_cc)(void) | @@ -184,7 +196,7 @@ void OPPROTO glue(glue(op_rcr, MEM_SUFFIX), _T0_T1_cc)(void) | ||
| 184 | res |= T0 << (DATA_BITS + 1 - count); | 196 | res |= T0 << (DATA_BITS + 1 - count); |
| 185 | T0 = res; | 197 | T0 = res; |
| 186 | #ifdef MEM_WRITE | 198 | #ifdef MEM_WRITE |
| 187 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 199 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 188 | #endif | 200 | #endif |
| 189 | CC_SRC = (eflags & ~(CC_C | CC_O)) | | 201 | CC_SRC = (eflags & ~(CC_C | CC_O)) | |
| 190 | (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) | | 202 | (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) | |
| @@ -196,13 +208,15 @@ void OPPROTO glue(glue(op_rcr, MEM_SUFFIX), _T0_T1_cc)(void) | @@ -196,13 +208,15 @@ void OPPROTO glue(glue(op_rcr, MEM_SUFFIX), _T0_T1_cc)(void) | ||
| 196 | 208 | ||
| 197 | void OPPROTO glue(glue(op_shl, MEM_SUFFIX), _T0_T1_cc)(void) | 209 | void OPPROTO glue(glue(op_shl, MEM_SUFFIX), _T0_T1_cc)(void) |
| 198 | { | 210 | { |
| 199 | - int count, src; | ||
| 200 | - count = T1 & 0x1f; | 211 | + int count; |
| 212 | + target_long src; | ||
| 213 | + | ||
| 214 | + count = T1 & SHIFT1_MASK; | ||
| 201 | if (count) { | 215 | if (count) { |
| 202 | src = (DATA_TYPE)T0 << (count - 1); | 216 | src = (DATA_TYPE)T0 << (count - 1); |
| 203 | T0 = T0 << count; | 217 | T0 = T0 << count; |
| 204 | #ifdef MEM_WRITE | 218 | #ifdef MEM_WRITE |
| 205 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 219 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 206 | #endif | 220 | #endif |
| 207 | CC_SRC = src; | 221 | CC_SRC = src; |
| 208 | CC_DST = T0; | 222 | CC_DST = T0; |
| @@ -213,14 +227,16 @@ void OPPROTO glue(glue(op_shl, MEM_SUFFIX), _T0_T1_cc)(void) | @@ -213,14 +227,16 @@ void OPPROTO glue(glue(op_shl, MEM_SUFFIX), _T0_T1_cc)(void) | ||
| 213 | 227 | ||
| 214 | void OPPROTO glue(glue(op_shr, MEM_SUFFIX), _T0_T1_cc)(void) | 228 | void OPPROTO glue(glue(op_shr, MEM_SUFFIX), _T0_T1_cc)(void) |
| 215 | { | 229 | { |
| 216 | - int count, src; | ||
| 217 | - count = T1 & 0x1f; | 230 | + int count; |
| 231 | + target_long src; | ||
| 232 | + | ||
| 233 | + count = T1 & SHIFT1_MASK; | ||
| 218 | if (count) { | 234 | if (count) { |
| 219 | T0 &= DATA_MASK; | 235 | T0 &= DATA_MASK; |
| 220 | src = T0 >> (count - 1); | 236 | src = T0 >> (count - 1); |
| 221 | T0 = T0 >> count; | 237 | T0 = T0 >> count; |
| 222 | #ifdef MEM_WRITE | 238 | #ifdef MEM_WRITE |
| 223 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 239 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 224 | #endif | 240 | #endif |
| 225 | CC_SRC = src; | 241 | CC_SRC = src; |
| 226 | CC_DST = T0; | 242 | CC_DST = T0; |
| @@ -231,14 +247,16 @@ void OPPROTO glue(glue(op_shr, MEM_SUFFIX), _T0_T1_cc)(void) | @@ -231,14 +247,16 @@ void OPPROTO glue(glue(op_shr, MEM_SUFFIX), _T0_T1_cc)(void) | ||
| 231 | 247 | ||
| 232 | void OPPROTO glue(glue(op_sar, MEM_SUFFIX), _T0_T1_cc)(void) | 248 | void OPPROTO glue(glue(op_sar, MEM_SUFFIX), _T0_T1_cc)(void) |
| 233 | { | 249 | { |
| 234 | - int count, src; | ||
| 235 | - count = T1 & 0x1f; | 250 | + int count; |
| 251 | + target_long src; | ||
| 252 | + | ||
| 253 | + count = T1 & SHIFT1_MASK; | ||
| 236 | if (count) { | 254 | if (count) { |
| 237 | src = (DATA_STYPE)T0; | 255 | src = (DATA_STYPE)T0; |
| 238 | T0 = src >> count; | 256 | T0 = src >> count; |
| 239 | src = src >> (count - 1); | 257 | src = src >> (count - 1); |
| 240 | #ifdef MEM_WRITE | 258 | #ifdef MEM_WRITE |
| 241 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 259 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 242 | #endif | 260 | #endif |
| 243 | CC_SRC = src; | 261 | CC_SRC = src; |
| 244 | CC_DST = T0; | 262 | CC_DST = T0; |
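The shl/shr/sar variants keep relying on the template's DATA_TYPE, DATA_STYPE and DATA_MASK macros; for the new 64-bit instantiation those presumably expand along these lines (a sketch, not the verbatim header):

    /* assumed 64-bit instantiation of the template's size macros */
    #define DATA_TYPE  uint64_t
    #define DATA_STYPE int64_t              /* used by op_sar for the arithmetic shift */
    #define DATA_MASK  0xffffffffffffffffULL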
| @@ -262,7 +280,7 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void) | @@ -262,7 +280,7 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void) | ||
| 262 | res |= T1 << (count - 16); | 280 | res |= T1 << (count - 16); |
| 263 | T0 = res >> 16; | 281 | T0 = res >> 16; |
| 264 | #ifdef MEM_WRITE | 282 | #ifdef MEM_WRITE |
| 265 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 283 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 266 | #endif | 284 | #endif |
| 267 | CC_SRC = tmp; | 285 | CC_SRC = tmp; |
| 268 | CC_DST = T0; | 286 | CC_DST = T0; |
| @@ -282,7 +300,7 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void) | @@ -282,7 +300,7 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void) | ||
| 282 | res |= T1 << (count - 16); | 300 | res |= T1 << (count - 16); |
| 283 | T0 = res >> 16; | 301 | T0 = res >> 16; |
| 284 | #ifdef MEM_WRITE | 302 | #ifdef MEM_WRITE |
| 285 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 303 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 286 | #endif | 304 | #endif |
| 287 | CC_SRC = tmp; | 305 | CC_SRC = tmp; |
| 288 | CC_DST = T0; | 306 | CC_DST = T0; |
| @@ -304,7 +322,7 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void) | @@ -304,7 +322,7 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void) | ||
| 304 | res |= T1 << (32 - count); | 322 | res |= T1 << (32 - count); |
| 305 | T0 = res; | 323 | T0 = res; |
| 306 | #ifdef MEM_WRITE | 324 | #ifdef MEM_WRITE |
| 307 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 325 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 308 | #endif | 326 | #endif |
| 309 | CC_SRC = tmp; | 327 | CC_SRC = tmp; |
| 310 | CC_DST = T0; | 328 | CC_DST = T0; |
| @@ -325,7 +343,7 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void) | @@ -325,7 +343,7 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void) | ||
| 325 | res |= T1 << (32 - count); | 343 | res |= T1 << (32 - count); |
| 326 | T0 = res; | 344 | T0 = res; |
| 327 | #ifdef MEM_WRITE | 345 | #ifdef MEM_WRITE |
| 328 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 346 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 329 | #endif | 347 | #endif |
| 330 | CC_SRC = tmp; | 348 | CC_SRC = tmp; |
| 331 | CC_DST = T0; | 349 | CC_DST = T0; |
| @@ -335,17 +353,19 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void) | @@ -335,17 +353,19 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void) | ||
| 335 | } | 353 | } |
| 336 | #endif | 354 | #endif |
| 337 | 355 | ||
| 338 | -#if DATA_BITS == 32 | 356 | +#if DATA_BITS >= 32 |
| 339 | void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void) | 357 | void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void) |
| 340 | { | 358 | { |
| 341 | - int count, tmp; | 359 | + int count; |
| 360 | + target_long tmp; | ||
| 361 | + | ||
| 342 | count = PARAM1; | 362 | count = PARAM1; |
| 343 | T0 &= DATA_MASK; | 363 | T0 &= DATA_MASK; |
| 344 | T1 &= DATA_MASK; | 364 | T1 &= DATA_MASK; |
| 345 | tmp = T0 << (count - 1); | 365 | tmp = T0 << (count - 1); |
| 346 | T0 = (T0 << count) | (T1 >> (DATA_BITS - count)); | 366 | T0 = (T0 << count) | (T1 >> (DATA_BITS - count)); |
| 347 | #ifdef MEM_WRITE | 367 | #ifdef MEM_WRITE |
| 348 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 368 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 349 | #endif | 369 | #endif |
| 350 | CC_SRC = tmp; | 370 | CC_SRC = tmp; |
| 351 | CC_DST = T0; | 371 | CC_DST = T0; |
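Relaxing the guard from DATA_BITS == 32 to DATA_BITS >= 32 lets the 64-bit instantiation reuse these generic shld/shrd variants instead of the 16-bit special cases above. A sketch of how the per-size instantiation is typically driven, where the SHIFT-to-DATA_BITS mapping is an assumption about the including template headers:

    #if SHIFT == 0
    #define DATA_BITS 8
    #elif SHIFT == 1
    #define DATA_BITS 16
    #elif SHIFT == 2
    #define DATA_BITS 32
    #else
    #define DATA_BITS 64   /* new x86_64 case; now also compiles the DATA_BITS >= 32 shld/shrd ops */
    #endif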
| @@ -353,15 +373,17 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void) | @@ -353,15 +373,17 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void) | ||
| 353 | 373 | ||
| 354 | void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void) | 374 | void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void) |
| 355 | { | 375 | { |
| 356 | - int count, tmp; | ||
| 357 | - count = ECX & 0x1f; | 376 | + int count; |
| 377 | + target_long tmp; | ||
| 378 | + | ||
| 379 | + count = ECX & SHIFT1_MASK; | ||
| 358 | if (count) { | 380 | if (count) { |
| 359 | T0 &= DATA_MASK; | 381 | T0 &= DATA_MASK; |
| 360 | T1 &= DATA_MASK; | 382 | T1 &= DATA_MASK; |
| 361 | tmp = T0 << (count - 1); | 383 | tmp = T0 << (count - 1); |
| 362 | T0 = (T0 << count) | (T1 >> (DATA_BITS - count)); | 384 | T0 = (T0 << count) | (T1 >> (DATA_BITS - count)); |
| 363 | #ifdef MEM_WRITE | 385 | #ifdef MEM_WRITE |
| 364 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 386 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 365 | #endif | 387 | #endif |
| 366 | CC_SRC = tmp; | 388 | CC_SRC = tmp; |
| 367 | CC_DST = T0; | 389 | CC_DST = T0; |
| @@ -372,14 +394,16 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void) | @@ -372,14 +394,16 @@ void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void) | ||
| 372 | 394 | ||
| 373 | void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void) | 395 | void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void) |
| 374 | { | 396 | { |
| 375 | - int count, tmp; | 397 | + int count; |
| 398 | + target_long tmp; | ||
| 399 | + | ||
| 376 | count = PARAM1; | 400 | count = PARAM1; |
| 377 | T0 &= DATA_MASK; | 401 | T0 &= DATA_MASK; |
| 378 | T1 &= DATA_MASK; | 402 | T1 &= DATA_MASK; |
| 379 | tmp = T0 >> (count - 1); | 403 | tmp = T0 >> (count - 1); |
| 380 | T0 = (T0 >> count) | (T1 << (DATA_BITS - count)); | 404 | T0 = (T0 >> count) | (T1 << (DATA_BITS - count)); |
| 381 | #ifdef MEM_WRITE | 405 | #ifdef MEM_WRITE |
| 382 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 406 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 383 | #endif | 407 | #endif |
| 384 | CC_SRC = tmp; | 408 | CC_SRC = tmp; |
| 385 | CC_DST = T0; | 409 | CC_DST = T0; |
| @@ -388,15 +412,17 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void) | @@ -388,15 +412,17 @@ void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void) | ||
| 388 | 412 | ||
| 389 | void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void) | 413 | void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void) |
| 390 | { | 414 | { |
| 391 | - int count, tmp; | ||
| 392 | - count = ECX & 0x1f; | 415 | + int count; |
| 416 | + target_long tmp; | ||
| 417 | + | ||
| 418 | + count = ECX & SHIFT1_MASK; | ||
| 393 | if (count) { | 419 | if (count) { |
| 394 | T0 &= DATA_MASK; | 420 | T0 &= DATA_MASK; |
| 395 | T1 &= DATA_MASK; | 421 | T1 &= DATA_MASK; |
| 396 | tmp = T0 >> (count - 1); | 422 | tmp = T0 >> (count - 1); |
| 397 | T0 = (T0 >> count) | (T1 << (DATA_BITS - count)); | 423 | T0 = (T0 >> count) | (T1 << (DATA_BITS - count)); |
| 398 | #ifdef MEM_WRITE | 424 | #ifdef MEM_WRITE |
| 399 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 425 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 400 | #endif | 426 | #endif |
| 401 | CC_SRC = tmp; | 427 | CC_SRC = tmp; |
| 402 | CC_DST = T0; | 428 | CC_DST = T0; |
| @@ -414,11 +440,11 @@ void OPPROTO glue(glue(op_adc, MEM_SUFFIX), _T0_T1_cc)(void) | @@ -414,11 +440,11 @@ void OPPROTO glue(glue(op_adc, MEM_SUFFIX), _T0_T1_cc)(void) | ||
| 414 | cf = cc_table[CC_OP].compute_c(); | 440 | cf = cc_table[CC_OP].compute_c(); |
| 415 | T0 = T0 + T1 + cf; | 441 | T0 = T0 + T1 + cf; |
| 416 | #ifdef MEM_WRITE | 442 | #ifdef MEM_WRITE |
| 417 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 443 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 418 | #endif | 444 | #endif |
| 419 | CC_SRC = T1; | 445 | CC_SRC = T1; |
| 420 | CC_DST = T0; | 446 | CC_DST = T0; |
| 421 | - CC_OP = CC_OP_ADDB + SHIFT + cf * 3; | 447 | + CC_OP = CC_OP_ADDB + SHIFT + cf * 4; |
| 422 | } | 448 | } |
| 423 | 449 | ||
| 424 | void OPPROTO glue(glue(op_sbb, MEM_SUFFIX), _T0_T1_cc)(void) | 450 | void OPPROTO glue(glue(op_sbb, MEM_SUFFIX), _T0_T1_cc)(void) |
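The carry-in offset in op_adc (and in op_sbb just below) grows from cf * 3 to cf * 4 because each condition-code group now carries four size variants (byte, word, long, quad) instead of three. A sketch of the CC_OP layout this indexing assumes:

    enum {
        CC_OP_ADDB, CC_OP_ADDW, CC_OP_ADDL, CC_OP_ADDQ,   /* plain add */
        CC_OP_ADCB, CC_OP_ADCW, CC_OP_ADCL, CC_OP_ADCQ,   /* add with carry */
        CC_OP_SUBB, CC_OP_SUBW, CC_OP_SUBL, CC_OP_SUBQ,   /* plain sub */
        CC_OP_SBBB, CC_OP_SBBW, CC_OP_SBBL, CC_OP_SBBQ,   /* sub with borrow */
    };
    /* CC_OP_ADDB + SHIFT + cf * 4 selects ADD<size> when cf == 0 and ADC<size> when cf == 1;
       op_sbb does the same starting from CC_OP_SUBB. With only three sizes per group the
       old stride was cf * 3. */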
| @@ -427,23 +453,23 @@ void OPPROTO glue(glue(op_sbb, MEM_SUFFIX), _T0_T1_cc)(void) | @@ -427,23 +453,23 @@ void OPPROTO glue(glue(op_sbb, MEM_SUFFIX), _T0_T1_cc)(void) | ||
| 427 | cf = cc_table[CC_OP].compute_c(); | 453 | cf = cc_table[CC_OP].compute_c(); |
| 428 | T0 = T0 - T1 - cf; | 454 | T0 = T0 - T1 - cf; |
| 429 | #ifdef MEM_WRITE | 455 | #ifdef MEM_WRITE |
| 430 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 456 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 431 | #endif | 457 | #endif |
| 432 | CC_SRC = T1; | 458 | CC_SRC = T1; |
| 433 | CC_DST = T0; | 459 | CC_DST = T0; |
| 434 | - CC_OP = CC_OP_SUBB + SHIFT + cf * 3; | 460 | + CC_OP = CC_OP_SUBB + SHIFT + cf * 4; |
| 435 | } | 461 | } |
| 436 | 462 | ||
| 437 | void OPPROTO glue(glue(op_cmpxchg, MEM_SUFFIX), _T0_T1_EAX_cc)(void) | 463 | void OPPROTO glue(glue(op_cmpxchg, MEM_SUFFIX), _T0_T1_EAX_cc)(void) |
| 438 | { | 464 | { |
| 439 | - unsigned int src, dst; | 465 | + target_ulong src, dst; |
| 440 | 466 | ||
| 441 | src = T0; | 467 | src = T0; |
| 442 | dst = EAX - T0; | 468 | dst = EAX - T0; |
| 443 | if ((DATA_TYPE)dst == 0) { | 469 | if ((DATA_TYPE)dst == 0) { |
| 444 | T0 = T1; | 470 | T0 = T1; |
| 445 | #ifdef MEM_WRITE | 471 | #ifdef MEM_WRITE |
| 446 | - glue(st, MEM_SUFFIX)((uint8_t *)A0, T0); | 472 | + glue(st, MEM_SUFFIX)(A0, T0); |
| 447 | #endif | 473 | #endif |
| 448 | } else { | 474 | } else { |
| 449 | EAX = (EAX & ~DATA_MASK) | (T0 & DATA_MASK); | 475 | EAX = (EAX & ~DATA_MASK) | (T0 & DATA_MASK); |