Commit 4b4f782c78f49c78c912e1e44c6a63fb7bf9aab4

Authored by bellard
1 parent 84b7b8e7

NX support


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1677 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 120 additions and 40 deletions
target-i386/helper2.c
@@ -127,7 +127,7 @@ CPUX86State *cpu_x86_init(void)
127 /* currently not enabled for std i386 because not fully tested */ 127 /* currently not enabled for std i386 because not fully tested */
128 env->cpuid_features |= CPUID_APIC; 128 env->cpuid_features |= CPUID_APIC;
129 env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF); 129 env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
130 - env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL; 130 + env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
131 env->cpuid_xlevel = 0x80000008; 131 env->cpuid_xlevel = 0x80000008;
132 132
133 /* these features are needed for Win64 and aren't fully implemented */ 133 /* these features are needed for Win64 and aren't fully implemented */
@@ -576,6 +576,8 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
576 576
577 #else 577 #else
578 578
  579 +#define PHYS_ADDR_MASK 0xfffff000
  580 +
579 /* return value: 581 /* return value:
580 -1 = cannot handle fault 582 -1 = cannot handle fault
581 0 = nothing more to do 583 0 = nothing more to do
@@ -583,37 +585,38 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
583 2 = soft MMU activation required for this block 585 2 = soft MMU activation required for this block
584 */ 586 */
585 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 587 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
586 - int is_write, int is_user, int is_softmmu) 588 + int is_write1, int is_user, int is_softmmu)
587 { 589 {
  590 + uint64_t ptep, pte;
588 uint32_t pdpe_addr, pde_addr, pte_addr; 591 uint32_t pdpe_addr, pde_addr, pte_addr;
589 - uint32_t pde, pte, ptep, pdpe;  
590 - int error_code, is_dirty, prot, page_size, ret; 592 + int error_code, is_dirty, prot, page_size, ret, is_write;
591 unsigned long paddr, page_offset; 593 unsigned long paddr, page_offset;
592 target_ulong vaddr, virt_addr; 594 target_ulong vaddr, virt_addr;
593 595
594 #if defined(DEBUG_MMU) 596 #if defined(DEBUG_MMU)
595 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", 597 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
596 - addr, is_write, is_user, env->eip); 598 + addr, is_write1, is_user, env->eip);
597 #endif 599 #endif
598 - is_write &= 1; 600 + is_write = is_write1 & 1;
599 601
600 if (!(env->cr[0] & CR0_PG_MASK)) { 602 if (!(env->cr[0] & CR0_PG_MASK)) {
601 pte = addr; 603 pte = addr;
602 virt_addr = addr & TARGET_PAGE_MASK; 604 virt_addr = addr & TARGET_PAGE_MASK;
603 - prot = PAGE_READ | PAGE_WRITE; 605 + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
604 page_size = 4096; 606 page_size = 4096;
605 goto do_mapping; 607 goto do_mapping;
606 } 608 }
607 609
608 if (env->cr[4] & CR4_PAE_MASK) { 610 if (env->cr[4] & CR4_PAE_MASK) {
  611 + uint64_t pde, pdpe;
  612 +
609 /* XXX: we only use 32 bit physical addresses */ 613 /* XXX: we only use 32 bit physical addresses */
610 #ifdef TARGET_X86_64 614 #ifdef TARGET_X86_64
611 if (env->hflags & HF_LMA_MASK) { 615 if (env->hflags & HF_LMA_MASK) {
612 - uint32_t pml4e_addr, pml4e; 616 + uint32_t pml4e_addr;
  617 + uint64_t pml4e;
613 int32_t sext; 618 int32_t sext;
614 619
615 - /* XXX: handle user + rw rights */  
616 - /* XXX: handle NX flag */  
617 /* test virtual address sign extension */ 620 /* test virtual address sign extension */
618 sext = (int64_t)addr >> 47; 621 sext = (int64_t)addr >> 47;
619 if (sext != 0 && sext != -1) { 622 if (sext != 0 && sext != -1) {
@@ -623,61 +626,134 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
623 626
624 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 627 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
625 env->a20_mask; 628 env->a20_mask;
626 - pml4e = ldl_phys(pml4e_addr); 629 + pml4e = ldq_phys(pml4e_addr);
627 if (!(pml4e & PG_PRESENT_MASK)) { 630 if (!(pml4e & PG_PRESENT_MASK)) {
628 error_code = 0; 631 error_code = 0;
629 goto do_fault; 632 goto do_fault;
630 } 633 }
  634 + if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
  635 + error_code = PG_ERROR_RSVD_MASK;
  636 + goto do_fault;
  637 + }
631 if (!(pml4e & PG_ACCESSED_MASK)) { 638 if (!(pml4e & PG_ACCESSED_MASK)) {
632 pml4e |= PG_ACCESSED_MASK; 639 pml4e |= PG_ACCESSED_MASK;
633 stl_phys_notdirty(pml4e_addr, pml4e); 640 stl_phys_notdirty(pml4e_addr, pml4e);
634 } 641 }
635 -  
636 - pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & 642 + ptep = pml4e ^ PG_NX_MASK;
  643 + pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
637 env->a20_mask; 644 env->a20_mask;
638 - pdpe = ldl_phys(pdpe_addr); 645 + pdpe = ldq_phys(pdpe_addr);
639 if (!(pdpe & PG_PRESENT_MASK)) { 646 if (!(pdpe & PG_PRESENT_MASK)) {
640 error_code = 0; 647 error_code = 0;
641 goto do_fault; 648 goto do_fault;
642 } 649 }
  650 + if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
  651 + error_code = PG_ERROR_RSVD_MASK;
  652 + goto do_fault;
  653 + }
  654 + ptep &= pdpe ^ PG_NX_MASK;
643 if (!(pdpe & PG_ACCESSED_MASK)) { 655 if (!(pdpe & PG_ACCESSED_MASK)) {
644 pdpe |= PG_ACCESSED_MASK; 656 pdpe |= PG_ACCESSED_MASK;
645 stl_phys_notdirty(pdpe_addr, pdpe); 657 stl_phys_notdirty(pdpe_addr, pdpe);
646 } 658 }
647 - } else 659 + } else
648 #endif 660 #endif
649 { 661 {
  662 + /* XXX: load them when cr3 is loaded ? */
650 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & 663 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
651 env->a20_mask; 664 env->a20_mask;
652 - pdpe = ldl_phys(pdpe_addr); 665 + pdpe = ldq_phys(pdpe_addr);
653 if (!(pdpe & PG_PRESENT_MASK)) { 666 if (!(pdpe & PG_PRESENT_MASK)) {
654 error_code = 0; 667 error_code = 0;
655 goto do_fault; 668 goto do_fault;
656 } 669 }
  670 + ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
657 } 671 }
658 672
659 - pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) & 673 + pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
660 env->a20_mask; 674 env->a20_mask;
661 - pde = ldl_phys(pde_addr); 675 + pde = ldq_phys(pde_addr);
662 if (!(pde & PG_PRESENT_MASK)) { 676 if (!(pde & PG_PRESENT_MASK)) {
663 error_code = 0; 677 error_code = 0;
664 goto do_fault; 678 goto do_fault;
665 } 679 }
  680 + if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
  681 + error_code = PG_ERROR_RSVD_MASK;
  682 + goto do_fault;
  683 + }
  684 + ptep &= pde ^ PG_NX_MASK;
666 if (pde & PG_PSE_MASK) { 685 if (pde & PG_PSE_MASK) {
667 /* 2 MB page */ 686 /* 2 MB page */
668 page_size = 2048 * 1024; 687 page_size = 2048 * 1024;
669 - goto handle_big_page; 688 + ptep ^= PG_NX_MASK;
  689 + if ((ptep & PG_NX_MASK) && is_write1 == 2)
  690 + goto do_fault_protect;
  691 + if (is_user) {
  692 + if (!(ptep & PG_USER_MASK))
  693 + goto do_fault_protect;
  694 + if (is_write && !(ptep & PG_RW_MASK))
  695 + goto do_fault_protect;
  696 + } else {
  697 + if ((env->cr[0] & CR0_WP_MASK) &&
  698 + is_write && !(ptep & PG_RW_MASK))
  699 + goto do_fault_protect;
  700 + }
  701 + is_dirty = is_write && !(pde & PG_DIRTY_MASK);
  702 + if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
  703 + pde |= PG_ACCESSED_MASK;
  704 + if (is_dirty)
  705 + pde |= PG_DIRTY_MASK;
  706 + stl_phys_notdirty(pde_addr, pde);
  707 + }
  708 + /* align to page_size */
  709 + pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
  710 + virt_addr = addr & ~(page_size - 1);
670 } else { 711 } else {
671 /* 4 KB page */ 712 /* 4 KB page */
672 if (!(pde & PG_ACCESSED_MASK)) { 713 if (!(pde & PG_ACCESSED_MASK)) {
673 pde |= PG_ACCESSED_MASK; 714 pde |= PG_ACCESSED_MASK;
674 stl_phys_notdirty(pde_addr, pde); 715 stl_phys_notdirty(pde_addr, pde);
675 } 716 }
676 - pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) & 717 + pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
677 env->a20_mask; 718 env->a20_mask;
678 - goto handle_4k_page; 719 + pte = ldq_phys(pte_addr);
  720 + if (!(pte & PG_PRESENT_MASK)) {
  721 + error_code = 0;
  722 + goto do_fault;
  723 + }
  724 + if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
  725 + error_code = PG_ERROR_RSVD_MASK;
  726 + goto do_fault;
  727 + }
  728 + /* combine pde and pte nx, user and rw protections */
  729 + ptep &= pte ^ PG_NX_MASK;
  730 + ptep ^= PG_NX_MASK;
  731 + if ((ptep & PG_NX_MASK) && is_write1 == 2)
  732 + goto do_fault_protect;
  733 + if (is_user) {
  734 + if (!(ptep & PG_USER_MASK))
  735 + goto do_fault_protect;
  736 + if (is_write && !(ptep & PG_RW_MASK))
  737 + goto do_fault_protect;
  738 + } else {
  739 + if ((env->cr[0] & CR0_WP_MASK) &&
  740 + is_write && !(ptep & PG_RW_MASK))
  741 + goto do_fault_protect;
  742 + }
  743 + is_dirty = is_write && !(pte & PG_DIRTY_MASK);
  744 + if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
  745 + pte |= PG_ACCESSED_MASK;
  746 + if (is_dirty)
  747 + pte |= PG_DIRTY_MASK;
  748 + stl_phys_notdirty(pte_addr, pte);
  749 + }
  750 + page_size = 4096;
  751 + virt_addr = addr & ~0xfff;
  752 + pte = pte & (PHYS_ADDR_MASK | 0xfff);
679 } 753 }
680 } else { 754 } else {
  755 + uint32_t pde;
  756 +
681 /* page directory entry */ 757 /* page directory entry */
682 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & 758 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
683 env->a20_mask; 759 env->a20_mask;
@@ -689,7 +765,6 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
689 /* if PSE bit is set, then we use a 4MB page */ 765 /* if PSE bit is set, then we use a 4MB page */
690 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { 766 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
691 page_size = 4096 * 1024; 767 page_size = 4096 * 1024;
692 - handle_big_page:  
693 if (is_user) { 768 if (is_user) {
694 if (!(pde & PG_USER_MASK)) 769 if (!(pde & PG_USER_MASK))
695 goto do_fault_protect; 770 goto do_fault_protect;
@@ -720,7 +795,6 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
720 /* page directory entry */ 795 /* page directory entry */
721 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & 796 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
722 env->a20_mask; 797 env->a20_mask;
723 - handle_4k_page:  
724 pte = ldl_phys(pte_addr); 798 pte = ldl_phys(pte_addr);
725 if (!(pte & PG_PRESENT_MASK)) { 799 if (!(pte & PG_PRESENT_MASK)) {
726 error_code = 0; 800 error_code = 0;
@@ -748,20 +822,21 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
748 page_size = 4096; 822 page_size = 4096;
749 virt_addr = addr & ~0xfff; 823 virt_addr = addr & ~0xfff;
750 } 824 }
751 -  
752 - /* the page can be put in the TLB */  
753 - prot = PAGE_READ;  
754 - if (pte & PG_DIRTY_MASK) {  
755 - /* only set write access if already dirty... otherwise wait  
756 - for dirty access */  
757 - if (is_user) {  
758 - if (ptep & PG_RW_MASK)  
759 - prot |= PAGE_WRITE;  
760 - } else {  
761 - if (!(env->cr[0] & CR0_WP_MASK) ||  
762 - (ptep & PG_RW_MASK))  
763 - prot |= PAGE_WRITE;  
764 - } 825 + }
  826 + /* the page can be put in the TLB */
  827 + prot = PAGE_READ;
  828 + if (!(ptep & PG_NX_MASK))
  829 + prot |= PAGE_EXEC;
  830 + if (pte & PG_DIRTY_MASK) {
  831 + /* only set write access if already dirty... otherwise wait
  832 + for dirty access */
  833 + if (is_user) {
  834 + if (ptep & PG_RW_MASK)
  835 + prot |= PAGE_WRITE;
  836 + } else {
  837 + if (!(env->cr[0] & CR0_WP_MASK) ||
  838 + (ptep & PG_RW_MASK))
  839 + prot |= PAGE_WRITE;
765 } 840 }
766 } 841 }
767 do_mapping: 842 do_mapping:
@@ -773,15 +848,20 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, @@ -773,15 +848,20 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
773 paddr = (pte & TARGET_PAGE_MASK) + page_offset; 848 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
774 vaddr = virt_addr + page_offset; 849 vaddr = virt_addr + page_offset;
775 850
776 - ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu); 851 + ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
777 return ret; 852 return ret;
778 do_fault_protect: 853 do_fault_protect:
779 error_code = PG_ERROR_P_MASK; 854 error_code = PG_ERROR_P_MASK;
780 do_fault: 855 do_fault:
781 env->cr[2] = addr; 856 env->cr[2] = addr;
782 - env->error_code = (is_write << PG_ERROR_W_BIT) | error_code; 857 + error_code |= (is_write << PG_ERROR_W_BIT);
783 if (is_user) 858 if (is_user)
784 - env->error_code |= PG_ERROR_U_MASK; 859 + error_code |= PG_ERROR_U_MASK;
  860 + if (is_write1 == 2 &&
  861 + (env->efer & MSR_EFER_NXE) &&
  862 + (env->cr[4] & CR4_PAE_MASK))
  863 + error_code |= PG_ERROR_I_D_MASK;
  864 + env->error_code = error_code;
785 return 1; 865 return 1;
786 } 866 }
787 867