Commit 5b5aba4f142b821516e1e8b08d07d0c67a229da6

Authored by blueswir1
1 parent f6b868fc

Implement large pages

The current SLB/PTE code does not support large pages, which are
required by Linux, as it boots up with the kernel regions mapped as large pages.

This patch implements large page support, so we can run Linux.

Signed-off-by: Alexander Graf <alex@csgraf.de>


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6748 c046a42c-6fe2-441c-8c8c-71466251a162
target-ppc/cpu.h
@@ -677,6 +677,7 @@ struct CPUPPCState { @@ -677,6 +677,7 @@ struct CPUPPCState {
677 typedef struct mmu_ctx_t mmu_ctx_t; 677 typedef struct mmu_ctx_t mmu_ctx_t;
678 struct mmu_ctx_t { 678 struct mmu_ctx_t {
679 target_phys_addr_t raddr; /* Real address */ 679 target_phys_addr_t raddr; /* Real address */
  680 + target_phys_addr_t eaddr; /* Effective address */
680 int prot; /* Protection bits */ 681 int prot; /* Protection bits */
681 target_phys_addr_t pg_addr[2]; /* PTE tables base addresses */ 682 target_phys_addr_t pg_addr[2]; /* PTE tables base addresses */
682 target_ulong ptem; /* Virtual segment ID | API */ 683 target_ulong ptem; /* Virtual segment ID | API */
target-ppc/helper.c
@@ -582,7 +582,8 @@ static always_inline int get_bat (CPUState *env, mmu_ctx_t *ctx, @@ -582,7 +582,8 @@ static always_inline int get_bat (CPUState *env, mmu_ctx_t *ctx,
582 582
583 /* PTE table lookup */ 583 /* PTE table lookup */
584 static always_inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h, 584 static always_inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h,
585 - int rw, int type) 585 + int rw, int type,
  586 + int target_page_bits)
586 { 587 {
587 target_ulong base, pte0, pte1; 588 target_ulong base, pte0, pte1;
588 int i, good = -1; 589 int i, good = -1;
@@ -594,7 +595,14 @@ static always_inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h, @@ -594,7 +595,14 @@ static always_inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h,
594 #if defined(TARGET_PPC64) 595 #if defined(TARGET_PPC64)
595 if (is_64b) { 596 if (is_64b) {
596 pte0 = ldq_phys(base + (i * 16)); 597 pte0 = ldq_phys(base + (i * 16));
597 - pte1 = ldq_phys(base + (i * 16) + 8); 598 + pte1 = ldq_phys(base + (i * 16) + 8);
  599 +
  600 + /* We have a TLB that saves 4K pages, so let's
  601 + * split a huge page to 4k chunks */
  602 + if (target_page_bits != TARGET_PAGE_BITS)
  603 + pte1 |= (ctx->eaddr & (( 1 << target_page_bits ) - 1))
  604 + & TARGET_PAGE_MASK;
  605 +
598 r = pte64_check(ctx, pte0, pte1, h, rw, type); 606 r = pte64_check(ctx, pte0, pte1, h, rw, type);
599 LOG_MMU("Load pte from " ADDRX " => " ADDRX " " ADDRX 607 LOG_MMU("Load pte from " ADDRX " => " ADDRX " " ADDRX
600 " %d %d %d " ADDRX "\n", 608 " %d %d %d " ADDRX "\n",
@@ -658,27 +666,30 @@ static always_inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h, @@ -658,27 +666,30 @@ static always_inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h,
658 return ret; 666 return ret;
659 } 667 }
660 668
661 -static always_inline int find_pte32 (mmu_ctx_t *ctx, int h, int rw, int type) 669 +static always_inline int find_pte32 (mmu_ctx_t *ctx, int h, int rw,
  670 + int type, int target_page_bits)
662 { 671 {
663 - return _find_pte(ctx, 0, h, rw, type); 672 + return _find_pte(ctx, 0, h, rw, type, target_page_bits);
664 } 673 }
665 674
666 #if defined(TARGET_PPC64) 675 #if defined(TARGET_PPC64)
667 -static always_inline int find_pte64 (mmu_ctx_t *ctx, int h, int rw, int type) 676 +static always_inline int find_pte64 (mmu_ctx_t *ctx, int h, int rw,
  677 + int type, int target_page_bits)
668 { 678 {
669 - return _find_pte(ctx, 1, h, rw, type); 679 + return _find_pte(ctx, 1, h, rw, type, target_page_bits);
670 } 680 }
671 #endif 681 #endif
672 682
673 static always_inline int find_pte (CPUState *env, mmu_ctx_t *ctx, 683 static always_inline int find_pte (CPUState *env, mmu_ctx_t *ctx,
674 - int h, int rw, int type) 684 + int h, int rw, int type,
  685 + int target_page_bits)
675 { 686 {
676 #if defined(TARGET_PPC64) 687 #if defined(TARGET_PPC64)
677 if (env->mmu_model & POWERPC_MMU_64) 688 if (env->mmu_model & POWERPC_MMU_64)
678 - return find_pte64(ctx, h, rw, type); 689 + return find_pte64(ctx, h, rw, type, target_page_bits);
679 #endif 690 #endif
680 691
681 - return find_pte32(ctx, h, rw, type); 692 + return find_pte32(ctx, h, rw, type, target_page_bits);
682 } 693 }
683 694
684 #if defined(TARGET_PPC64) 695 #if defined(TARGET_PPC64)
@@ -694,7 +705,8 @@ static always_inline void slb_invalidate (uint64_t *slb64) @@ -694,7 +705,8 @@ static always_inline void slb_invalidate (uint64_t *slb64)
694 705
695 static always_inline int slb_lookup (CPUPPCState *env, target_ulong eaddr, 706 static always_inline int slb_lookup (CPUPPCState *env, target_ulong eaddr,
696 target_ulong *vsid, 707 target_ulong *vsid,
697 - target_ulong *page_mask, int *attr) 708 + target_ulong *page_mask, int *attr,
  709 + int *target_page_bits)
698 { 710 {
699 target_phys_addr_t sr_base; 711 target_phys_addr_t sr_base;
700 target_ulong mask; 712 target_ulong mask;
@@ -714,19 +726,16 @@ static always_inline int slb_lookup (CPUPPCState *env, target_ulong eaddr, @@ -714,19 +726,16 @@ static always_inline int slb_lookup (CPUPPCState *env, target_ulong eaddr,
714 PRIx32 "\n", __func__, n, sr_base, tmp64, tmp); 726 PRIx32 "\n", __func__, n, sr_base, tmp64, tmp);
715 if (slb_is_valid(tmp64)) { 727 if (slb_is_valid(tmp64)) {
716 /* SLB entry is valid */ 728 /* SLB entry is valid */
717 - switch (tmp64 & 0x0000000006000000ULL) {  
718 - case 0x0000000000000000ULL:  
719 - /* 256 MB segment */  
720 - mask = 0xFFFFFFFFF0000000ULL;  
721 - break;  
722 - case 0x0000000002000000ULL:  
723 - /* 1 TB segment */ 729 + if (tmp & 0x8) {
  730 + /* 1 TB Segment */
724 mask = 0xFFFF000000000000ULL; 731 mask = 0xFFFF000000000000ULL;
725 - break;  
726 - case 0x0000000004000000ULL:  
727 - case 0x0000000006000000ULL:  
728 - /* Reserved => segment is invalid */  
729 - continue; 732 + if (target_page_bits)
  733 + *target_page_bits = 24; // XXX 16M pages?
  734 + } else {
  735 + /* 256MB Segment */
  736 + mask = 0xFFFFFFFFF0000000ULL;
  737 + if (target_page_bits)
  738 + *target_page_bits = TARGET_PAGE_BITS;
730 } 739 }
731 if ((eaddr & mask) == (tmp64 & mask)) { 740 if ((eaddr & mask) == (tmp64 & mask)) {
732 /* SLB match */ 741 /* SLB match */
@@ -777,7 +786,7 @@ void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0) @@ -777,7 +786,7 @@ void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0)
777 int attr; 786 int attr;
778 int n; 787 int n;
779 788
780 - n = slb_lookup(env, T0, &vsid, &page_mask, &attr); 789 + n = slb_lookup(env, T0, &vsid, &page_mask, &attr, NULL);
781 if (n >= 0) { 790 if (n >= 0) {
782 sr_base = env->spr[SPR_ASR]; 791 sr_base = env->spr[SPR_ASR];
783 sr_base += 12 * n; 792 sr_base += 12 * n;
@@ -871,20 +880,22 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx, @@ -871,20 +880,22 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
871 #if defined(TARGET_PPC64) 880 #if defined(TARGET_PPC64)
872 int attr; 881 int attr;
873 #endif 882 #endif
874 - int ds, vsid_sh, sdr_sh, pr; 883 + int ds, vsid_sh, sdr_sh, pr, target_page_bits;
875 int ret, ret2; 884 int ret, ret2;
876 885
877 pr = msr_pr; 886 pr = msr_pr;
878 #if defined(TARGET_PPC64) 887 #if defined(TARGET_PPC64)
879 if (env->mmu_model & POWERPC_MMU_64) { 888 if (env->mmu_model & POWERPC_MMU_64) {
880 LOG_MMU("Check SLBs\n"); 889 LOG_MMU("Check SLBs\n");
881 - ret = slb_lookup(env, eaddr, &vsid, &page_mask, &attr); 890 + ret = slb_lookup(env, eaddr, &vsid, &page_mask, &attr,
  891 + &target_page_bits);
882 if (ret < 0) 892 if (ret < 0)
883 return ret; 893 return ret;
884 ctx->key = ((attr & 0x40) && (pr != 0)) || 894 ctx->key = ((attr & 0x40) && (pr != 0)) ||
885 ((attr & 0x80) && (pr == 0)) ? 1 : 0; 895 ((attr & 0x80) && (pr == 0)) ? 1 : 0;
886 ds = 0; 896 ds = 0;
887 - ctx->nx = attr & 0x20 ? 1 : 0; 897 + ctx->nx = attr & 0x10 ? 1 : 0;
  898 + ctx->eaddr = eaddr;
888 vsid_mask = 0x00003FFFFFFFFF80ULL; 899 vsid_mask = 0x00003FFFFFFFFF80ULL;
889 vsid_sh = 7; 900 vsid_sh = 7;
890 sdr_sh = 18; 901 sdr_sh = 18;
@@ -903,6 +914,7 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx, @@ -903,6 +914,7 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
903 vsid_sh = 6; 914 vsid_sh = 6;
904 sdr_sh = 16; 915 sdr_sh = 16;
905 sdr_mask = 0xFFC0; 916 sdr_mask = 0xFFC0;
  917 + target_page_bits = TARGET_PAGE_BITS;
906 LOG_MMU("Check segment v=" ADDRX " %d " ADDRX 918 LOG_MMU("Check segment v=" ADDRX " %d " ADDRX
907 " nip=" ADDRX " lr=" ADDRX " ir=%d dr=%d pr=%d %d t=%d\n", 919 " nip=" ADDRX " lr=" ADDRX " ir=%d dr=%d pr=%d %d t=%d\n",
908 eaddr, (int)(eaddr >> 28), sr, env->nip, 920 eaddr, (int)(eaddr >> 28), sr, env->nip,
@@ -918,7 +930,7 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx, @@ -918,7 +930,7 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
918 /* Page address translation */ 930 /* Page address translation */
919 /* Primary table address */ 931 /* Primary table address */
920 sdr = env->sdr1; 932 sdr = env->sdr1;
921 - pgidx = (eaddr & page_mask) >> TARGET_PAGE_BITS; 933 + pgidx = (eaddr & page_mask) >> target_page_bits;
922 #if defined(TARGET_PPC64) 934 #if defined(TARGET_PPC64)
923 if (env->mmu_model & POWERPC_MMU_64) { 935 if (env->mmu_model & POWERPC_MMU_64) {
924 htab_mask = 0x0FFFFFFF >> (28 - (sdr & 0x1F)); 936 htab_mask = 0x0FFFFFFF >> (28 - (sdr & 0x1F));
@@ -944,7 +956,12 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx, @@ -944,7 +956,12 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
944 #if defined(TARGET_PPC64) 956 #if defined(TARGET_PPC64)
945 if (env->mmu_model & POWERPC_MMU_64) { 957 if (env->mmu_model & POWERPC_MMU_64) {
946 /* Only 5 bits of the page index are used in the AVPN */ 958 /* Only 5 bits of the page index are used in the AVPN */
947 - ctx->ptem = (vsid << 12) | ((pgidx >> 4) & 0x0F80); 959 + if (target_page_bits > 23) {
  960 + ctx->ptem = (vsid << 12) |
  961 + ((pgidx << (target_page_bits - 16)) & 0xF80);
  962 + } else {
  963 + ctx->ptem = (vsid << 12) | ((pgidx >> 4) & 0x0F80);
  964 + }
948 } else 965 } else
949 #endif 966 #endif
950 { 967 {
@@ -962,7 +979,7 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx, @@ -962,7 +979,7 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
962 " pg_addr=" PADDRX "\n", 979 " pg_addr=" PADDRX "\n",
963 sdr, vsid, pgidx, hash, ctx->pg_addr[0]); 980 sdr, vsid, pgidx, hash, ctx->pg_addr[0]);
964 /* Primary table lookup */ 981 /* Primary table lookup */
965 - ret = find_pte(env, ctx, 0, rw, type); 982 + ret = find_pte(env, ctx, 0, rw, type, target_page_bits);
966 if (ret < 0) { 983 if (ret < 0) {
967 /* Secondary table lookup */ 984 /* Secondary table lookup */
968 if (eaddr != 0xEFFFFFFF) 985 if (eaddr != 0xEFFFFFFF)
@@ -970,7 +987,8 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx, @@ -970,7 +987,8 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
970 "api=" ADDRX " hash=" PADDRX 987 "api=" ADDRX " hash=" PADDRX
971 " pg_addr=" PADDRX "\n", 988 " pg_addr=" PADDRX "\n",
972 sdr, vsid, pgidx, hash, ctx->pg_addr[1]); 989 sdr, vsid, pgidx, hash, ctx->pg_addr[1]);
973 - ret2 = find_pte(env, ctx, 1, rw, type); 990 + ret2 = find_pte(env, ctx, 1, rw, type,
  991 + target_page_bits);
974 if (ret2 != -1) 992 if (ret2 != -1)
975 ret = ret2; 993 ret = ret2;
976 } 994 }