Commit 5b5aba4f142b821516e1e8b08d07d0c67a229da6

Authored by blueswir1
1 parent f6b868fc

Implement large pages

The current SLB/PTE code does not support large pages, which are
required by Linux, as it boots up with the kernel regions mapped as large pages.

This patch implements large page support, so we can run Linux.

Signed-off-by: Alexander Graf <alex@csgraf.de>


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6748 c046a42c-6fe2-441c-8c8c-71466251a162
target-ppc/cpu.h
... ... @@ -677,6 +677,7 @@ struct CPUPPCState {
677 677 typedef struct mmu_ctx_t mmu_ctx_t;
678 678 struct mmu_ctx_t {
679 679 target_phys_addr_t raddr; /* Real address */
  680 + target_phys_addr_t eaddr; /* Effective address */
680 681 int prot; /* Protection bits */
681 682 target_phys_addr_t pg_addr[2]; /* PTE tables base addresses */
682 683 target_ulong ptem; /* Virtual segment ID | API */
... ...
target-ppc/helper.c
... ... @@ -582,7 +582,8 @@ static always_inline int get_bat (CPUState *env, mmu_ctx_t *ctx,
582 582  
583 583 /* PTE table lookup */
584 584 static always_inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h,
585   - int rw, int type)
  585 + int rw, int type,
  586 + int target_page_bits)
586 587 {
587 588 target_ulong base, pte0, pte1;
588 589 int i, good = -1;
... ... @@ -594,7 +595,14 @@ static always_inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h,
594 595 #if defined(TARGET_PPC64)
595 596 if (is_64b) {
596 597 pte0 = ldq_phys(base + (i * 16));
597   - pte1 = ldq_phys(base + (i * 16) + 8);
  598 + pte1 = ldq_phys(base + (i * 16) + 8);
  599 +
  600 + /* We have a TLB that saves 4K pages, so let's
  601 + * split a huge page to 4k chunks */
  602 + if (target_page_bits != TARGET_PAGE_BITS)
  603 + pte1 |= (ctx->eaddr & (( 1 << target_page_bits ) - 1))
  604 + & TARGET_PAGE_MASK;
  605 +
598 606 r = pte64_check(ctx, pte0, pte1, h, rw, type);
599 607 LOG_MMU("Load pte from " ADDRX " => " ADDRX " " ADDRX
600 608 " %d %d %d " ADDRX "\n",
... ... @@ -658,27 +666,30 @@ static always_inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h,
658 666 return ret;
659 667 }
660 668  
661   -static always_inline int find_pte32 (mmu_ctx_t *ctx, int h, int rw, int type)
  669 +static always_inline int find_pte32 (mmu_ctx_t *ctx, int h, int rw,
  670 + int type, int target_page_bits)
662 671 {
663   - return _find_pte(ctx, 0, h, rw, type);
  672 + return _find_pte(ctx, 0, h, rw, type, target_page_bits);
664 673 }
665 674  
666 675 #if defined(TARGET_PPC64)
667   -static always_inline int find_pte64 (mmu_ctx_t *ctx, int h, int rw, int type)
  676 +static always_inline int find_pte64 (mmu_ctx_t *ctx, int h, int rw,
  677 + int type, int target_page_bits)
668 678 {
669   - return _find_pte(ctx, 1, h, rw, type);
  679 + return _find_pte(ctx, 1, h, rw, type, target_page_bits);
670 680 }
671 681 #endif
672 682  
673 683 static always_inline int find_pte (CPUState *env, mmu_ctx_t *ctx,
674   - int h, int rw, int type)
  684 + int h, int rw, int type,
  685 + int target_page_bits)
675 686 {
676 687 #if defined(TARGET_PPC64)
677 688 if (env->mmu_model & POWERPC_MMU_64)
678   - return find_pte64(ctx, h, rw, type);
  689 + return find_pte64(ctx, h, rw, type, target_page_bits);
679 690 #endif
680 691  
681   - return find_pte32(ctx, h, rw, type);
  692 + return find_pte32(ctx, h, rw, type, target_page_bits);
682 693 }
683 694  
684 695 #if defined(TARGET_PPC64)
... ... @@ -694,7 +705,8 @@ static always_inline void slb_invalidate (uint64_t *slb64)
694 705  
695 706 static always_inline int slb_lookup (CPUPPCState *env, target_ulong eaddr,
696 707 target_ulong *vsid,
697   - target_ulong *page_mask, int *attr)
  708 + target_ulong *page_mask, int *attr,
  709 + int *target_page_bits)
698 710 {
699 711 target_phys_addr_t sr_base;
700 712 target_ulong mask;
... ... @@ -714,19 +726,16 @@ static always_inline int slb_lookup (CPUPPCState *env, target_ulong eaddr,
714 726 PRIx32 "\n", __func__, n, sr_base, tmp64, tmp);
715 727 if (slb_is_valid(tmp64)) {
716 728 /* SLB entry is valid */
717   - switch (tmp64 & 0x0000000006000000ULL) {
718   - case 0x0000000000000000ULL:
719   - /* 256 MB segment */
720   - mask = 0xFFFFFFFFF0000000ULL;
721   - break;
722   - case 0x0000000002000000ULL:
723   - /* 1 TB segment */
  729 + if (tmp & 0x8) {
  730 + /* 1 TB Segment */
724 731 mask = 0xFFFF000000000000ULL;
725   - break;
726   - case 0x0000000004000000ULL:
727   - case 0x0000000006000000ULL:
728   - /* Reserved => segment is invalid */
729   - continue;
  732 + if (target_page_bits)
  733 + *target_page_bits = 24; // XXX 16M pages?
  734 + } else {
  735 + /* 256MB Segment */
  736 + mask = 0xFFFFFFFFF0000000ULL;
  737 + if (target_page_bits)
  738 + *target_page_bits = TARGET_PAGE_BITS;
730 739 }
731 740 if ((eaddr & mask) == (tmp64 & mask)) {
732 741 /* SLB match */
... ... @@ -777,7 +786,7 @@ void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0)
777 786 int attr;
778 787 int n;
779 788  
780   - n = slb_lookup(env, T0, &vsid, &page_mask, &attr);
  789 + n = slb_lookup(env, T0, &vsid, &page_mask, &attr, NULL);
781 790 if (n >= 0) {
782 791 sr_base = env->spr[SPR_ASR];
783 792 sr_base += 12 * n;
... ... @@ -871,20 +880,22 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
871 880 #if defined(TARGET_PPC64)
872 881 int attr;
873 882 #endif
874   - int ds, vsid_sh, sdr_sh, pr;
  883 + int ds, vsid_sh, sdr_sh, pr, target_page_bits;
875 884 int ret, ret2;
876 885  
877 886 pr = msr_pr;
878 887 #if defined(TARGET_PPC64)
879 888 if (env->mmu_model & POWERPC_MMU_64) {
880 889 LOG_MMU("Check SLBs\n");
881   - ret = slb_lookup(env, eaddr, &vsid, &page_mask, &attr);
  890 + ret = slb_lookup(env, eaddr, &vsid, &page_mask, &attr,
  891 + &target_page_bits);
882 892 if (ret < 0)
883 893 return ret;
884 894 ctx->key = ((attr & 0x40) && (pr != 0)) ||
885 895 ((attr & 0x80) && (pr == 0)) ? 1 : 0;
886 896 ds = 0;
887   - ctx->nx = attr & 0x20 ? 1 : 0;
  897 + ctx->nx = attr & 0x10 ? 1 : 0;
  898 + ctx->eaddr = eaddr;
888 899 vsid_mask = 0x00003FFFFFFFFF80ULL;
889 900 vsid_sh = 7;
890 901 sdr_sh = 18;
... ... @@ -903,6 +914,7 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
903 914 vsid_sh = 6;
904 915 sdr_sh = 16;
905 916 sdr_mask = 0xFFC0;
  917 + target_page_bits = TARGET_PAGE_BITS;
906 918 LOG_MMU("Check segment v=" ADDRX " %d " ADDRX
907 919 " nip=" ADDRX " lr=" ADDRX " ir=%d dr=%d pr=%d %d t=%d\n",
908 920 eaddr, (int)(eaddr >> 28), sr, env->nip,
... ... @@ -918,7 +930,7 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
918 930 /* Page address translation */
919 931 /* Primary table address */
920 932 sdr = env->sdr1;
921   - pgidx = (eaddr & page_mask) >> TARGET_PAGE_BITS;
  933 + pgidx = (eaddr & page_mask) >> target_page_bits;
922 934 #if defined(TARGET_PPC64)
923 935 if (env->mmu_model & POWERPC_MMU_64) {
924 936 htab_mask = 0x0FFFFFFF >> (28 - (sdr & 0x1F));
... ... @@ -944,7 +956,12 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
944 956 #if defined(TARGET_PPC64)
945 957 if (env->mmu_model & POWERPC_MMU_64) {
946 958 /* Only 5 bits of the page index are used in the AVPN */
947   - ctx->ptem = (vsid << 12) | ((pgidx >> 4) & 0x0F80);
  959 + if (target_page_bits > 23) {
  960 + ctx->ptem = (vsid << 12) |
  961 + ((pgidx << (target_page_bits - 16)) & 0xF80);
  962 + } else {
  963 + ctx->ptem = (vsid << 12) | ((pgidx >> 4) & 0x0F80);
  964 + }
948 965 } else
949 966 #endif
950 967 {
... ... @@ -962,7 +979,7 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
962 979 " pg_addr=" PADDRX "\n",
963 980 sdr, vsid, pgidx, hash, ctx->pg_addr[0]);
964 981 /* Primary table lookup */
965   - ret = find_pte(env, ctx, 0, rw, type);
  982 + ret = find_pte(env, ctx, 0, rw, type, target_page_bits);
966 983 if (ret < 0) {
967 984 /* Secondary table lookup */
968 985 if (eaddr != 0xEFFFFFFF)
... ... @@ -970,7 +987,8 @@ static always_inline int get_segment (CPUState *env, mmu_ctx_t *ctx,
970 987 "api=" ADDRX " hash=" PADDRX
971 988 " pg_addr=" PADDRX "\n",
972 989 sdr, vsid, pgidx, hash, ctx->pg_addr[1]);
973   - ret2 = find_pte(env, ctx, 1, rw, type);
  990 + ret2 = find_pte(env, ctx, 1, rw, type,
  991 + target_page_bits);
974 992 if (ret2 != -1)
975 993 ret = ret2;
976 994 }
... ...