Commit fe1e5c53fdd4e07fd217077dc6e03ad7d01d9839 (parent a0d7d5a7)

target-ppc: convert altivec load/store to TCG

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5787 c046a42c-6fe2-441c-8c8c-71466251a162

Showing 4 changed files with 45 additions and 116 deletions
target-ppc/cpu.h
target-ppc/exec.h
@@ -54,10 +54,6 @@ register target_ulong T2 asm(AREG3);
 #define T1_64 T1
 #define T2_64 T2
 #endif
-/* Provision for Altivec */
-#define AVR0 (env->avr0)
-#define AVR1 (env->avr1)
-#define AVR2 (env->avr2)
 
 #define FT0 (env->ft0)
 #define FT1 (env->ft1)
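For context, the AVR0..AVR2 macros removed above aliased three scratch vector registers in CPUPPCState that the old dyngen ops used as staging space. The sketch below approximates the vector type involved; it assumes the ppc_avr_t union as it appeared in target-ppc/cpu.h around this time, and the exact member list in the real header may differ.

/* Sketch only -- approximation of ppc_avr_t from target-ppc/cpu.h of
 * this era; not text from the commit. */
typedef union ppc_avr_t {
    uint8_t  u8[16];
    uint16_t u16[8];
    uint32_t u32[4];
    uint64_t u64[2];   /* the two halves addressed as u64[0]/u64[1] below */
} ppc_avr_t;

/* CPUPPCState holds ppc_avr_t avr[32] for the architectural vector
 * registers, plus the avr0..avr2 scratch entries that the removed
 * AVR0..AVR2 macros exposed to the old dyngen ops. */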
target-ppc/op_mem.h
@@ -642,66 +642,6 @@ void OPPROTO glue(op_POWER2_stfq_le, MEMSUFFIX) (void)
     RETURN();
 }
 
-/* Altivec vector extension */
-#if defined(WORDS_BIGENDIAN)
-#define VR_DWORD0 0
-#define VR_DWORD1 1
-#else
-#define VR_DWORD0 1
-#define VR_DWORD1 0
-#endif
-void OPPROTO glue(op_vr_lvx, MEMSUFFIX) (void)
-{
-    AVR0.u64[VR_DWORD0] = glue(ldu64, MEMSUFFIX)((uint32_t)T0);
-    AVR0.u64[VR_DWORD1] = glue(ldu64, MEMSUFFIX)((uint32_t)T0 + 8);
-}
-
-void OPPROTO glue(op_vr_lvx_le, MEMSUFFIX) (void)
-{
-    AVR0.u64[VR_DWORD1] = glue(ldu64r, MEMSUFFIX)((uint32_t)T0);
-    AVR0.u64[VR_DWORD0] = glue(ldu64r, MEMSUFFIX)((uint32_t)T0 + 8);
-}
-
-void OPPROTO glue(op_vr_stvx, MEMSUFFIX) (void)
-{
-    glue(st64, MEMSUFFIX)((uint32_t)T0, AVR0.u64[VR_DWORD0]);
-    glue(st64, MEMSUFFIX)((uint32_t)T0 + 8, AVR0.u64[VR_DWORD1]);
-}
-
-void OPPROTO glue(op_vr_stvx_le, MEMSUFFIX) (void)
-{
-    glue(st64r, MEMSUFFIX)((uint32_t)T0, AVR0.u64[VR_DWORD1]);
-    glue(st64r, MEMSUFFIX)((uint32_t)T0 + 8, AVR0.u64[VR_DWORD0]);
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_vr_lvx_64, MEMSUFFIX) (void)
-{
-    AVR0.u64[VR_DWORD0] = glue(ldu64, MEMSUFFIX)((uint64_t)T0);
-    AVR0.u64[VR_DWORD1] = glue(ldu64, MEMSUFFIX)((uint64_t)T0 + 8);
-}
-
-void OPPROTO glue(op_vr_lvx_le_64, MEMSUFFIX) (void)
-{
-    AVR0.u64[VR_DWORD1] = glue(ldu64r, MEMSUFFIX)((uint64_t)T0);
-    AVR0.u64[VR_DWORD0] = glue(ldu64r, MEMSUFFIX)((uint64_t)T0 + 8);
-}
-
-void OPPROTO glue(op_vr_stvx_64, MEMSUFFIX) (void)
-{
-    glue(st64, MEMSUFFIX)((uint64_t)T0, AVR0.u64[VR_DWORD0]);
-    glue(st64, MEMSUFFIX)((uint64_t)T0 + 8, AVR0.u64[VR_DWORD1]);
-}
-
-void OPPROTO glue(op_vr_stvx_le_64, MEMSUFFIX) (void)
-{
-    glue(st64r, MEMSUFFIX)((uint64_t)T0, AVR0.u64[VR_DWORD1]);
-    glue(st64r, MEMSUFFIX)((uint64_t)T0 + 8, AVR0.u64[VR_DWORD0]);
-}
-#endif
-#undef VR_DWORD0
-#undef VR_DWORD1
-
 /* SPE extension */
 #define _PPC_SPE_LD_OP(name, op)                                      \
 void OPPROTO glue(glue(op_spe_l, name), MEMSUFFIX) (void)             \
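The ops removed above and the TCG code added in translate.c below implement the same architectural behaviour: lvx/stvx ignore the low four bits of the effective address and transfer an aligned quadword, done here as two 64-bit accesses. A minimal illustrative C model of the load side follows; it is not QEMU code, and the type and function names are invented for the sketch.

#include <stdint.h>
#include <string.h>

/* Hypothetical 128-bit vector type for the sketch. */
typedef union {
    uint8_t  u8[16];
    uint64_t u64[2];
} vec128;

/* Model of what lvx does architecturally: force 16-byte alignment of
 * the effective address, then copy 16 bytes of guest memory into the
 * vector register.  For a big-endian guest the byte at the lowest
 * address becomes the most significant byte of the register. */
void model_lvx(vec128 *vd, const uint8_t *guest_mem, uint64_t ea)
{
    ea &= ~(uint64_t)0xf;
    memcpy(vd->u8, guest_mem + ea, 16);
}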
target-ppc/translate.c
@@ -77,7 +77,6 @@ static TCGv cpu_T[3];
 static TCGv_i64 cpu_T64[3];
 #endif
 static TCGv_i64 cpu_FT[2];
-static TCGv_i64 cpu_AVRh[3], cpu_AVRl[3];
 
 #include "gen-icount.h"
 
@@ -122,19 +121,6 @@ void ppc_translate_init(void)
     cpu_FT[1] = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, ft1), "FT1");
 
-    cpu_AVRh[0] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr0.u64[0]), "AVR0H");
-    cpu_AVRl[0] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr0.u64[1]), "AVR0L");
-    cpu_AVRh[1] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr1.u64[0]), "AVR1H");
-    cpu_AVRl[1] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr1.u64[1]), "AVR1L");
-    cpu_AVRh[2] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr2.u64[0]), "AVR2H");
-    cpu_AVRl[2] = tcg_global_mem_new_i64(TCG_AREG0,
-                                         offsetof(CPUState, avr2.u64[1]), "AVR2L");
-
     p = cpu_reg_names;
 
     for (i = 0; i < 8; i++) {
@@ -162,13 +148,23 @@ void ppc_translate_init(void)
         p += (i < 10) ? 4 : 5;
 
         sprintf(p, "avr%dH", i);
+#ifdef WORDS_BIGENDIAN
+        cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
+                                             offsetof(CPUState, avr[i].u64[0]), p);
+#else
         cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, avr[i].u64[0]), p);
+                                             offsetof(CPUState, avr[i].u64[1]), p);
+#endif
         p += (i < 10) ? 6 : 7;
 
         sprintf(p, "avr%dL", i);
+#ifdef WORDS_BIGENDIAN
         cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUState, avr[i].u64[1]), p);
+                                             offsetof(CPUState, avr[i].u64[1]), p);
+#else
+        cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
+                                             offsetof(CPUState, avr[i].u64[0]), p);
+#endif
         p += (i < 10) ? 6 : 7;
     }
 
@@ -5939,61 +5935,59 @@ GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001, PPC_BOOKE)
 /*** Altivec vector extension ***/
 /* Altivec registers moves */
 
-static always_inline void gen_load_avr(int t, int reg) {
-    tcg_gen_mov_i64(cpu_AVRh[t], cpu_avrh[reg]);
-    tcg_gen_mov_i64(cpu_AVRl[t], cpu_avrl[reg]);
-}
-
-static always_inline void gen_store_avr(int reg, int t) {
-    tcg_gen_mov_i64(cpu_avrh[reg], cpu_AVRh[t]);
-    tcg_gen_mov_i64(cpu_avrl[reg], cpu_AVRl[t]);
-}
-
-#define op_vr_ldst(name) (*gen_op_##name[ctx->mem_idx])()
-#define OP_VR_LD_TABLE(name)                                          \
-static GenOpFunc *gen_op_vr_l##name[NB_MEM_FUNCS] = {                 \
-    GEN_MEM_FUNCS(vr_l##name),                                        \
-};
-#define OP_VR_ST_TABLE(name)                                          \
-static GenOpFunc *gen_op_vr_st##name[NB_MEM_FUNCS] = {                \
-    GEN_MEM_FUNCS(vr_st##name),                                       \
-};
-
 #define GEN_VR_LDX(name, opc2, opc3)                                  \
-GEN_HANDLER(l##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)       \
+GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)          \
 {                                                                     \
+    TCGv EA;                                                          \
     if (unlikely(!ctx->altivec_enabled)) {                            \
         GEN_EXCP_NO_VR(ctx);                                          \
         return;                                                       \
     }                                                                 \
-    gen_addr_reg_index(cpu_T[0], ctx);                                \
-    op_vr_ldst(vr_l##name);                                           \
-    gen_store_avr(rD(ctx->opcode), 0);                                \
+    EA = tcg_temp_new();                                              \
+    gen_addr_reg_index(EA, ctx);                                      \
+    tcg_gen_andi_tl(EA, EA, ~0xf);                                    \
+    if (ctx->mem_idx & 1) {                                           \
+        gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx);   \
+        tcg_gen_addi_tl(EA, EA, 8);                                   \
+        gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx);   \
+    } else {                                                          \
+        gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx);   \
+        tcg_gen_addi_tl(EA, EA, 8);                                   \
+        gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx);   \
+    }                                                                 \
+    tcg_temp_free(EA);                                                \
 }
 
 #define GEN_VR_STX(name, opc2, opc3)                                  \
 GEN_HANDLER(st##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)      \
 {                                                                     \
+    TCGv EA;                                                          \
     if (unlikely(!ctx->altivec_enabled)) {                            \
         GEN_EXCP_NO_VR(ctx);                                          \
         return;                                                       \
     }                                                                 \
-    gen_addr_reg_index(cpu_T[0], ctx);                                \
-    gen_load_avr(0, rS(ctx->opcode));                                 \
-    op_vr_ldst(vr_st##name);                                          \
+    EA = tcg_temp_new();                                              \
+    gen_addr_reg_index(EA, ctx);                                      \
+    tcg_gen_andi_tl(EA, EA, ~0xf);                                    \
+    if (ctx->mem_idx & 1) {                                           \
+        gen_qemu_st64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx);   \
+        tcg_gen_addi_tl(EA, EA, 8);                                   \
+        gen_qemu_st64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx);   \
+    } else {                                                          \
+        gen_qemu_st64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx);   \
+        tcg_gen_addi_tl(EA, EA, 8);                                   \
+        gen_qemu_st64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx);   \
+    }                                                                 \
+    tcg_temp_free(EA);                                                \
 }
 
-OP_VR_LD_TABLE(vx);
-GEN_VR_LDX(vx, 0x07, 0x03);
+GEN_VR_LDX(lvx, 0x07, 0x03);
 /* As we don't emulate the cache, lvxl is stricly equivalent to lvx */
-#define gen_op_vr_lvxl gen_op_vr_lvx
-GEN_VR_LDX(vxl, 0x07, 0x0B);
+GEN_VR_LDX(lvxl, 0x07, 0x0B);
 
-OP_VR_ST_TABLE(vx);
-GEN_VR_STX(vx, 0x07, 0x07);
+GEN_VR_STX(svx, 0x07, 0x07);
 /* As we don't emulate the cache, stvxl is stricly equivalent to stvx */
-#define gen_op_vr_stvxl gen_op_vr_stvx
-GEN_VR_STX(vxl, 0x07, 0x0F);
+GEN_VR_STX(svxl, 0x07, 0x0F);
 
 /*** SPE extension ***/
 /* Register moves */
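For readability, here is roughly what the GEN_VR_LDX(lvx, 0x07, 0x03) invocation above reduces to once the outer macro is substituted. This expansion is illustrative and not text from the commit; GEN_HANDLER is left unexpanded (it ultimately defines the gen_lvx handler), and the comment on the mem_idx test is an assumption about this era's encoding.

/* Illustrative hand-expansion of GEN_VR_LDX(lvx, 0x07, 0x03); the body
 * is the macro body from the hunk above with 'name' substituted. */
GEN_HANDLER(lvx, 0x1F, 0x07, 0x03, 0x00000001, PPC_ALTIVEC)
{
    TCGv EA;
    if (unlikely(!ctx->altivec_enabled)) {
        GEN_EXCP_NO_VR(ctx);
        return;
    }
    EA = tcg_temp_new();
    gen_addr_reg_index(EA, ctx);        /* EA = (rA|0) + rB */
    tcg_gen_andi_tl(EA, EA, ~0xf);      /* lvx ignores the low 4 bits of EA */
    if (ctx->mem_idx & 1) {             /* presumably the little-endian flag here */
        gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx);
    } else {
        gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx);
        tcg_gen_addi_tl(EA, EA, 8);
        gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx);
    }
    tcg_temp_free(EA);
}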