Commit b7bcbe9524c05d5134136cce2d5d2a09c09a4f83
1 parent 55754d9e
ARM VFP support (Paul Brook)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1309 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 8 changed files with 1104 additions and 14 deletions
Changelog
Makefile.target
... | ... | @@ -259,6 +259,10 @@ ifeq ($(TARGET_BASE_ARCH), sparc) |
259 | 259 | LIBOBJS+= op_helper.o helper.o |
260 | 260 | endif |
261 | 261 | |
262 | +ifeq ($(TARGET_BASE_ARCH), arm) | |
263 | +LIBOBJS+= op_helper.o | |
264 | +endif | |
265 | + | |
262 | 266 | # NOTE: the disassembler code is only needed for debugging |
263 | 267 | LIBOBJS+=disas.o |
264 | 268 | ifeq ($(findstring i386, $(TARGET_ARCH) $(ARCH)),i386) | ... | ... |
cpu-exec.c
... | ... | @@ -346,7 +346,8 @@ int cpu_exec(CPUState *env1) |
346 | 346 | cs_base = env->segs[R_CS].base; |
347 | 347 | pc = cs_base + env->eip; |
348 | 348 | #elif defined(TARGET_ARM) |
349 | - flags = env->thumb; | |
349 | + flags = env->thumb | (env->vfp.vec_len << 1) | |
350 | + | (env->vfp.vec_stride << 4); | |
350 | 351 | cs_base = 0; |
351 | 352 | pc = env->regs[15]; |
352 | 353 | #elif defined(TARGET_SPARC) |
... | ... | @@ -619,6 +620,7 @@ int cpu_exec(CPUState *env1) |
619 | 620 | #endif |
620 | 621 | #elif defined(TARGET_ARM) |
621 | 622 | env->cpsr = compute_cpsr(); |
623 | + /* XXX: Save/restore host fpu exception state? */ | |
622 | 624 | #elif defined(TARGET_SPARC) |
623 | 625 | #elif defined(TARGET_PPC) |
624 | 626 | #else | ... | ... |
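Note on the flags computation in the cpu_exec() hunk above: packing the FPSCR vector length and stride into the translation-block flags alongside the Thumb bit means a block is retranslated whenever those settings change, so the generated code can assume fixed vector behaviour. A minimal standalone sketch of the packing (illustrative only; the helper name is not from the patch):

    #include <stdint.h>

    /* Bit 0 = Thumb, bits 1-3 = FPSCR vector length, bits 4-5 = vector
       stride, matching the cpu_exec() hunk above. */
    static inline uint32_t arm_tb_flags(uint32_t thumb, uint32_t vec_len,
                                        uint32_t vec_stride)
    {
        return (thumb & 1) | ((vec_len & 7) << 1) | ((vec_stride & 3) << 4);
    }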
target-arm/cpu.h
... | ... | @@ -29,6 +29,14 @@ |
29 | 29 | #define EXCP_PREFETCH_ABORT 3 |
30 | 30 | #define EXCP_DATA_ABORT 4 |
31 | 31 | |
32 | +/* We currently assume float and double are IEEE single and double | |
33 | + precision respectively. | |
34 | + Doing runtime conversions is tricky because VFP registers may contain | |
35 | + integer values (e.g. as the result of an FTOSI instruction). | |
36 | + A double precision register load/store must also load/store the | |
37 | + corresponding single precision pair, although it is undefined how | |
38 | + these overlap. */ | |
39 | + | |
32 | 40 | typedef struct CPUARMState { |
33 | 41 | uint32_t regs[16]; |
34 | 42 | uint32_t cpsr; |
... | ... | @@ -50,6 +58,7 @@ typedef struct CPUARMState { |
50 | 58 | int interrupt_request; |
51 | 59 | struct TranslationBlock *current_tb; |
52 | 60 | int user_mode_only; |
61 | + uint32_t address; | |
53 | 62 | |
54 | 63 | /* in order to avoid passing too many arguments to the memory |
55 | 64 | write helpers, we store some rarely used information in the CPU |
... | ... | @@ -58,6 +67,25 @@ typedef struct CPUARMState { |
58 | 67 | written */ |
59 | 68 | unsigned long mem_write_vaddr; /* target virtual addr at which the |
60 | 69 | memory was written */ |
70 | + /* VFP coprocessor state. */ | |
71 | + struct { | |
72 | + union { | |
73 | + float s[32]; | |
74 | + double d[16]; | |
75 | + } regs; | |
76 | + | |
77 | + /* We store these fpscr fields separately for convenience. */ | |
78 | + int vec_len; | |
79 | + int vec_stride; | |
80 | + | |
81 | + uint32_t fpscr; | |
82 | + | |
83 | + /* Temporary variables if we don't have spare fp regs. */ | |
84 | + float tmp0s, tmp1s; | |
85 | + double tmp0d, tmp1d; | |
86 | + | |
87 | + } vfp; | |
88 | + | |
61 | 89 | /* user data */ |
62 | 90 | void *opaque; |
63 | 91 | } CPUARMState; | ... | ... |
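The comment at the top of this hunk motivates the union layout of the register file: a double-precision register aliases a pair of single-precision registers, and a 64-bit load or store must update both views even though the architecture leaves the exact overlap undefined. A small standalone demonstration of that aliasing on a typical host (illustrative, not code from the patch):

    #include <stdio.h>

    union vfp_bank {
        float s[32];
        double d[16];
    };

    int main(void)
    {
        union vfp_bank r = { .s = { 0 } };
        r.d[0] = 1.0;                               /* write d0 ...              */
        printf("s0=%g s1=%g\n", r.s[0], r.s[1]);    /* ... s0 and s1 change too */
        return 0;
    }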
target-arm/exec.h
... | ... | @@ -24,13 +24,16 @@ register uint32_t T0 asm(AREG1); |
24 | 24 | register uint32_t T1 asm(AREG2); |
25 | 25 | register uint32_t T2 asm(AREG3); |
26 | 26 | |
27 | +/* TODO: Put these in FP regs on targets that have such things. */ | |
28 | +/* It is ok for FT0s and FT0d to overlap. Likewise FT1s and FT1d. */ | |
29 | +#define FT0s env->vfp.tmp0s | |
30 | +#define FT1s env->vfp.tmp1s | |
31 | +#define FT0d env->vfp.tmp0d | |
32 | +#define FT1d env->vfp.tmp1d | |
33 | + | |
27 | 34 | #include "cpu.h" |
28 | 35 | #include "exec-all.h" |
29 | 36 | |
30 | -void cpu_lock(void); | |
31 | -void cpu_unlock(void); | |
32 | -void cpu_loop_exit(void); | |
33 | - | |
34 | 37 | /* Implemented CPSR bits. */ |
35 | 38 | #define CACHED_CPSR_BITS 0xf8000000 |
36 | 39 | static inline int compute_cpsr(void) |
... | ... | @@ -51,3 +54,24 @@ static inline void regs_to_env(void) |
51 | 54 | |
52 | 55 | int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw, |
53 | 56 | int is_user, int is_softmmu); |
57 | + | |
58 | +/* In op_helper.c */ | |
59 | + | |
60 | +void cpu_lock(void); | |
61 | +void cpu_unlock(void); | |
62 | +void cpu_loop_exit(void); | |
63 | + | |
64 | +void raise_exception(int); | |
65 | + | |
66 | +void do_vfp_abss(void); | |
67 | +void do_vfp_absd(void); | |
68 | +void do_vfp_negs(void); | |
69 | +void do_vfp_negd(void); | |
70 | +void do_vfp_sqrts(void); | |
71 | +void do_vfp_sqrtd(void); | |
72 | +void do_vfp_cmps(void); | |
73 | +void do_vfp_cmpd(void); | |
74 | +void do_vfp_cmpes(void); | |
75 | +void do_vfp_cmped(void); | |
76 | +void do_vfp_set_fpscr(void); | |
77 | +void do_vfp_get_fpscr(void); | ... | ... |
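The FT0s/FT1s and FT0d/FT1d names introduced above are the VFP working values used by the generated micro-ops; here they simply alias the tmp* fields added to CPUARMState, as the TODO notes, since no spare host FP registers are assumed. A stand-alone model of that arrangement (structure and macro names follow the patch, the driver is purely illustrative):

    #include <stdio.h>

    struct vfp_state { float tmp0s, tmp1s; double tmp0d, tmp1d; };
    struct cpu_state { struct vfp_state vfp; };

    static struct cpu_state cpu, *env = &cpu;

    /* The working "registers" are just fields of the CPU state. */
    #define FT0s env->vfp.tmp0s
    #define FT1s env->vfp.tmp1s

    static void op_vfp_adds(void) { FT0s = FT0s + FT1s; }

    int main(void)
    {
        FT0s = 1.5f;
        FT1s = 2.25f;
        op_vfp_adds();
        printf("%g\n", FT0s);   /* 3.75 */
        return 0;
    }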
target-arm/op.c
... | ... | @@ -2,6 +2,7 @@ |
2 | 2 | * ARM micro operations |
3 | 3 | * |
4 | 4 | * Copyright (c) 2003 Fabrice Bellard |
5 | + * Copyright (c) 2005 CodeSourcery, LLC | |
5 | 6 | * |
6 | 7 | * This library is free software; you can redistribute it and/or |
7 | 8 | * modify it under the terms of the GNU Lesser General Public |
... | ... | @@ -857,17 +858,259 @@ void OPPROTO op_undef_insn(void) |
857 | 858 | cpu_loop_exit(); |
858 | 859 | } |
859 | 860 | |
860 | -/* thread support */ | |
861 | +/* VFP support.  We follow the convention used for VFP instructions: | |
862 | + Single precision routines have an "s" suffix, double precision a | |
863 | + "d" suffix. */ | |
861 | 864 | |
862 | -spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED; | |
865 | +#define VFP_OP(name, p) void OPPROTO op_vfp_##name##p(void) | |
863 | 866 | |
864 | -void cpu_lock(void) | |
867 | +#define VFP_BINOP(name, op) \ | |
868 | +VFP_OP(name, s) \ | |
869 | +{ \ | |
870 | + FT0s = FT0s op FT1s; \ | |
871 | +} \ | |
872 | +VFP_OP(name, d) \ | |
873 | +{ \ | |
874 | + FT0d = FT0d op FT1d; \ | |
875 | +} | |
876 | +VFP_BINOP(add, +) | |
877 | +VFP_BINOP(sub, -) | |
878 | +VFP_BINOP(mul, *) | |
879 | +VFP_BINOP(div, /) | |
880 | +#undef VFP_BINOP | |
881 | + | |
882 | +#define VFP_HELPER(name) \ | |
883 | +VFP_OP(name, s) \ | |
884 | +{ \ | |
885 | + do_vfp_##name##s(); \ | |
886 | +} \ | |
887 | +VFP_OP(name, d) \ | |
888 | +{ \ | |
889 | + do_vfp_##name##d(); \ | |
890 | +} | |
891 | +VFP_HELPER(abs) | |
892 | +VFP_HELPER(sqrt) | |
893 | +VFP_HELPER(cmp) | |
894 | +VFP_HELPER(cmpe) | |
895 | +#undef VFP_HELPER | |
896 | + | |
897 | +/* XXX: Will this do the right thing for NaNs?  Should invert the sign bit | |
898 | + without looking at the rest of the value. */ | |
899 | +VFP_OP(neg, s) | |
900 | +{ | |
901 | + FT0s = -FT0s; | |
902 | +} | |
903 | + | |
904 | +VFP_OP(neg, d) | |
905 | +{ | |
906 | + FT0d = -FT0d; | |
907 | +} | |
908 | + | |
909 | +VFP_OP(F1_ld0, s) | |
910 | +{ | |
911 | + FT1s = 0.0f; | |
912 | +} | |
913 | + | |
914 | +VFP_OP(F1_ld0, d) | |
915 | +{ | |
916 | + FT1d = 0.0; | |
917 | +} | |
918 | + | |
919 | +/* Helper routines to perform bitwise copies between float and int. */ | |
920 | +static inline float vfp_itos(uint32_t i) | |
921 | +{ | |
922 | + union { | |
923 | + uint32_t i; | |
924 | + float s; | |
925 | + } v; | |
926 | + | |
927 | + v.i = i; | |
928 | + return v.s; | |
929 | +} | |
930 | + | |
931 | +static inline uint32_t vfp_stoi(float s) | |
932 | +{ | |
933 | + union { | |
934 | + uint32_t i; | |
935 | + float s; | |
936 | + } v; | |
937 | + | |
938 | + v.s = s; | |
939 | + return v.i; | |
940 | +} | |
941 | + | |
942 | +/* Integer to float conversion. */ | |
943 | +VFP_OP(uito, s) | |
944 | +{ | |
945 | + FT0s = (float)(uint32_t)vfp_stoi(FT0s); | |
946 | +} | |
947 | + | |
948 | +VFP_OP(uito, d) | |
949 | +{ | |
950 | + FT0d = (double)(uint32_t)vfp_stoi(FT0s); | |
951 | +} | |
952 | + | |
953 | +VFP_OP(sito, s) | |
954 | +{ | |
955 | + FT0s = (float)(int32_t)vfp_stoi(FT0s); | |
956 | +} | |
957 | + | |
958 | +VFP_OP(sito, d) | |
959 | +{ | |
960 | + FT0d = (double)(int32_t)vfp_stoi(FT0s); | |
961 | +} | |
962 | + | |
963 | +/* Float to integer conversion. */ | |
964 | +VFP_OP(toui, s) | |
965 | +{ | |
966 | + FT0s = vfp_itos((uint32_t)FT0s); | |
967 | +} | |
968 | + | |
969 | +VFP_OP(toui, d) | |
970 | +{ | |
971 | + FT0s = vfp_itos((uint32_t)FT0d); | |
972 | +} | |
973 | + | |
974 | +VFP_OP(tosi, s) | |
975 | +{ | |
976 | + FT0s = vfp_itos((int32_t)FT0s); | |
977 | +} | |
978 | + | |
979 | +VFP_OP(tosi, d) | |
980 | +{ | |
981 | + FT0s = vfp_itos((int32_t)FT0d); | |
982 | +} | |
983 | + | |
984 | +/* TODO: Set rounding mode properly. */ | |
985 | +VFP_OP(touiz, s) | |
986 | +{ | |
987 | + FT0s = vfp_itos((uint32_t)FT0s); | |
988 | +} | |
989 | + | |
990 | +VFP_OP(touiz, d) | |
991 | +{ | |
992 | + FT0s = vfp_itos((uint32_t)FT0d); | |
993 | +} | |
994 | + | |
995 | +VFP_OP(tosiz, s) | |
996 | +{ | |
997 | + FT0s = vfp_itos((int32_t)FT0s); | |
998 | +} | |
999 | + | |
1000 | +VFP_OP(tosiz, d) | |
865 | 1001 | { |
866 | - spin_lock(&global_cpu_lock); | |
1002 | + FT0s = vfp_itos((int32_t)FT0d); | |
867 | 1003 | } |
868 | 1004 | |
869 | -void cpu_unlock(void) | |
1005 | +/* floating point conversion */ | |
1006 | +VFP_OP(fcvtd, s) | |
870 | 1007 | { |
871 | - spin_unlock(&global_cpu_lock); | |
1008 | + FT0d = (double)FT0s; | |
872 | 1009 | } |
873 | 1010 | |
1011 | +VFP_OP(fcvts, d) | |
1012 | +{ | |
1013 | + FT0s = (float)FT0d; | |
1014 | +} | |
1015 | + | |
1016 | +/* Get and Put values from registers. */ | |
1017 | +VFP_OP(getreg_F0, d) | |
1018 | +{ | |
1019 | + FT0d = *(double *)((char *) env + PARAM1); | |
1020 | +} | |
1021 | + | |
1022 | +VFP_OP(getreg_F0, s) | |
1023 | +{ | |
1024 | + FT0s = *(float *)((char *) env + PARAM1); | |
1025 | +} | |
1026 | + | |
1027 | +VFP_OP(getreg_F1, d) | |
1028 | +{ | |
1029 | + FT1d = *(double *)((char *) env + PARAM1); | |
1030 | +} | |
1031 | + | |
1032 | +VFP_OP(getreg_F1, s) | |
1033 | +{ | |
1034 | + FT1s = *(float *)((char *) env + PARAM1); | |
1035 | +} | |
1036 | + | |
1037 | +VFP_OP(setreg_F0, d) | |
1038 | +{ | |
1039 | + *(double *)((char *) env + PARAM1) = FT0d; | |
1040 | +} | |
1041 | + | |
1042 | +VFP_OP(setreg_F0, s) | |
1043 | +{ | |
1044 | + *(float *)((char *) env + PARAM1) = FT0s; | |
1045 | +} | |
1046 | + | |
1047 | +VFP_OP(foobar, d) | |
1048 | +{ | |
1049 | + FT0d = env->vfp.regs.s[3]; | |
1050 | +} | |
1051 | + | |
1052 | +void OPPROTO op_vfp_movl_T0_fpscr(void) | |
1053 | +{ | |
1054 | + do_vfp_get_fpscr (); | |
1055 | +} | |
1056 | + | |
1057 | +void OPPROTO op_vfp_movl_T0_fpscr_flags(void) | |
1058 | +{ | |
1059 | + T0 = env->vfp.fpscr & (0xf << 28); | |
1060 | +} | |
1061 | + | |
1062 | +void OPPROTO op_vfp_movl_fpscr_T0(void) | |
1063 | +{ | |
1064 | + do_vfp_set_fpscr(); | |
1065 | +} | |
1066 | + | |
1067 | +/* Move between FT0s and T0 */ | |
1068 | +void OPPROTO op_vfp_mrs(void) | |
1069 | +{ | |
1070 | + T0 = vfp_stoi(FT0s); | |
1071 | +} | |
1072 | + | |
1073 | +void OPPROTO op_vfp_msr(void) | |
1074 | +{ | |
1075 | + FT0s = vfp_itos(T0); | |
1076 | +} | |
1077 | + | |
1078 | +/* Move between FT0d and {T0,T1} */ | |
1079 | +void OPPROTO op_vfp_mrrd(void) | |
1080 | +{ | |
1081 | + CPU_DoubleU u; | |
1082 | + | |
1083 | + u.d = FT0d; | |
1084 | + T0 = u.l.lower; | |
1085 | + T1 = u.l.upper; | |
1086 | +} | |
1087 | + | |
1088 | +void OPPROTO op_vfp_mdrr(void) | |
1089 | +{ | |
1090 | + CPU_DoubleU u; | |
1091 | + | |
1092 | + u.l.lower = T0; | |
1093 | + u.l.upper = T1; | |
1094 | + FT0d = u.d; | |
1095 | +} | |
1096 | + | |
1097 | +/* Floating point load/store. Address is in T1 */ | |
1098 | +void OPPROTO op_vfp_lds(void) | |
1099 | +{ | |
1100 | + FT0s = ldfl((void *)T1); | |
1101 | +} | |
1102 | + | |
1103 | +void OPPROTO op_vfp_ldd(void) | |
1104 | +{ | |
1105 | + FT0d = ldfq((void *)T1); | |
1106 | +} | |
1107 | + | |
1108 | +void OPPROTO op_vfp_sts(void) | |
1109 | +{ | |
1110 | + stfl((void *)T1, FT0s); | |
1111 | +} | |
1112 | + | |
1113 | +void OPPROTO op_vfp_std(void) | |
1114 | +{ | |
1115 | + stfq((void *)T1, FT0d); | |
1116 | +} | ... | ... |
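Several of the ops above (uito, sito, toui, tosi and friends) depend on the convention that an integer operand or result occupies a single-precision register as a raw 32-bit pattern, which is why vfp_itos()/vfp_stoi() copy bits through a union rather than converting values. A short standalone illustration (not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Bit-exact copies between a 32-bit pattern and a float, as in op.c. */
    static float itos(uint32_t i) { union { uint32_t i; float s; } v; v.i = i; return v.s; }
    static uint32_t stoi(float s) { union { uint32_t i; float s; } v; v.s = s; return v.i; }

    int main(void)
    {
        float freg = 2.5f;
        /* An ftosiz-style conversion: compute the integer value, then keep
           it in the single-precision register as raw bits, not as a float. */
        freg = itos((uint32_t)(int32_t)freg);
        printf("0x%08x\n", (unsigned)stoi(freg));   /* prints 0x00000002 */
        return 0;
    }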
target-arm/op_helper.c
0 → 100644
1 | +/* | |
2 | + * ARM helper routines | |
3 | + * | |
4 | + * Copyright (c) 2005 CodeSourcery, LLC | |
5 | + * | |
6 | + * This library is free software; you can redistribute it and/or | |
7 | + * modify it under the terms of the GNU Lesser General Public | |
8 | + * License as published by the Free Software Foundation; either | |
9 | + * version 2 of the License, or (at your option) any later version. | |
10 | + * | |
11 | + * This library is distributed in the hope that it will be useful, | |
12 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | + * Lesser General Public License for more details. | |
15 | + * | |
16 | + * You should have received a copy of the GNU Lesser General Public | |
17 | + * License along with this library; if not, write to the Free Software | |
18 | + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
19 | + */ | |
20 | + | |
21 | +#include <math.h> | |
22 | +#include <fenv.h> | |
23 | +#include "exec.h" | |
24 | + | |
25 | +/* If the host doesn't define C99 math intrinsics then use the normal | |
26 | + operators. This may generate excess exceptions, but it's probably | |
27 | + near enough for most things. */ | |
28 | +#ifndef isless | |
29 | +#define isless(x, y) (x < y) | |
30 | +#endif | |
31 | +#ifndef isgreater | |
32 | +#define isgreater(x, y) (x > y) | |
33 | +#endif | |
34 | +#ifndef isunordered | |
35 | +#define isunordered(x, y) (!((x < y) || (x >= y))) | |
36 | +#endif | |
37 | + | |
38 | +void raise_exception(int tt) | |
39 | +{ | |
40 | + env->exception_index = tt; | |
41 | + cpu_loop_exit(); | |
42 | +} | |
43 | + | |
44 | +/* thread support */ | |
45 | + | |
46 | +spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED; | |
47 | + | |
48 | +void cpu_lock(void) | |
49 | +{ | |
50 | + spin_lock(&global_cpu_lock); | |
51 | +} | |
52 | + | |
53 | +void cpu_unlock(void) | |
54 | +{ | |
55 | + spin_unlock(&global_cpu_lock); | |
56 | +} | |
57 | + | |
58 | +/* VFP support. */ | |
59 | + | |
60 | +void do_vfp_abss(void) | |
61 | +{ | |
62 | + FT0s = fabsf(FT0s); | |
63 | +} | |
64 | + | |
65 | +void do_vfp_absd(void) | |
66 | +{ | |
67 | + FT0d = fabs(FT0d); | |
68 | +} | |
69 | + | |
70 | +void do_vfp_sqrts(void) | |
71 | +{ | |
72 | + FT0s = sqrtf(FT0s); | |
73 | +} | |
74 | + | |
75 | +void do_vfp_sqrtd(void) | |
76 | +{ | |
77 | + FT0d = sqrt(FT0d); | |
78 | +} | |
79 | + | |
80 | +/* We use an == operator first to generate the correct floating point | |
81 | + exception. Subsequent comparisons use the exception-safe macros. */ | |
82 | +#define DO_VFP_cmp(p) \ | |
83 | +void do_vfp_cmp##p(void) \ | |
84 | +{ \ | |
85 | + uint32_t flags; \ | |
86 | + if (FT0##p == FT1##p) \ | |
87 | + flags = 0xc; \ | |
88 | + else if (isless (FT0##p, FT1##p)) \ | |
89 | + flags = 0x8; \ | |
90 | + else if (isgreater (FT0##p, FT1##p)) \ | |
91 | + flags = 0x2; \ | |
92 | + else /* unordered */ \ | |
93 | + flags = 0x3; \ | |
94 | + env->vfp.fpscr = (flags << 28) | (env->vfp.fpscr & 0x0fffffff); \ | |
95 | + FORCE_RET(); \ | |
96 | +} | |
97 | +DO_VFP_cmp(s) | |
98 | +DO_VFP_cmp(d) | |
99 | +#undef DO_VFP_cmp | |
100 | + | |
101 | +/* We use a > operator first to get FP exceptions right. */ | |
102 | +#define DO_VFP_cmpe(p) \ | |
103 | +void do_vfp_cmpe##p(void) \ | |
104 | +{ \ | |
105 | + uint32_t flags; \ | |
106 | + if (FT0##p > FT1##p) \ | |
107 | + flags = 0x2; \ | |
108 | + else if (isless (FT0##p, FT1##p)) \ | |
109 | + flags = 0x8; \ | |
110 | + else if (isunordered (FT0##p, FT1##p)) \ | |
111 | + flags = 0x3; \ | |
112 | + else /* equal */ \ | |
113 | + flags = 0xc; \ | |
114 | + env->vfp.fpscr = (flags << 28) | (env->vfp.fpscr & 0x0fffffff); \ | |
115 | + FORCE_RET(); \ | |
116 | +} | |
117 | +DO_VFP_cmpe(s) | |
118 | +DO_VFP_cmpe(d) | |
119 | +#undef DO_VFP_cmpe | |
120 | + | |
121 | +/* Convert host exception flags to vfp form. */ | |
122 | +int vfp_exceptbits_from_host(int host_bits) | |
123 | +{ | |
124 | + int target_bits = 0; | |
125 | + | |
126 | +#ifdef FE_INVALID | |
127 | + if (host_bits & FE_INVALID) | |
128 | + target_bits |= 1; | |
129 | +#endif | |
130 | +#ifdef FE_DIVBYZERO | |
131 | + if (host_bits & FE_DIVBYZERO) | |
132 | + target_bits |= 2; | |
133 | +#endif | |
134 | +#ifdef FE_OVERFLOW | |
135 | + if (host_bits & FE_OVERFLOW) | |
136 | + target_bits |= 4; | |
137 | +#endif | |
138 | +#ifdef FE_UNDERFLOW | |
139 | + if (host_bits & FE_UNDERFLOW) | |
140 | + target_bits |= 8; | |
141 | +#endif | |
142 | +#ifdef FE_INEXACT | |
143 | + if (host_bits & FE_INEXACT) | |
144 | + target_bits |= 0x10; | |
145 | +#endif | |
146 | + /* C doesn't define an inexact exception. */ | |
147 | + return target_bits; | |
148 | +} | |
149 | + | |
150 | +/* Convert vfp exception flags to host form. */ | |
151 | +int vfp_exceptbits_to_host(int target_bits) | |
152 | +{ | |
153 | + int host_bits = 0; | |
154 | + | |
155 | +#ifdef FE_INVALID | |
156 | + if (target_bits & 1) | |
157 | + host_bits |= FE_INVALID; | |
158 | +#endif | |
159 | +#ifdef FE_DIVBYZERO | |
160 | + if (target_bits & 2) | |
161 | + host_bits |= FE_DIVBYZERO; | |
162 | +#endif | |
163 | +#ifdef FE_OVERFLOW | |
164 | + if (target_bits & 4) | |
165 | + host_bits |= FE_OVERFLOW; | |
166 | +#endif | |
167 | +#ifdef FE_UNDERFLOW | |
168 | + if (target_bits & 8) | |
169 | + host_bits |= FE_UNDERFLOW; | |
170 | +#endif | |
171 | +#ifdef FE_INEXACT | |
172 | + if (target_bits & 0x10) | |
173 | + host_bits |= FE_INEXACT; | |
174 | +#endif | |
175 | + return host_bits; | |
176 | +} | |
177 | + | |
178 | +void do_vfp_set_fpscr(void) | |
179 | +{ | |
180 | + int i; | |
181 | + uint32_t changed; | |
182 | + | |
183 | + changed = env->vfp.fpscr; | |
184 | + env->vfp.fpscr = (T0 & 0xffc8ffff); | |
185 | + env->vfp.vec_len = (T0 >> 16) & 7; | |
186 | + env->vfp.vec_stride = (T0 >> 20) & 3; | |
187 | + | |
188 | + changed ^= T0; | |
189 | + if (changed & (3 << 22)) { | |
190 | + i = (T0 >> 22) & 3; | |
191 | + switch (i) { | |
192 | + case 0: | |
193 | + i = FE_TONEAREST; | |
194 | + break; | |
195 | + case 1: | |
196 | + i = FE_UPWARD; | |
197 | + break; | |
198 | + case 2: | |
199 | + i = FE_DOWNWARD; | |
200 | + break; | |
201 | + case 3: | |
202 | + i = FE_TOWARDZERO; | |
203 | + break; | |
204 | + } | |
205 | + fesetround (i); | |
206 | + } | |
207 | + | |
208 | + /* Clear host exception flags. */ | |
209 | + feclearexcept(FE_ALL_EXCEPT); | |
210 | + | |
211 | +#ifdef feenableexcept | |
212 | + if (changed & 0x1f00) { | |
213 | + i = vfp_exceptbits_to_host((T0 >> 8) & 0x1f); | |
214 | + feenableexcept (i); | |
215 | + fedisableexcept (FE_ALL_EXCEPT & ~i); | |
216 | + } | |
217 | +#endif | |
218 | + /* XXX: FZ and DN are not implemented. */ | |
219 | +} | |
220 | + | |
221 | +void do_vfp_get_fpscr(void) | |
222 | +{ | |
223 | + int i; | |
224 | + | |
225 | + T0 = (env->vfp.fpscr & 0xffc8ffff) | (env->vfp.vec_len << 16) | |
226 | + | (env->vfp.vec_stride << 20); | |
227 | + i = fetestexcept(FE_ALL_EXCEPT); | |
228 | + T0 |= vfp_exceptbits_from_host(i); | |
229 | +} | ... | ... |
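Both compare helpers fold their result into the top four bits of the FPSCR, using 0xc for equal, 0x8 for less-than, 0x2 for greater-than and 0x3 for unordered, and leave the remaining bits untouched. A standalone sketch of that update (flag values copied from do_vfp_cmp above; the function name is invented for illustration):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t vfp_cmp_fpscr(double a, double b, uint32_t fpscr)
    {
        uint32_t flags;

        if (a == b)
            flags = 0xc;
        else if (isless(a, b))
            flags = 0x8;
        else if (isgreater(a, b))
            flags = 0x2;
        else                    /* unordered, e.g. a NaN operand */
            flags = 0x3;
        return (flags << 28) | (fpscr & 0x0fffffff);
    }

    int main(void)
    {
        printf("%08x\n", (unsigned)vfp_cmp_fpscr(1.0, 2.0, 0));   /* 80000000 */
        printf("%08x\n", (unsigned)vfp_cmp_fpscr(NAN, 2.0, 0));   /* 30000000 */
        return 0;
    }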
target-arm/translate.c
... | ... | @@ -2,6 +2,7 @@ |
2 | 2 | * ARM translation |
3 | 3 | * |
4 | 4 | * Copyright (c) 2003 Fabrice Bellard |
5 | + * Copyright (c) 2005 CodeSourcery, LLC | |
5 | 6 | * |
6 | 7 | * This library is free software; you can redistribute it and/or |
7 | 8 | * modify it under the terms of the GNU Lesser General Public |
... | ... | @@ -354,7 +355,527 @@ static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn) |
354 | 355 | } |
355 | 356 | } |
356 | 357 | |
357 | -static void disas_arm_insn(DisasContext *s) | |
358 | +#define VFP_OP(name) \ | |
359 | +static inline void gen_vfp_##name(int dp) \ | |
360 | +{ \ | |
361 | + if (dp) \ | |
362 | + gen_op_vfp_##name##d(); \ | |
363 | + else \ | |
364 | + gen_op_vfp_##name##s(); \ | |
365 | +} | |
366 | + | |
367 | +VFP_OP(add) | |
368 | +VFP_OP(sub) | |
369 | +VFP_OP(mul) | |
370 | +VFP_OP(div) | |
371 | +VFP_OP(neg) | |
372 | +VFP_OP(abs) | |
373 | +VFP_OP(sqrt) | |
374 | +VFP_OP(cmp) | |
375 | +VFP_OP(cmpe) | |
376 | +VFP_OP(F1_ld0) | |
377 | +VFP_OP(uito) | |
378 | +VFP_OP(sito) | |
379 | +VFP_OP(toui) | |
380 | +VFP_OP(touiz) | |
381 | +VFP_OP(tosi) | |
382 | +VFP_OP(tosiz) | |
383 | +VFP_OP(ld) | |
384 | +VFP_OP(st) | |
385 | + | |
386 | +#undef VFP_OP | |
387 | + | |
388 | +static inline void gen_mov_F0_vreg(int dp, int reg) | |
389 | +{ | |
390 | + if (dp) | |
391 | + gen_op_vfp_getreg_F0d(offsetof(CPUARMState, vfp.regs.d[reg])); | |
392 | + else | |
393 | + gen_op_vfp_getreg_F0s(offsetof(CPUARMState, vfp.regs.s[reg])); | |
394 | +} | |
395 | + | |
396 | +static inline void gen_mov_F1_vreg(int dp, int reg) | |
397 | +{ | |
398 | + if (dp) | |
399 | + gen_op_vfp_getreg_F1d(offsetof(CPUARMState, vfp.regs.d[reg])); | |
400 | + else | |
401 | + gen_op_vfp_getreg_F1s(offsetof(CPUARMState, vfp.regs.s[reg])); | |
402 | +} | |
403 | + | |
404 | +static inline void gen_mov_vreg_F0(int dp, int reg) | |
405 | +{ | |
406 | + if (dp) | |
407 | + gen_op_vfp_setreg_F0d(offsetof(CPUARMState, vfp.regs.d[reg])); | |
408 | + else | |
409 | + gen_op_vfp_setreg_F0s(offsetof(CPUARMState, vfp.regs.s[reg])); | |
410 | +} | |
411 | + | |
412 | +/* Disassemble a VFP instruction.  Returns nonzero if an error occurred | |
413 | + (i.e. an undefined instruction). */ | |
414 | +static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn) | |
415 | +{ | |
416 | + uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask; | |
417 | + int dp, veclen; | |
418 | + | |
419 | + dp = ((insn & 0xf00) == 0xb00); | |
420 | + switch ((insn >> 24) & 0xf) { | |
421 | + case 0xe: | |
422 | + if (insn & (1 << 4)) { | |
423 | + /* single register transfer */ | |
424 | + if ((insn & 0x6f) != 0x00) | |
425 | + return 1; | |
426 | + rd = (insn >> 12) & 0xf; | |
427 | + if (dp) { | |
428 | + if (insn & 0x80) | |
429 | + return 1; | |
430 | + rn = (insn >> 16) & 0xf; | |
431 | + /* Get the existing value even for arm->vfp moves because | |
432 | + we only set half the register. */ | |
433 | + gen_mov_F0_vreg(1, rn); | |
434 | + gen_op_vfp_mrrd(); | |
435 | + if (insn & (1 << 20)) { | |
436 | + /* vfp->arm */ | |
437 | + if (insn & (1 << 21)) | |
438 | + gen_movl_reg_T1(s, rd); | |
439 | + else | |
440 | + gen_movl_reg_T0(s, rd); | |
441 | + } else { | |
442 | + /* arm->vfp */ | |
443 | + if (insn & (1 << 21)) | |
444 | + gen_movl_T1_reg(s, rd); | |
445 | + else | |
446 | + gen_movl_T0_reg(s, rd); | |
447 | + gen_op_vfp_mdrr(); | |
448 | + gen_mov_vreg_F0(dp, rn); | |
449 | + } | |
450 | + } else { | |
451 | + rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); | |
452 | + if (insn & (1 << 20)) { | |
453 | + /* vfp->arm */ | |
454 | + if (insn & (1 << 21)) { | |
455 | + /* system register */ | |
456 | + switch (rn) { | |
457 | + case 0: /* fpsid */ | |
458 | + n = 0x0091A0000; | |
459 | + break; | |
460 | + case 2: /* fpscr */ | |
461 | + if (rd == 15) | |
462 | + gen_op_vfp_movl_T0_fpscr_flags(); | |
463 | + else | |
464 | + gen_op_vfp_movl_T0_fpscr(); | |
465 | + break; | |
466 | + default: | |
467 | + return 1; | |
468 | + } | |
469 | + } else { | |
470 | + gen_mov_F0_vreg(0, rn); | |
471 | + gen_op_vfp_mrs(); | |
472 | + } | |
473 | + if (rd == 15) { | |
474 | + /* This will only set the 4 flag bits */ | |
475 | + gen_op_movl_psr_T0(); | |
476 | + } else | |
477 | + gen_movl_reg_T0(s, rd); | |
478 | + } else { | |
479 | + /* arm->vfp */ | |
480 | + gen_movl_T0_reg(s, rd); | |
481 | + if (insn & (1 << 21)) { | |
482 | + /* system register */ | |
483 | + switch (rn) { | |
484 | + case 0: /* fpsid */ | |
485 | + /* Writes are ignored. */ | |
486 | + break; | |
487 | + case 2: /* fpscr */ | |
488 | + gen_op_vfp_movl_fpscr_T0(); | |
489 | + /* This could change vector settings, so jump to | |
490 | + the next instruction. */ | |
491 | + gen_op_movl_T0_im(s->pc); | |
492 | + gen_movl_reg_T0(s, 15); | |
493 | + s->is_jmp = DISAS_UPDATE; | |
494 | + break; | |
495 | + default: | |
496 | + return 1; | |
497 | + } | |
498 | + } else { | |
499 | + gen_op_vfp_msr(); | |
500 | + gen_mov_vreg_F0(0, rn); | |
501 | + } | |
502 | + } | |
503 | + } | |
504 | + } else { | |
505 | + /* data processing */ | |
506 | + /* The opcode is in bits 23, 21, 20 and 6. */ | |
507 | + op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1); | |
508 | + if (dp) { | |
509 | + if (op == 15) { | |
510 | + /* rn is opcode */ | |
511 | + rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); | |
512 | + } else { | |
513 | + /* rn is register number */ | |
514 | + if (insn & (1 << 7)) | |
515 | + return 1; | |
516 | + rn = (insn >> 16) & 0xf; | |
517 | + } | |
518 | + | |
519 | + if (op == 15 && (rn == 15 || rn > 17)) { | |
520 | + /* Integer or single precision destination. */ | |
521 | + rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1); | |
522 | + } else { | |
523 | + if (insn & (1 << 22)) | |
524 | + return 1; | |
525 | + rd = (insn >> 12) & 0xf; | |
526 | + } | |
527 | + | |
528 | + if (op == 15 && (rn == 16 || rn == 17)) { | |
529 | + /* Integer source. */ | |
530 | + rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1); | |
531 | + } else { | |
532 | + if (insn & (1 << 5)) | |
533 | + return 1; | |
534 | + rm = insn & 0xf; | |
535 | + } | |
536 | + } else { | |
537 | + rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); | |
538 | + if (op == 15 && rn == 15) { | |
539 | + /* Double precision destination. */ | |
540 | + if (insn & (1 << 22)) | |
541 | + return 1; | |
542 | + rd = (insn >> 12) & 0xf; | |
543 | + } else | |
544 | + rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1); | |
545 | + rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1); | |
546 | + } | |
547 | + | |
548 | + veclen = env->vfp.vec_len; | |
549 | + if (op == 15 && rn > 3) | |
550 | + veclen = 0; | |
551 | + | |
552 | + /* Shut up compiler warnings. */ | |
553 | + delta_m = 0; | |
554 | + delta_d = 0; | |
555 | + bank_mask = 0; | |
556 | + | |
557 | + if (veclen > 0) { | |
558 | + if (dp) | |
559 | + bank_mask = 0xc; | |
560 | + else | |
561 | + bank_mask = 0x18; | |
562 | + | |
563 | + /* Figure out what type of vector operation this is. */ | |
564 | + if ((rd & bank_mask) == 0) { | |
565 | + /* scalar */ | |
566 | + veclen = 0; | |
567 | + } else { | |
568 | + if (dp) | |
569 | + delta_d = (env->vfp.vec_stride >> 1) + 1; | |
570 | + else | |
571 | + delta_d = env->vfp.vec_stride + 1; | |
572 | + | |
573 | + if ((rm & bank_mask) == 0) { | |
574 | + /* mixed scalar/vector */ | |
575 | + delta_m = 0; | |
576 | + } else { | |
577 | + /* vector */ | |
578 | + delta_m = delta_d; | |
579 | + } | |
580 | + } | |
581 | + } | |
582 | + | |
583 | + /* Load the initial operands. */ | |
584 | + if (op == 15) { | |
585 | + switch (rn) { | |
586 | + case 16: | |
587 | + case 17: | |
588 | + /* Integer source */ | |
589 | + gen_mov_F0_vreg(0, rm); | |
590 | + break; | |
591 | + case 8: | |
592 | + case 9: | |
593 | + /* Compare */ | |
594 | + gen_mov_F0_vreg(dp, rd); | |
595 | + gen_mov_F1_vreg(dp, rm); | |
596 | + break; | |
597 | + case 10: | |
598 | + case 11: | |
599 | + /* Compare with zero */ | |
600 | + gen_mov_F0_vreg(dp, rd); | |
601 | + gen_vfp_F1_ld0(dp); | |
602 | + break; | |
603 | + default: | |
604 | + /* One source operand. */ | |
605 | + gen_mov_F0_vreg(dp, rm); | |
606 | + } | |
607 | + } else { | |
608 | + /* Two source operands. */ | |
609 | + gen_mov_F0_vreg(dp, rn); | |
610 | + gen_mov_F1_vreg(dp, rm); | |
611 | + } | |
612 | + | |
613 | + for (;;) { | |
614 | + /* Perform the calculation. */ | |
615 | + switch (op) { | |
616 | + case 0: /* mac: fd + (fn * fm) */ | |
617 | + gen_vfp_mul(dp); | |
618 | + gen_mov_F1_vreg(dp, rd); | |
619 | + gen_vfp_add(dp); | |
620 | + break; | |
621 | + case 1: /* nmac: fd - (fn * fm) */ | |
622 | + gen_vfp_mul(dp); | |
623 | + gen_vfp_neg(dp); | |
624 | + gen_mov_F1_vreg(dp, rd); | |
625 | + gen_vfp_add(dp); | |
626 | + break; | |
627 | + case 2: /* msc: -fd + (fn * fm) */ | |
628 | + gen_vfp_mul(dp); | |
629 | + gen_mov_F1_vreg(dp, rd); | |
630 | + gen_vfp_sub(dp); | |
631 | + break; | |
632 | + case 3: /* nmsc: -fd - (fn * fm) */ | |
633 | + gen_vfp_mul(dp); | |
634 | + gen_mov_F1_vreg(dp, rd); | |
635 | + gen_vfp_add(dp); | |
636 | + gen_vfp_neg(dp); | |
637 | + break; | |
638 | + case 4: /* mul: fn * fm */ | |
639 | + gen_vfp_mul(dp); | |
640 | + break; | |
641 | + case 5: /* nmul: -(fn * fm) */ | |
642 | + gen_vfp_mul(dp); | |
643 | + gen_vfp_neg(dp); | |
644 | + break; | |
645 | + case 6: /* add: fn + fm */ | |
646 | + gen_vfp_add(dp); | |
647 | + break; | |
648 | + case 7: /* sub: fn - fm */ | |
649 | + gen_vfp_sub(dp); | |
650 | + break; | |
651 | + case 8: /* div: fn / fm */ | |
652 | + gen_vfp_div(dp); | |
653 | + break; | |
654 | + case 15: /* extension space */ | |
655 | + switch (rn) { | |
656 | + case 0: /* cpy */ | |
657 | + /* no-op */ | |
658 | + break; | |
659 | + case 1: /* abs */ | |
660 | + gen_vfp_abs(dp); | |
661 | + break; | |
662 | + case 2: /* neg */ | |
663 | + gen_vfp_neg(dp); | |
664 | + break; | |
665 | + case 3: /* sqrt */ | |
666 | + gen_vfp_sqrt(dp); | |
667 | + break; | |
668 | + case 8: /* cmp */ | |
669 | + gen_vfp_cmp(dp); | |
670 | + break; | |
671 | + case 9: /* cmpe */ | |
672 | + gen_vfp_cmpe(dp); | |
673 | + break; | |
674 | + case 10: /* cmpz */ | |
675 | + gen_vfp_cmp(dp); | |
676 | + break; | |
677 | + case 11: /* cmpez */ | |
678 | + gen_vfp_F1_ld0(dp); | |
679 | + gen_vfp_cmpe(dp); | |
680 | + break; | |
681 | + case 15: /* single<->double conversion */ | |
682 | + if (dp) | |
683 | + gen_op_vfp_fcvtsd(); | |
684 | + else | |
685 | + gen_op_vfp_fcvtds(); | |
686 | + break; | |
687 | + case 16: /* fuito */ | |
688 | + gen_vfp_uito(dp); | |
689 | + break; | |
690 | + case 17: /* fsito */ | |
691 | + gen_vfp_sito(dp); | |
692 | + break; | |
693 | + case 24: /* ftoui */ | |
694 | + gen_vfp_toui(dp); | |
695 | + break; | |
696 | + case 25: /* ftouiz */ | |
697 | + gen_vfp_touiz(dp); | |
698 | + break; | |
699 | + case 26: /* ftosi */ | |
700 | + gen_vfp_tosi(dp); | |
701 | + break; | |
702 | + case 27: /* ftosiz */ | |
703 | + gen_vfp_tosiz(dp); | |
704 | + break; | |
705 | + default: /* undefined */ | |
706 | + printf ("rn:%d\n", rn); | |
707 | + return 1; | |
708 | + } | |
709 | + break; | |
710 | + default: /* undefined */ | |
711 | + printf ("op:%d\n", op); | |
712 | + return 1; | |
713 | + } | |
714 | + | |
715 | + /* Write back the result. */ | |
716 | + if (op == 15 && (rn >= 8 && rn <= 11)) | |
717 | + ; /* Comparison, do nothing. */ | |
718 | + else if (op == 15 && rn > 17) | |
719 | + /* Integer result. */ | |
720 | + gen_mov_vreg_F0(0, rd); | |
721 | + else if (op == 15 && rn == 15) | |
722 | + /* conversion */ | |
723 | + gen_mov_vreg_F0(!dp, rd); | |
724 | + else | |
725 | + gen_mov_vreg_F0(dp, rd); | |
726 | + | |
727 | + /* break out of the loop if we have finished */ | |
728 | + if (veclen == 0) | |
729 | + break; | |
730 | + | |
731 | + if (op == 15 && delta_m == 0) { | |
732 | + /* single source one-many */ | |
733 | + while (veclen--) { | |
734 | + rd = ((rd + delta_d) & (bank_mask - 1)) | |
735 | + | (rd & bank_mask); | |
736 | + gen_mov_vreg_F0(dp, rd); | |
737 | + } | |
738 | + break; | |
739 | + } | |
740 | + /* Setup the next operands. */ | |
741 | + veclen--; | |
742 | + rd = ((rd + delta_d) & (bank_mask - 1)) | |
743 | + | (rd & bank_mask); | |
744 | + | |
745 | + if (op == 15) { | |
746 | + /* One source operand. */ | |
747 | + rm = ((rm + delta_m) & (bank_mask - 1)) | |
748 | + | (rm & bank_mask); | |
749 | + gen_mov_F0_vreg(dp, rm); | |
750 | + } else { | |
751 | + /* Two source operands. */ | |
752 | + rn = ((rn + delta_d) & (bank_mask - 1)) | |
753 | + | (rn & bank_mask); | |
754 | + gen_mov_F0_vreg(dp, rn); | |
755 | + if (delta_m) { | |
756 | + rm = ((rm + delta_m) & (bank_mask - 1)) | |
757 | + | (rm & bank_mask); | |
758 | + gen_mov_F1_vreg(dp, rm); | |
759 | + } | |
760 | + } | |
761 | + } | |
762 | + } | |
763 | + break; | |
764 | + case 0xc: | |
765 | + case 0xd: | |
766 | + if (dp && (insn & (1 << 22))) { | |
767 | + /* two-register transfer */ | |
768 | + rn = (insn >> 16) & 0xf; | |
769 | + rd = (insn >> 12) & 0xf; | |
770 | + if (dp) { | |
771 | + if (insn & (1 << 5)) | |
772 | + return 1; | |
773 | + rm = insn & 0xf; | |
774 | + } else | |
775 | + rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1); | |
776 | + | |
777 | + if (insn & (1 << 20)) { | |
778 | + /* vfp->arm */ | |
779 | + if (dp) { | |
780 | + gen_mov_F0_vreg(1, rm); | |
781 | + gen_op_vfp_mrrd(); | |
782 | + gen_movl_reg_T0(s, rd); | |
783 | + gen_movl_reg_T1(s, rn); | |
784 | + } else { | |
785 | + gen_mov_F0_vreg(0, rm); | |
786 | + gen_op_vfp_mrs(); | |
787 | + gen_movl_reg_T0(s, rn); | |
788 | + gen_mov_F0_vreg(0, rm + 1); | |
789 | + gen_op_vfp_mrs(); | |
790 | + gen_movl_reg_T0(s, rd); | |
791 | + } | |
792 | + } else { | |
793 | + /* arm->vfp */ | |
794 | + if (dp) { | |
795 | + gen_movl_T0_reg(s, rd); | |
796 | + gen_movl_T1_reg(s, rn); | |
797 | + gen_op_vfp_mdrr(); | |
798 | + gen_mov_vreg_F0(1, rm); | |
799 | + } else { | |
800 | + gen_movl_T0_reg(s, rn); | |
801 | + gen_op_vfp_msr(); | |
802 | + gen_mov_vreg_F0(0, rm); | |
803 | + gen_movl_T0_reg(s, rd); | |
804 | + gen_op_vfp_msr(); | |
805 | + gen_mov_vreg_F0(0, rm + 1); | |
806 | + } | |
807 | + } | |
808 | + } else { | |
809 | + /* Load/store */ | |
810 | + rn = (insn >> 16) & 0xf; | |
811 | + if (dp) | |
812 | + rd = (insn >> 12) & 0xf; | |
813 | + else | |
814 | + rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1); | |
815 | + gen_movl_T1_reg(s, rn); | |
816 | + if ((insn & 0x01200000) == 0x01000000) { | |
817 | + /* Single load/store */ | |
818 | + offset = (insn & 0xff) << 2; | |
819 | + if ((insn & (1 << 23)) == 0) | |
820 | + offset = -offset; | |
821 | + gen_op_addl_T1_im(offset); | |
822 | + if (insn & (1 << 20)) { | |
823 | + gen_vfp_ld(dp); | |
824 | + gen_mov_vreg_F0(dp, rd); | |
825 | + } else { | |
826 | + gen_mov_F0_vreg(dp, rd); | |
827 | + gen_vfp_st(dp); | |
828 | + } | |
829 | + } else { | |
830 | + /* load/store multiple */ | |
831 | + if (dp) | |
832 | + n = (insn >> 1) & 0x7f; | |
833 | + else | |
834 | + n = insn & 0xff; | |
835 | + | |
836 | + if (insn & (1 << 24)) /* pre-decrement */ | |
837 | + gen_op_addl_T1_im(-((insn & 0xff) << 2)); | |
838 | + | |
839 | + if (dp) | |
840 | + offset = 8; | |
841 | + else | |
842 | + offset = 4; | |
843 | + for (i = 0; i < n; i++) { | |
844 | + if (insn & (1 << 20)) { | |
845 | + /* load */ | |
846 | + gen_vfp_ld(dp); | |
847 | + gen_mov_vreg_F0(dp, rd + i); | |
848 | + } else { | |
849 | + /* store */ | |
850 | + gen_mov_F0_vreg(dp, rd + i); | |
851 | + gen_vfp_st(dp); | |
852 | + } | |
853 | + gen_op_addl_T1_im(offset); | |
854 | + } | |
855 | + if (insn & (1 << 21)) { | |
856 | + /* writeback */ | |
857 | + if (insn & (1 << 24)) | |
858 | + offset = -offset * n; | |
859 | + else if (dp && (insn & 1)) | |
860 | + offset = 4; | |
861 | + else | |
862 | + offset = 0; | |
863 | + | |
864 | + if (offset != 0) | |
865 | + gen_op_addl_T1_im(offset); | |
866 | + gen_movl_reg_T1(s, rn); | |
867 | + } | |
868 | + } | |
869 | + } | |
870 | + break; | |
871 | + default: | |
872 | + /* Should never happen. */ | |
873 | + return 1; | |
874 | + } | |
875 | + return 0; | |
876 | +} | |
877 | + | |
878 | +static void disas_arm_insn(CPUState * env, DisasContext *s) | |
358 | 879 | { |
359 | 880 | unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh; |
360 | 881 | |
... | ... | @@ -363,6 +884,7 @@ static void disas_arm_insn(DisasContext *s) |
363 | 884 | |
364 | 885 | cond = insn >> 28; |
365 | 886 | if (cond == 0xf){ |
887 | + /* Unconditional instructions. */ | |
366 | 888 | if ((insn & 0x0d70f000) == 0x0550f000) |
367 | 889 | return; /* PLD */ |
368 | 890 | else if ((insn & 0x0e000000) == 0x0a000000) { |
... | ... | @@ -381,6 +903,10 @@ static void disas_arm_insn(DisasContext *s) |
381 | 903 | gen_op_movl_T0_im(val); |
382 | 904 | gen_bx(s); |
383 | 905 | return; |
906 | + } else if ((insn & 0x0fe00000) == 0x0c400000) { | |
907 | + /* Coprocessor double register transfer. */ | |
908 | + } else if ((insn & 0x0f000010) == 0x0e000010) { | |
909 | + /* Additional coprocessor register transfer. */ | |
384 | 910 | } |
385 | 911 | goto illegal_op; |
386 | 912 | } |
... | ... | @@ -934,6 +1460,22 @@ static void disas_arm_insn(DisasContext *s) |
934 | 1460 | s->is_jmp = DISAS_TB_JUMP; |
935 | 1461 | } |
936 | 1462 | break; |
1463 | + case 0xc: | |
1464 | + case 0xd: | |
1465 | + case 0xe: | |
1466 | + /* Coprocessor. */ | |
1467 | + op1 = (insn >> 8) & 0xf; | |
1468 | + switch (op1) { | |
1469 | + case 10: | |
1470 | + case 11: | |
1471 | + if (disas_vfp_insn (env, s, insn)) | |
1472 | + goto illegal_op; | |
1473 | + break; | |
1474 | + default: | |
1475 | + /* unknown coprocessor. */ | |
1476 | + goto illegal_op; | |
1477 | + } | |
1478 | + break; | |
937 | 1479 | case 0xf: |
938 | 1480 | /* swi */ |
939 | 1481 | gen_op_movl_T0_im((long)s->pc); |
... | ... | @@ -1484,7 +2026,7 @@ static inline int gen_intermediate_code_internal(CPUState *env, |
1484 | 2026 | if (env->thumb) |
1485 | 2027 | disas_thumb_insn(dc); |
1486 | 2028 | else |
1487 | - disas_arm_insn(dc); | |
2029 | + disas_arm_insn(env, dc); | |
1488 | 2030 | } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && |
1489 | 2031 | (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32)); |
1490 | 2032 | switch(dc->is_jmp) { |
... | ... | @@ -1557,6 +2099,11 @@ void cpu_dump_state(CPUState *env, FILE *f, |
1557 | 2099 | int flags) |
1558 | 2100 | { |
1559 | 2101 | int i; |
2102 | + union { | |
2103 | + uint32_t i; | |
2104 | + float s; | |
2105 | + } s0, s1; | |
2106 | + CPU_DoubleU d; | |
1560 | 2107 | |
1561 | 2108 | for(i=0;i<16;i++) { |
1562 | 2109 | cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]); |
... | ... | @@ -1566,11 +2113,23 @@ void cpu_dump_state(CPUState *env, FILE *f, |
1566 | 2113 | cpu_fprintf(f, " "); |
1567 | 2114 | } |
1568 | 2115 | cpu_fprintf(f, "PSR=%08x %c%c%c%c\n", |
1569 | - env->cpsr, | |
2116 | + env->cpsr, | |
1570 | 2117 | env->cpsr & (1 << 31) ? 'N' : '-', |
1571 | 2118 | env->cpsr & (1 << 30) ? 'Z' : '-', |
1572 | 2119 | env->cpsr & (1 << 29) ? 'C' : '-', |
1573 | 2120 | env->cpsr & (1 << 28) ? 'V' : '-'); |
2121 | + | |
2122 | + for (i = 0; i < 16; i++) { | |
2123 | + s0.s = env->vfp.regs.s[i * 2]; | |
2124 | + s1.s = env->vfp.regs.s[i * 2 + 1]; | |
2125 | + d.d = env->vfp.regs.d[i]; | |
2126 | + cpu_fprintf(f, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n", | |
2127 | + i * 2, (int)s0.i, s0.s, | |
2128 | + i * 2 + 1, (int)s1.i, s1.s, | |
2129 | + i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower, | |
2130 | + d.d); | |
2131 | + } | |
2132 | + cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.fpscr); | |
1574 | 2133 | } |
1575 | 2134 | |
1576 | 2135 | target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr) | ... | ... |
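A note on the register-number arithmetic that recurs throughout disas_vfp_insn() above: a single-precision register number is formed from a four-bit field plus one extra low bit taken from the D, N or M bit of the instruction, while double-precision registers use the four-bit field alone. Pulled out as standalone helpers for reference (the helper names are invented; the bit positions are taken from the code above):

    #include <stdint.h>

    /* Destination: Vd in bits 15:12, low bit D in bit 22. */
    static int vfp_sreg_d(uint32_t insn) { return ((insn >> 11) & 0x1e) | ((insn >> 22) & 1); }
    /* First operand: Vn in bits 19:16, low bit N in bit 7. */
    static int vfp_sreg_n(uint32_t insn) { return ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); }
    /* Second operand: Vm in bits 3:0, low bit M in bit 5. */
    static int vfp_sreg_m(uint32_t insn) { return ((insn << 1) & 0x1e) | ((insn >> 5) & 1); }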