Commit 3a27ad0b57f5ac1dc5aaf1805d8caa4be2deb2dd
1 parent 2b413144
added vm86, exceptions and self modifying regression tests
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@174 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 2 changed files with 423 additions and 0 deletions
tests/test-i386-vm86.S (new file, mode 100644)
@@ -0,0 +1,104 @@
+        .code16
+        .globl vm86_code_start
+        .globl vm86_code_end
+
+#define GET_OFFSET(x) ((x) - vm86_code_start + 0x100)
+
+vm86_code_start:
+        movw $GET_OFFSET(hello_world), %dx
+        movb $0x09, %ah
+        int $0x21
+
+        /* prepare int 0x90 vector */
+        xorw %ax, %ax
+        movw %ax, %es
+        es movw $GET_OFFSET(int90_test), 0x90 * 4
+        es movw %cs, 0x90 * 4 + 2
+
+        /* launch int 0x90 */
+
+        int $0x90
+
+        /* test IF support */
+        movw $GET_OFFSET(IF_msg), %dx
+        movb $0x09, %ah
+        int $0x21
+
+        pushf
+        popw %dx
+        movb $0xff, %ah
+        int $0x21
+
+        cli
+        pushf
+        popw %dx
+        movb $0xff, %ah
+        int $0x21
+
+        sti
+        pushfl
+        popl %edx
+        movb $0xff, %ah
+        int $0x21
+
+#if 0
+        movw $GET_OFFSET(IF_msg1), %dx
+        movb $0x09, %ah
+        int $0x21
+
+        pushf
+        movw %sp, %bx
+        andw $~0x200, (%bx)
+        popf
+#else
+        cli
+#endif
+
+        pushf
+        popw %dx
+        movb $0xff, %ah
+        int $0x21
+
+        pushfl
+        movw %sp, %bx
+        orw $0x200, (%bx)
+        popfl
+
+        pushfl
+        popl %edx
+        movb $0xff, %ah
+        int $0x21
+
+        movb $0x00, %ah
+        int $0x21
+
+int90_test:
+        pushf
+        pop %dx
+        movb $0xff, %ah
+        int $0x21
+
+        movw %sp, %bx
+        movw 4(%bx), %dx
+        movb $0xff, %ah
+        int $0x21
+
+        movw $GET_OFFSET(int90_msg), %dx
+        movb $0x09, %ah
+        int $0x21
+        iret
+
+int90_msg:
+        .string "INT90 started\n$"
+
+hello_world:
+        .string "Hello VM86 world\n$"
+
+IF_msg:
+        .string "VM86 IF test\n$"
+
+IF_msg1:
+        .string "If you see a diff here, your Linux kernel is buggy, please update to 2.4.20 kernel\n$"
+
+vm86_code_end:
+
\ No newline at end of file
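Note (not part of the commit): the payload above is laid out like a DOS .com program. GET_OFFSET() rebases every label to an offset starting at 0x100, and the harness in test-i386.c below enters it with CS = SS = DS = 0x100, so a label ends up at linear address (0x100 << 4) + GET_OFFSET(label) inside the flat low-memory mapping created by mmap(). A minimal standalone sketch of that address arithmetic, assuming a hypothetical label 0x40 bytes past vm86_code_start (the constants mirror VM86_CODE_CS/VM86_CODE_IP):

    /* sketch.c -- illustrates the real-mode address arithmetic used by the
     * vm86 test.  Not part of the commit. */
    #include <stdint.h>
    #include <stdio.h>

    /* A real-mode seg:off pair maps to linear address (seg << 4) + off,
     * which is what seg_to_linear() in test-i386.c computes as well. */
    static uint32_t seg_off_to_linear(uint16_t seg, uint16_t off)
    {
        return ((uint32_t)seg << 4) + off;
    }

    int main(void)
    {
        uint16_t cs = 0x100, ip = 0x100;   /* .com-style layout */
        uint16_t label_delta = 0x40;       /* hypothetical distance from vm86_code_start */
        uint16_t get_offset = 0x100 + label_delta;  /* what GET_OFFSET() yields */

        printf("entry point -> linear 0x%05x\n",
               (unsigned)seg_off_to_linear(cs, ip));
        printf("label       -> linear 0x%05x\n",
               (unsigned)seg_off_to_linear(cs, get_offset));
        return 0;
    }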
tests/test-i386.c
@@ -1,7 +1,13 @@
+#define _GNU_SOURCE
 #include <stdlib.h>
 #include <stdio.h>
 #include <inttypes.h>
 #include <math.h>
+#include <signal.h>
+#include <setjmp.h>
+#include <sys/ucontext.h>
+#include <sys/mman.h>
+#include <asm/vm86.h>
 
 #define TEST_CMOV 0
 
@@ -913,6 +919,316 @@ void test_string(void)
     TEST_STRING(cmps, "repnz ");
 }
 
+/* VM86 test */
+
+static inline void set_bit(uint8_t *a, unsigned int bit)
+{
+    a[bit / 8] |= (1 << (bit % 8));
+}
+
+static inline uint8_t *seg_to_linear(unsigned int seg, unsigned int reg)
+{
+    return (uint8_t *)((seg << 4) + (reg & 0xffff));
+}
+
+static inline void pushw(struct vm86_regs *r, int val)
+{
+    r->esp = (r->esp & ~0xffff) | ((r->esp - 2) & 0xffff);
+    *(uint16_t *)seg_to_linear(r->ss, r->esp) = val;
+}
+
+#undef __syscall_return
+#define __syscall_return(type, res) \
+do { \
+    return (type) (res); \
+} while (0)
+
+_syscall2(int, vm86, int, func, struct vm86plus_struct *, v86)
+
+extern char vm86_code_start;
+extern char vm86_code_end;
+
+#define VM86_CODE_CS 0x100
+#define VM86_CODE_IP 0x100
+
+void test_vm86(void)
+{
+    struct vm86plus_struct ctx;
+    struct vm86_regs *r;
+    uint8_t *vm86_mem;
+    int seg, ret;
+
+    vm86_mem = mmap((void *)0x00000000, 0x110000,
+                    PROT_WRITE | PROT_READ | PROT_EXEC,
+                    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+    if (vm86_mem == MAP_FAILED) {
+        printf("ERROR: could not map vm86 memory\n");
+        return;
+    }
+    memset(&ctx, 0, sizeof(ctx));
+
+    /* init basic registers */
+    r = &ctx.regs;
+    r->eip = VM86_CODE_IP;
+    r->esp = 0xfffe;
+    seg = VM86_CODE_CS;
+    r->cs = seg;
+    r->ss = seg;
+    r->ds = seg;
+    r->es = seg;
+    r->fs = seg;
+    r->gs = seg;
+    r->eflags = VIF_MASK;
+
+    /* move code to proper address. We use the same layout as a .com
+       dos program. */
+    memcpy(vm86_mem + (VM86_CODE_CS << 4) + VM86_CODE_IP,
+           &vm86_code_start, &vm86_code_end - &vm86_code_start);
+
+    /* mark int 0x21 as being emulated */
+    set_bit((uint8_t *)&ctx.int_revectored, 0x21);
+
+    for(;;) {
+        ret = vm86(VM86_ENTER, &ctx);
+        switch(VM86_TYPE(ret)) {
+        case VM86_INTx:
+            {
+                int int_num, ah;
+
+                int_num = VM86_ARG(ret);
+                if (int_num != 0x21)
+                    goto unknown_int;
+                ah = (r->eax >> 8) & 0xff;
+                switch(ah) {
+                case 0x00: /* exit */
+                    goto the_end;
+                case 0x02: /* write char */
+                    {
+                        uint8_t c = r->edx;
+                        putchar(c);
+                    }
+                    break;
+                case 0x09: /* write string */
+                    {
+                        uint8_t c, *ptr;
+                        ptr = seg_to_linear(r->ds, r->edx);
+                        for(;;) {
+                            c = *ptr++;
+                            if (c == '$')
+                                break;
+                            putchar(c);
+                        }
+                        r->eax = (r->eax & ~0xff) | '$';
+                    }
+                    break;
+                case 0xff: /* extension: write hex number in edx */
+                    printf("%08x\n", (int)r->edx);
+                    break;
+                default:
+                unknown_int:
+                    printf("unsupported int 0x%02x\n", int_num);
+                    goto the_end;
+                }
+            }
+            break;
+        case VM86_SIGNAL:
+            /* a signal came, we just ignore that */
+            break;
+        case VM86_STI:
+            break;
+        default:
+            printf("ERROR: unhandled vm86 return code (0x%x)\n", ret);
+            goto the_end;
+        }
+    }
+ the_end:
+    printf("VM86 end\n");
+    munmap(vm86_mem, 0x110000);
+}
+
+/* exception tests */
+#ifndef REG_EAX
+#define REG_EAX EAX
+#define REG_EBX EBX
+#define REG_ECX ECX
+#define REG_EDX EDX
+#define REG_ESI ESI
+#define REG_EDI EDI
+#define REG_EBP EBP
+#define REG_ESP ESP
+#define REG_EIP EIP
+#define REG_EFL EFL
+#define REG_TRAPNO TRAPNO
+#define REG_ERR ERR
+#endif
+
+jmp_buf jmp_env;
+int dump_eip;
+int dump_si_addr;
+int v1;
+int tab[2];
+
+void sig_handler(int sig, siginfo_t *info, void *puc)
+{
+    struct ucontext *uc = puc;
+
+    printf("si_signo=%d si_errno=%d si_code=%d",
+           info->si_signo, info->si_errno, info->si_code);
+    if (dump_si_addr) {
+        printf(" si_addr=0x%08lx",
+               (unsigned long)info->si_addr);
+    }
+    printf("\n");
+
+    printf("trapno=0x%02x err=0x%08x",
+           uc->uc_mcontext.gregs[REG_TRAPNO],
+           uc->uc_mcontext.gregs[REG_ERR]);
+    if (dump_eip)
+        printf(" EIP=0x%08x", uc->uc_mcontext.gregs[REG_EIP]);
+    printf("\n");
+    longjmp(jmp_env, 1);
+}
+
+void test_exceptions(void)
+{
+    struct sigaction act;
+    volatile int val;
+
+    act.sa_sigaction = sig_handler;
+    sigemptyset(&act.sa_mask);
+    act.sa_flags = SA_SIGINFO;
+    sigaction(SIGFPE, &act, NULL);
+    sigaction(SIGILL, &act, NULL);
+    sigaction(SIGSEGV, &act, NULL);
+    sigaction(SIGTRAP, &act, NULL);
+
+    /* test division by zero reporting */
+    dump_eip = 0;
+    dump_si_addr = 0;
+    printf("DIVZ exception (currently imprecise):\n");
+    if (setjmp(jmp_env) == 0) {
+        /* now divide by zero */
+        v1 = 0;
+        v1 = 2 / v1;
+    }
+
+    dump_si_addr = 1;
+    printf("BOUND exception (currently imprecise):\n");
+    if (setjmp(jmp_env) == 0) {
+        /* bound exception */
+        tab[0] = 1;
+        tab[1] = 10;
+        asm volatile ("bound %0, %1" : : "r" (11), "m" (tab));
+    }
+
+    /* test SEGV reporting */
+    printf("PF exception (currently imprecise):\n");
+    if (setjmp(jmp_env) == 0) {
+        /* now store in an invalid address */
+        *(char *)0x1234 = 1;
+    }
+
+    /* test SEGV reporting */
+    printf("PF exception (currently imprecise):\n");
+    if (setjmp(jmp_env) == 0) {
+        /* read from an invalid address */
+        v1 = *(char *)0x1234;
+    }
+
+    printf("segment GPF exception (currently imprecise):\n");
+    if (setjmp(jmp_env) == 0) {
+        /* load an invalid segment */
+        asm volatile ("movl %0, %%fs" : : "r" ((0x1234 << 3) | 0));
+    }
+
+    dump_eip = 1;
+    /* test illegal instruction reporting */
+    printf("UD2 exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* now execute an invalid instruction */
+        asm volatile("ud2");
+    }
+
+    printf("INT exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int $0xfd");
+    }
+
+    printf("INT3 exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int3");
+    }
+
+    printf("CLI exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("cli");
+    }
+
+    printf("STI exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("sti");
+    }
+
+    printf("INTO exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* overflow exception */
+        asm volatile ("addl $1, %0 ; into" : : "r" (0x7fffffff));
+    }
+
+    printf("OUTB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("outb %%al, %%dx" : : "d" (0x4321), "a" (0));
+    }
+
+    printf("INB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("inb %%dx, %%al" : "=a" (val) : "d" (0x4321));
+    }
+
+    printf("REP OUTSB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("rep outsb" : : "d" (0x4321), "S" (tab), "c" (1));
+    }
+
+    printf("REP INSB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("rep insb" : : "d" (0x4321), "D" (tab), "c" (1));
+    }
+
+    printf("HLT exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("hlt");
+    }
+
+    printf("single step exception:\n");
+    val = 0;
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("pushf\n"
+                      "orl $0x00100, (%%esp)\n"
+                      "popf\n"
+                      "movl $0xabcd, %0\n"
+                      "movl $0x0, %0\n" : "=m" (val) : : "cc", "memory");
+    }
+    printf("val=0x%x\n", val);
+}
+
+/* self modifying code test */
+uint8_t code[] = {
+    0xb8, 0x1, 0x00, 0x00, 0x00, /* movl $1, %eax */
+    0xc3, /* ret */
+};
+
+void test_self_modifying_code(void)
+{
+    int (*func)(void);
+
+    func = (void *)code;
+    printf("self modifying code:\n");
+    printf("func1 = 0x%x\n", func());
+    code[1] = 0x2;
+    printf("func1 = 0x%x\n", func());
+}
+
 static void *call_end __init_call = NULL;
 
 int main(int argc, char **argv)
@@ -936,5 +1252,8 @@ int main(int argc, char **argv)
     test_lea();
     test_segs();
     test_code16();
+    test_vm86();
+    test_exceptions();
+    test_self_modifying_code();
     return 0;
 }
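Closing note, not part of the commit: test_self_modifying_code() executes directly out of the writable code[] array, which worked on x86 Linux of this era because data pages were executable. On a host that enforces NX/W^X the call would fault, and a harness would first have to make the page executable. A hedged sketch of such a helper (the name make_code_executable is made up for illustration):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Make the page(s) containing 'buf' readable, writable and executable so
     * a modify-then-call test like the one above can run under NX.
     * Returns 0 on success, -1 on failure (errno is set by mprotect). */
    static int make_code_executable(uint8_t *buf, size_t len)
    {
        uintptr_t pagesz = (uintptr_t)sysconf(_SC_PAGESIZE);
        uintptr_t start  = (uintptr_t)buf & ~(pagesz - 1);
        uintptr_t end    = ((uintptr_t)buf + len + pagesz - 1) & ~(pagesz - 1);

        return mprotect((void *)start, end - start,
                        PROT_READ | PROT_WRITE | PROT_EXEC);
    }

It would be called as make_code_executable(code, sizeof(code)) before the first func() invocation.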