Commit 54936004fddc52c321cb3f9a9a51140e782bed5d
1 parent 74c95119
mmap emulation
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@158 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 9 changed files with 646 additions and 101 deletions
Makefile
... | ... | @@ -58,11 +58,11 @@ LDFLAGS+=-p |
58 | 58 | main.o: CFLAGS+=-p |
59 | 59 | endif |
60 | 60 | |
61 | -OBJS= elfload.o main.o syscall.o signal.o vm86.o path.o | |
61 | +OBJS= elfload.o main.o syscall.o mmap.o signal.o vm86.o path.o | |
62 | 62 | SRCS:= $(OBJS:.o=.c) |
63 | 63 | OBJS+= libqemu.a |
64 | 64 | |
65 | -LIBOBJS+=thunk.o translate-i386.o op-i386.o exec-i386.o | |
65 | +LIBOBJS+=thunk.o translate-i386.o op-i386.o exec-i386.o exec.o | |
66 | 66 | # NOTE: the disassembler code is only needed for debugging |
67 | 67 | LIBOBJS+=disas.o ppc-dis.o i386-dis.o alpha-dis.o dis-buf.o |
68 | 68 | ... | ... |
cpu-i386.h
... | ... | @@ -431,6 +431,30 @@ int cpu_x86_signal_handler(int host_signum, struct siginfo *info, |
431 | 431 | #define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */ |
432 | 432 | void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags); |
433 | 433 | |
434 | +/* page related stuff */ | |
435 | +#define TARGET_PAGE_BITS 12 | |
436 | +#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) | |
437 | +#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1) | |
438 | +#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK) | |
439 | + | |
440 | +extern unsigned long real_host_page_size; | |
441 | +extern unsigned long host_page_bits; | |
442 | +extern unsigned long host_page_size; | |
443 | +extern unsigned long host_page_mask; | |
444 | + | |
445 | +#define HOST_PAGE_ALIGN(addr) (((addr) + host_page_size - 1) & host_page_mask) | |
446 | + | |
447 | +/* same as PROT_xxx */ | |
448 | +#define PAGE_READ 0x0001 | |
449 | +#define PAGE_WRITE 0x0002 | |
450 | +#define PAGE_EXEC 0x0004 | |
451 | +#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC) | |
452 | +#define PAGE_VALID 0x0008 | |
453 | + | |
454 | +void page_dump(FILE *f); | |
455 | +int page_get_flags(unsigned long address); | |
456 | +void page_set_flags(unsigned long start, unsigned long end, int flags); | |
457 | + | |
434 | 458 | /* internal functions */ |
435 | 459 | |
436 | 460 | #define GEN_FLAG_CODE32_SHIFT 0 |
... | ... | @@ -446,5 +470,6 @@ int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size, |
446 | 470 | int *gen_code_size_ptr, |
447 | 471 | uint8_t *pc_start, uint8_t *cs_base, int flags); |
448 | 472 | void cpu_x86_tblocks_init(void); |
473 | +void page_init(void); | |
449 | 474 | |
450 | 475 | #endif /* CPU_I386_H */ | ... | ... |
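The new page constants separate the fixed 4 KB x86 target page from the host page, whose size is only known at runtime (and may be larger when overridden). TARGET_PAGE_MASK rounds an address down to its page start and TARGET_PAGE_ALIGN rounds it up to the next page boundary. A minimal standalone sketch with made-up values, not part of the commit itself:

    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
    #define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))
    #define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

    int main(void)
    {
        unsigned long addr = 0x1234;
        /* round down to the page start, and up to the next page boundary */
        printf("down=0x%lx up=0x%lx\n",
               addr & TARGET_PAGE_MASK,                  /* 0x1000 */
               (unsigned long)TARGET_PAGE_ALIGN(addr));  /* 0x2000 */
        return 0;
    }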
exec.c
0 → 100644
1 | +/* | |
2 | + * virtual page mapping | |
3 | + * | |
4 | + * Copyright (c) 2003 Fabrice Bellard | |
5 | + * | |
6 | + * This library is free software; you can redistribute it and/or | |
7 | + * modify it under the terms of the GNU Lesser General Public | |
8 | + * License as published by the Free Software Foundation; either | |
9 | + * version 2 of the License, or (at your option) any later version. | |
10 | + * | |
11 | + * This library is distributed in the hope that it will be useful, | |
12 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | + * Lesser General Public License for more details. | |
15 | + * | |
16 | + * You should have received a copy of the GNU Lesser General Public | |
17 | + * License along with this library; if not, write to the Free Software | |
18 | + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
19 | + */ | |
20 | +#include <stdlib.h> | |
21 | +#include <stdio.h> | |
22 | +#include <stdarg.h> | |
23 | +#include <string.h> | |
24 | +#include <errno.h> | |
25 | +#include <unistd.h> | |
26 | +#include <inttypes.h> | |
27 | + | |
28 | +#include "cpu-i386.h" | |
29 | + | |
30 | +/* XXX: pack the flags in the low bits of the pointer ? */ | |
31 | +typedef struct PageDesc { | |
32 | + struct TranslationBlock *first_tb; | |
33 | + unsigned long flags; | |
34 | +} PageDesc; | |
35 | + | |
36 | +#define L2_BITS 10 | |
37 | +#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS) | |
38 | + | |
39 | +#define L1_SIZE (1 << L1_BITS) | |
40 | +#define L2_SIZE (1 << L2_BITS) | |
41 | + | |
42 | +unsigned long real_host_page_size; | |
43 | +unsigned long host_page_bits; | |
44 | +unsigned long host_page_size; | |
45 | +unsigned long host_page_mask; | |
46 | + | |
47 | +static PageDesc *l1_map[L1_SIZE]; | |
48 | + | |
49 | +void page_init(void) | |
50 | +{ | |
51 | + /* NOTE: we can always suppose that host_page_size >= | |
52 | + TARGET_PAGE_SIZE */ | |
53 | + real_host_page_size = getpagesize(); | |
54 | + if (host_page_size == 0) | |
55 | + host_page_size = real_host_page_size; | |
56 | + if (host_page_size < TARGET_PAGE_SIZE) | |
57 | + host_page_size = TARGET_PAGE_SIZE; | |
58 | + host_page_bits = 0; | |
59 | + while ((1 << host_page_bits) < host_page_size) | |
60 | + host_page_bits++; | |
61 | + host_page_mask = ~(host_page_size - 1); | |
62 | +} | |
63 | + | |
64 | +/* dump memory mappings */ | |
65 | +void page_dump(FILE *f) | |
66 | +{ | |
67 | + unsigned long start, end; | |
68 | + int i, j, prot, prot1; | |
69 | + PageDesc *p; | |
70 | + | |
71 | + fprintf(f, "%-8s %-8s %-8s %s\n", | |
72 | + "start", "end", "size", "prot"); | |
73 | + start = -1; | |
74 | + end = -1; | |
75 | + prot = 0; | |
76 | + for(i = 0; i <= L1_SIZE; i++) { | |
77 | + if (i < L1_SIZE) | |
78 | + p = l1_map[i]; | |
79 | + else | |
80 | + p = NULL; | |
81 | + for(j = 0;j < L2_SIZE; j++) { | |
82 | + if (!p) | |
83 | + prot1 = 0; | |
84 | + else | |
85 | + prot1 = p[j].flags; | |
86 | + if (prot1 != prot) { | |
87 | + end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS); | |
88 | + if (start != -1) { | |
89 | + fprintf(f, "%08lx-%08lx %08lx %c%c%c\n", | |
90 | + start, end, end - start, | |
91 | + prot & PAGE_READ ? 'r' : '-', | |
92 | + prot & PAGE_WRITE ? 'w' : '-', | |
93 | + prot & PAGE_EXEC ? 'x' : '-'); | |
94 | + } | |
95 | + if (prot1 != 0) | |
96 | + start = end; | |
97 | + else | |
98 | + start = -1; | |
99 | + prot = prot1; | |
100 | + } | |
101 | + if (!p) | |
102 | + break; | |
103 | + } | |
104 | + } | |
105 | +} | |
106 | + | |
107 | + | |
108 | +static inline PageDesc *page_find_alloc(unsigned long address) | |
109 | +{ | |
110 | + unsigned int index; | |
111 | + PageDesc **lp, *p; | |
112 | + | |
113 | + index = address >> TARGET_PAGE_BITS; | |
114 | + lp = &l1_map[index >> L2_BITS]; | |
115 | + p = *lp; | |
116 | + if (!p) { | |
117 | + /* allocate if not found */ | |
118 | + p = malloc(sizeof(PageDesc) * L2_SIZE); | |
119 | + memset(p, 0, sizeof(PageDesc) * L2_SIZE); | 
120 | + *lp = p; | |
121 | + } | |
122 | + return p + (index & (L2_SIZE - 1)); | |
123 | +} | |
124 | + | |
125 | +int page_get_flags(unsigned long address) | |
126 | +{ | |
127 | + unsigned int index; | |
128 | + PageDesc *p; | |
129 | + | |
130 | + index = address >> TARGET_PAGE_BITS; | |
131 | + p = l1_map[index >> L2_BITS]; | |
132 | + if (!p) | |
133 | + return 0; | |
134 | + return p[index & (L2_SIZE - 1)].flags; | |
135 | +} | |
136 | + | |
137 | +void page_set_flags(unsigned long start, unsigned long end, int flags) | |
138 | +{ | |
139 | + PageDesc *p; | |
140 | + unsigned long addr; | |
141 | + | |
142 | + start = start & TARGET_PAGE_MASK; | |
143 | + end = TARGET_PAGE_ALIGN(end); | |
144 | + for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { | |
145 | + p = page_find_alloc(addr); | |
146 | + p->flags = flags; | |
147 | + } | |
148 | +} | ... | ... |
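exec.c keeps the per-page protection flags in a two-level table: the low TARGET_PAGE_BITS of an address are the page offset, the next L2_BITS select an entry in a lazily allocated PageDesc leaf, and the remaining high bits select the leaf itself in l1_map. A short sketch of the index arithmetic used by page_find_alloc() and page_get_flags(), with a made-up address:

    #include <stdio.h>

    int main(void)
    {
        /* with TARGET_PAGE_BITS = 12 and L2_BITS = 10:
           bits 31..22 -> l1_map slot, bits 21..12 -> PageDesc leaf slot,
           bits 11..0  -> offset inside the 4 KB page */
        unsigned long address    = 0x08049123;
        unsigned int  page_index = address >> 12;       /* 0x8049 */
        unsigned int  l1_index   = page_index >> 10;    /* 0x20   */
        unsigned int  l2_index   = page_index & 0x3ff;  /* 0x49   */
        printf("l1=0x%x l2=0x%x\n", l1_index, l2_index);
        return 0;
    }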
linux-user/elfload.c
... | ... | @@ -95,8 +95,6 @@ struct exec |
95 | 95 | #define ZMAGIC 0413 |
96 | 96 | #define QMAGIC 0314 |
97 | 97 | |
98 | -#define X86_STACK_TOP 0x7d000000 | |
99 | - | |
100 | 98 | /* max code+data+bss space allocated to elf interpreter */ |
101 | 99 | #define INTERP_MAP_SIZE (32 * 1024 * 1024) |
102 | 100 | |
... | ... | @@ -123,23 +121,11 @@ struct exec |
123 | 121 | #define PER_XENIX (0x0007 | STICKY_TIMEOUTS) |
124 | 122 | |
125 | 123 | /* Necessary parameters */ |
126 | -#define ALPHA_PAGE_SIZE 4096 | |
127 | -#define X86_PAGE_SIZE 4096 | |
128 | - | |
129 | -#define ALPHA_PAGE_MASK (~(ALPHA_PAGE_SIZE-1)) | |
130 | -#define X86_PAGE_MASK (~(X86_PAGE_SIZE-1)) | |
131 | - | |
132 | -#define ALPHA_PAGE_ALIGN(addr) ((((addr)+ALPHA_PAGE_SIZE)-1)&ALPHA_PAGE_MASK) | |
133 | -#define X86_PAGE_ALIGN(addr) ((((addr)+X86_PAGE_SIZE)-1)&X86_PAGE_MASK) | |
134 | - | |
135 | 124 | #define NGROUPS 32 |
136 | 125 | |
137 | -#define X86_ELF_EXEC_PAGESIZE X86_PAGE_SIZE | |
138 | -#define X86_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(X86_ELF_EXEC_PAGESIZE-1)) | |
139 | -#define X86_ELF_PAGEOFFSET(_v) ((_v) & (X86_ELF_EXEC_PAGESIZE-1)) | |
140 | - | |
141 | -#define ALPHA_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ALPHA_PAGE_SIZE-1)) | |
142 | -#define ALPHA_ELF_PAGEOFFSET(_v) ((_v) & (ALPHA_PAGE_SIZE-1)) | |
126 | +#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE | |
127 | +#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1)) | |
128 | +#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1)) | |
143 | 129 | |
144 | 130 | #define INTERPRETER_NONE 0 |
145 | 131 | #define INTERPRETER_AOUT 1 |
... | ... | @@ -160,9 +146,6 @@ static inline void memcpy_tofs(void * to, const void * from, unsigned long n) |
160 | 146 | memcpy(to, from, n); |
161 | 147 | } |
162 | 148 | |
163 | -//extern void * mmap4k(); | |
164 | -#define mmap4k(a, b, c, d, e, f) mmap((void *)(a), b, c, d, e, f) | |
165 | - | |
166 | 149 | extern unsigned long x86_stack_size; |
167 | 150 | |
168 | 151 | static int load_aout_interp(void * exptr, int interp_fd); |
... | ... | @@ -227,8 +210,8 @@ static void * get_free_page(void) |
227 | 210 | /* User-space version of kernel get_free_page. Returns a page-aligned |
228 | 211 | * page-sized chunk of memory. |
229 | 212 | */ |
230 | - retval = mmap4k(0, ALPHA_PAGE_SIZE, PROT_READ|PROT_WRITE, | |
231 | - MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | |
213 | + retval = (void *)target_mmap(0, host_page_size, PROT_READ|PROT_WRITE, | |
214 | + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | |
232 | 215 | |
233 | 216 | if((long)retval == -1) { |
234 | 217 | perror("get_free_page"); |
... | ... | @@ -241,7 +224,7 @@ static void * get_free_page(void) |
241 | 224 | |
242 | 225 | static void free_page(void * pageaddr) |
243 | 226 | { |
244 | - (void)munmap(pageaddr, ALPHA_PAGE_SIZE); | |
227 | + target_munmap((unsigned long)pageaddr, host_page_size); | |
245 | 228 | } |
246 | 229 | |
247 | 230 | /* |
... | ... | @@ -272,9 +255,9 @@ static unsigned long copy_strings(int argc,char ** argv,unsigned long *page, |
272 | 255 | while (len) { |
273 | 256 | --p; --tmp; --len; |
274 | 257 | if (--offset < 0) { |
275 | - offset = p % X86_PAGE_SIZE; | |
276 | - if (!(pag = (char *) page[p/X86_PAGE_SIZE]) && | |
277 | - !(pag = (char *) page[p/X86_PAGE_SIZE] = | |
258 | + offset = p % TARGET_PAGE_SIZE; | |
259 | + if (!(pag = (char *) page[p/TARGET_PAGE_SIZE]) && | |
260 | + !(pag = (char *) page[p/TARGET_PAGE_SIZE] = | |
278 | 261 | (unsigned long *) get_free_page())) { |
279 | 262 | return 0; |
280 | 263 | } |
... | ... | @@ -390,21 +373,21 @@ unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm, |
390 | 373 | * it for args, we'll use it for something else... |
391 | 374 | */ |
392 | 375 | size = x86_stack_size; |
393 | - if (size < MAX_ARG_PAGES*X86_PAGE_SIZE) | |
394 | - size = MAX_ARG_PAGES*X86_PAGE_SIZE; | |
395 | - error = (unsigned long)mmap4k(NULL, | |
396 | - size + X86_PAGE_SIZE, | |
397 | - PROT_READ | PROT_WRITE, | |
398 | - MAP_PRIVATE | MAP_ANONYMOUS, | |
399 | - -1, 0); | |
376 | + if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) | |
377 | + size = MAX_ARG_PAGES*TARGET_PAGE_SIZE; | |
378 | + error = target_mmap(0, | |
379 | + size + host_page_size, | |
380 | + PROT_READ | PROT_WRITE, | |
381 | + MAP_PRIVATE | MAP_ANONYMOUS, | |
382 | + -1, 0); | |
400 | 383 | if (error == -1) { |
401 | 384 | perror("stk mmap"); |
402 | 385 | exit(-1); |
403 | 386 | } |
404 | 387 | /* we reserve one extra page at the top of the stack as guard */ |
405 | - mprotect((void *)(error + size), X86_PAGE_SIZE, PROT_NONE); | |
388 | + target_mprotect(error + size, host_page_size, PROT_NONE); | |
406 | 389 | |
407 | - stack_base = error + size - MAX_ARG_PAGES*X86_PAGE_SIZE; | |
390 | + stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE; | |
408 | 391 | p += stack_base; |
409 | 392 | |
410 | 393 | if (bprm->loader) { |
... | ... | @@ -416,10 +399,10 @@ unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm, |
416 | 399 | if (bprm->page[i]) { |
417 | 400 | info->rss++; |
418 | 401 | |
419 | - memcpy((void *)stack_base, (void *)bprm->page[i], X86_PAGE_SIZE); | |
402 | + memcpy((void *)stack_base, (void *)bprm->page[i], TARGET_PAGE_SIZE); | |
420 | 403 | free_page((void *)bprm->page[i]); |
421 | 404 | } |
422 | - stack_base += X86_PAGE_SIZE; | |
405 | + stack_base += TARGET_PAGE_SIZE; | |
423 | 406 | } |
424 | 407 | return p; |
425 | 408 | } |
... | ... | @@ -427,13 +410,13 @@ unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm, |
427 | 410 | static void set_brk(unsigned long start, unsigned long end) |
428 | 411 | { |
429 | 412 | /* page-align the start and end addresses... */ |
430 | - start = ALPHA_PAGE_ALIGN(start); | |
431 | - end = ALPHA_PAGE_ALIGN(end); | |
413 | + start = HOST_PAGE_ALIGN(start); | |
414 | + end = HOST_PAGE_ALIGN(end); | |
432 | 415 | if (end <= start) |
433 | 416 | return; |
434 | - if((long)mmap4k(start, end - start, | |
435 | - PROT_READ | PROT_WRITE | PROT_EXEC, | |
436 | - MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) { | |
417 | + if(target_mmap(start, end - start, | |
418 | + PROT_READ | PROT_WRITE | PROT_EXEC, | |
419 | + MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) { | |
437 | 420 | perror("cannot mmap brk"); |
438 | 421 | exit(-1); |
439 | 422 | } |
... | ... | @@ -451,9 +434,9 @@ static void padzero(unsigned long elf_bss) |
451 | 434 | unsigned long nbyte; |
452 | 435 | char * fpnt; |
453 | 436 | |
454 | - nbyte = elf_bss & (ALPHA_PAGE_SIZE-1); /* was X86_PAGE_SIZE - JRP */ | |
437 | + nbyte = elf_bss & (host_page_size-1); /* was TARGET_PAGE_SIZE - JRP */ | |
455 | 438 | if (nbyte) { |
456 | - nbyte = ALPHA_PAGE_SIZE - nbyte; | |
439 | + nbyte = host_page_size - nbyte; | |
457 | 440 | fpnt = (char *) elf_bss; |
458 | 441 | do { |
459 | 442 | *fpnt++ = 0; |
... | ... | @@ -494,7 +477,7 @@ static unsigned int * create_elf_tables(char *p, int argc, int envc, |
494 | 477 | NEW_AUX_ENT (AT_PHDR, (target_ulong)(load_addr + exec->e_phoff)); |
495 | 478 | NEW_AUX_ENT (AT_PHENT, (target_ulong)(sizeof (struct elf_phdr))); |
496 | 479 | NEW_AUX_ENT (AT_PHNUM, (target_ulong)(exec->e_phnum)); |
497 | - NEW_AUX_ENT (AT_PAGESZ, (target_ulong)(ALPHA_PAGE_SIZE)); | |
480 | + NEW_AUX_ENT (AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE)); | |
498 | 481 | NEW_AUX_ENT (AT_BASE, (target_ulong)(interp_load_addr)); |
499 | 482 | NEW_AUX_ENT (AT_FLAGS, (target_ulong)0); |
500 | 483 | NEW_AUX_ENT (AT_ENTRY, load_bias + exec->e_entry); |
... | ... | @@ -554,7 +537,7 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex, |
554 | 537 | |
555 | 538 | /* Now read in all of the header information */ |
556 | 539 | |
557 | - if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > X86_PAGE_SIZE) | |
540 | + if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE) | |
558 | 541 | return ~0UL; |
559 | 542 | |
560 | 543 | elf_phdata = (struct elf_phdr *) |
... | ... | @@ -594,9 +577,9 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex, |
594 | 577 | if (interp_elf_ex->e_type == ET_DYN) { |
595 | 578 | /* in order to avoid hardcoding the interpreter load |
596 | 579 | address in qemu, we allocate a big enough memory zone */ |
597 | - error = (unsigned long)mmap4k(NULL, INTERP_MAP_SIZE, | |
598 | - PROT_NONE, MAP_PRIVATE | MAP_ANON, | |
599 | - -1, 0); | |
580 | + error = target_mmap(0, INTERP_MAP_SIZE, | |
581 | + PROT_NONE, MAP_PRIVATE | MAP_ANON, | |
582 | + -1, 0); | |
600 | 583 | if (error == -1) { |
601 | 584 | perror("mmap"); |
602 | 585 | exit(-1); |
... | ... | @@ -620,12 +603,12 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex, |
620 | 603 | elf_type |= MAP_FIXED; |
621 | 604 | vaddr = eppnt->p_vaddr; |
622 | 605 | } |
623 | - error = (unsigned long)mmap4k(load_addr+X86_ELF_PAGESTART(vaddr), | |
624 | - eppnt->p_filesz + X86_ELF_PAGEOFFSET(eppnt->p_vaddr), | |
606 | + error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr), | |
607 | + eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr), | |
625 | 608 | elf_prot, |
626 | 609 | elf_type, |
627 | 610 | interpreter_fd, |
628 | - eppnt->p_offset - X86_ELF_PAGEOFFSET(eppnt->p_vaddr)); | |
611 | + eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr)); | |
629 | 612 | |
630 | 613 | if (error > -1024UL) { |
631 | 614 | /* Real error */ |
... | ... | @@ -665,13 +648,13 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex, |
665 | 648 | * bss page. |
666 | 649 | */ |
667 | 650 | padzero(elf_bss); |
668 | - elf_bss = X86_ELF_PAGESTART(elf_bss + ALPHA_PAGE_SIZE - 1); /* What we have mapped so far */ | |
651 | + elf_bss = TARGET_ELF_PAGESTART(elf_bss + host_page_size - 1); /* What we have mapped so far */ | |
669 | 652 | |
670 | 653 | /* Map the last of the bss segment */ |
671 | 654 | if (last_bss > elf_bss) { |
672 | - mmap4k(elf_bss, last_bss-elf_bss, | |
673 | - PROT_READ|PROT_WRITE|PROT_EXEC, | |
674 | - MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | |
655 | + target_mmap(elf_bss, last_bss-elf_bss, | |
656 | + PROT_READ|PROT_WRITE|PROT_EXEC, | |
657 | + MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | |
675 | 658 | } |
676 | 659 | free(elf_phdata); |
677 | 660 | |
... | ... | @@ -742,7 +725,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * r |
742 | 725 | unsigned int interpreter_type = INTERPRETER_NONE; |
743 | 726 | unsigned char ibcs2_interpreter; |
744 | 727 | int i; |
745 | - void * mapped_addr; | |
728 | + unsigned long mapped_addr; | |
746 | 729 | struct elf_phdr * elf_ppnt; |
747 | 730 | struct elf_phdr *elf_phdata; |
748 | 731 | unsigned long elf_bss, k, elf_brk; |
... | ... | @@ -979,33 +962,32 @@ static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * r |
979 | 962 | is because the brk will follow the loader, and is not movable. */ |
980 | 963 | /* NOTE: for qemu, we do a big mmap to get enough space |
981 | 964 | without hardcoding any address */ |
982 | - error = (unsigned long)mmap4k(NULL, ET_DYN_MAP_SIZE, | |
983 | - PROT_NONE, MAP_PRIVATE | MAP_ANON, | |
984 | - -1, 0); | |
965 | + error = target_mmap(0, ET_DYN_MAP_SIZE, | |
966 | + PROT_NONE, MAP_PRIVATE | MAP_ANON, | |
967 | + -1, 0); | |
985 | 968 | if (error == -1) { |
986 | 969 | perror("mmap"); |
987 | 970 | exit(-1); |
988 | 971 | } |
989 | - load_bias = X86_ELF_PAGESTART(error - elf_ppnt->p_vaddr); | |
972 | + load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr); | |
990 | 973 | } |
991 | 974 | |
992 | - error = (unsigned long)mmap4k( | |
993 | - X86_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr), | |
994 | - (elf_ppnt->p_filesz + | |
995 | - X86_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)), | |
996 | - elf_prot, | |
997 | - (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE), | |
998 | - bprm->fd, | |
999 | - (elf_ppnt->p_offset - | |
1000 | - X86_ELF_PAGEOFFSET(elf_ppnt->p_vaddr))); | |
975 | + error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr), | |
976 | + (elf_ppnt->p_filesz + | |
977 | + TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)), | |
978 | + elf_prot, | |
979 | + (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE), | |
980 | + bprm->fd, | |
981 | + (elf_ppnt->p_offset - | |
982 | + TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr))); | |
1001 | 983 | if (error == -1) { |
1002 | 984 | perror("mmap"); |
1003 | 985 | exit(-1); |
1004 | 986 | } |
1005 | 987 | |
1006 | 988 | #ifdef LOW_ELF_STACK |
1007 | - if (X86_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack) | |
1008 | - elf_stack = X86_ELF_PAGESTART(elf_ppnt->p_vaddr); | |
989 | + if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack) | |
990 | + elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr); | |
1009 | 991 | #endif |
1010 | 992 | |
1011 | 993 | if (!load_addr_set) { |
... | ... | @@ -1013,7 +995,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * r |
1013 | 995 | load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset; |
1014 | 996 | if (elf_ex.e_type == ET_DYN) { |
1015 | 997 | load_bias += error - |
1016 | - X86_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr); | |
998 | + TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr); | |
1017 | 999 | load_addr += load_bias; |
1018 | 1000 | } |
1019 | 1001 | } |
... | ... | @@ -1108,8 +1090,8 @@ static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * r |
1108 | 1090 | and some applications "depend" upon this behavior. |
1109 | 1091 | Since we do not have the power to recompile these, we |
1110 | 1092 | emulate the SVr4 behavior. Sigh. */ |
1111 | - mapped_addr = mmap4k(NULL, ALPHA_PAGE_SIZE, PROT_READ | PROT_EXEC, | |
1112 | - MAP_FIXED | MAP_PRIVATE, -1, 0); | |
1093 | + mapped_addr = target_mmap(0, host_page_size, PROT_READ | PROT_EXEC, | |
1094 | + MAP_FIXED | MAP_PRIVATE, -1, 0); | |
1113 | 1095 | } |
1114 | 1096 | |
1115 | 1097 | #ifdef ELF_PLAT_INIT |
... | ... | @@ -1137,7 +1119,7 @@ int elf_exec(const char * filename, char ** argv, char ** envp, |
1137 | 1119 | int retval; |
1138 | 1120 | int i; |
1139 | 1121 | |
1140 | - bprm.p = X86_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int); | |
1122 | + bprm.p = TARGET_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int); | |
1141 | 1123 | for (i=0 ; i<MAX_ARG_PAGES ; i++) /* clear page-table */ |
1142 | 1124 | bprm.page[i] = 0; |
1143 | 1125 | retval = open(filename, O_RDONLY); | ... | ... |
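The former X86_/ALPHA_ page macros collapse into a single TARGET_ELF_PAGESTART/PAGEOFFSET pair, and every loader mmap now goes through target_mmap(). A small sketch (the segment values are hypothetical) of how a PT_LOAD request is widened to a page boundary while keeping file offset and virtual address congruent modulo the page size, as mmap requires:

    #include <stdio.h>

    #define TARGET_ELF_EXEC_PAGESIZE 4096
    #define TARGET_ELF_PAGESTART(_v)  ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
    #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))

    int main(void)
    {
        unsigned long p_vaddr = 0x08048074, p_offset = 0x74, p_filesz = 0x1000;
        printf("start=0x%lx len=0x%lx off=0x%lx\n",
               TARGET_ELF_PAGESTART(p_vaddr),              /* 0x08048000 */
               p_filesz + TARGET_ELF_PAGEOFFSET(p_vaddr),  /* 0x1074     */
               p_offset - TARGET_ELF_PAGEOFFSET(p_vaddr)); /* 0x0        */
        return 0;
    }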
linux-user/main.c
... | ... | @@ -231,13 +231,16 @@ void usage(void) |
231 | 231 | "usage: qemu [-h] [-d] [-L path] [-s size] program [arguments...]\n" |
232 | 232 | "Linux x86 emulator\n" |
233 | 233 | "\n" |
234 | - "-h print this help\n" | |
235 | - "-d activate log (logfile=%s)\n" | |
236 | - "-L path set the x86 elf interpreter prefix (default=%s)\n" | |
237 | - "-s size set the x86 stack size in bytes (default=%ld)\n", | |
238 | - DEBUG_LOGFILE, | |
234 | + "-h print this help\n" | |
235 | + "-L path set the x86 elf interpreter prefix (default=%s)\n" | |
236 | + "-s size set the x86 stack size in bytes (default=%ld)\n" | |
237 | + "\n" | |
238 | + "debug options:\n" | |
239 | + "-d activate log (logfile=%s)\n" | |
240 | + "-p pagesize set the host page size to 'pagesize'\n", | |
239 | 241 | interp_prefix, |
240 | - x86_stack_size); | |
242 | + x86_stack_size, | |
243 | + DEBUG_LOGFILE); | |
241 | 244 | _exit(1); |
242 | 245 | } |
243 | 246 | |
... | ... | @@ -284,6 +287,13 @@ int main(int argc, char **argv) |
284 | 287 | x86_stack_size *= 1024; |
285 | 288 | } else if (!strcmp(r, "L")) { |
286 | 289 | interp_prefix = argv[optind++]; |
290 | + } else if (!strcmp(r, "p")) { | |
291 | + host_page_size = atoi(argv[optind++]); | |
292 | + if (host_page_size == 0 || | |
293 | + (host_page_size & (host_page_size - 1)) != 0) { | |
294 | + fprintf(stderr, "page size must be a power of two\n"); | |
295 | + exit(1); | |
296 | + } | |
287 | 297 | } else { |
288 | 298 | usage(); |
289 | 299 | } |
... | ... | @@ -311,12 +321,18 @@ int main(int argc, char **argv) |
311 | 321 | /* Scan interp_prefix dir for replacement files. */ |
312 | 322 | init_paths(interp_prefix); |
313 | 323 | |
324 | + /* NOTE: we need to init the CPU at this stage to get the | |
325 | + host_page_size */ | |
326 | + env = cpu_x86_init(); | |
327 | + | |
314 | 328 | if (elf_exec(filename, argv+optind, environ, regs, info) != 0) { |
315 | 329 | printf("Error loading %s\n", filename); |
316 | 330 | _exit(1); |
317 | 331 | } |
318 | 332 | |
319 | 333 | if (loglevel) { |
334 | + page_dump(logfile); | |
335 | + | |
320 | 336 | fprintf(logfile, "start_brk 0x%08lx\n" , info->start_brk); |
321 | 337 | fprintf(logfile, "end_code 0x%08lx\n" , info->end_code); |
322 | 338 | fprintf(logfile, "start_code 0x%08lx\n" , info->start_code); |
... | ... | @@ -331,7 +347,6 @@ int main(int argc, char **argv) |
331 | 347 | syscall_init(); |
332 | 348 | signal_init(); |
333 | 349 | |
334 | - env = cpu_x86_init(); | |
335 | 350 | global_env = env; |
336 | 351 | |
337 | 352 | /* build Task State */ | ... | ... |
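The new -p option presets host_page_size before page_init() runs, which lets the mmap emulation be exercised as if the host had larger pages than the 4 KB x86 target; cpu_x86_init() is also moved ahead of elf_exec() so the loader already sees the final page size. A sketch of what page_init() derives from an overridden size (8192 is just an example value):

    #include <stdio.h>

    int main(void)
    {
        /* mirror of the page_init() computation for "-p 8192" */
        unsigned long host_page_size = 8192, host_page_bits = 0, host_page_mask;
        while ((1UL << host_page_bits) < host_page_size)
            host_page_bits++;
        host_page_mask = ~(host_page_size - 1);
        printf("bits=%lu mask=0x%lx\n", host_page_bits, host_page_mask); /* 13, 0x...e000 */
        return 0;
    }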
linux-user/mmap.c
0 → 100644
1 | +/* | |
2 | + * mmap support for qemu | |
3 | + * | |
4 | + * Copyright (c) 2003 Fabrice Bellard | |
5 | + * | |
6 | + * This program is free software; you can redistribute it and/or modify | |
7 | + * it under the terms of the GNU General Public License as published by | |
8 | + * the Free Software Foundation; either version 2 of the License, or | |
9 | + * (at your option) any later version. | |
10 | + * | |
11 | + * This program is distributed in the hope that it will be useful, | |
12 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | + * GNU General Public License for more details. | |
15 | + * | |
16 | + * You should have received a copy of the GNU General Public License | |
17 | + * along with this program; if not, write to the Free Software | |
18 | + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
19 | + */ | |
20 | +#include <stdlib.h> | |
21 | +#include <stdio.h> | |
22 | +#include <stdarg.h> | |
23 | +#include <string.h> | |
24 | +#include <unistd.h> | |
25 | +#include <errno.h> | |
26 | +#include <sys/mman.h> | |
27 | + | |
28 | +#include "qemu.h" | |
29 | + | |
30 | +//#define DEBUG_MMAP | |
31 | + | |
32 | +/* NOTE: all the constants are the HOST ones */ | |
33 | +int target_mprotect(unsigned long start, unsigned long len, int prot) | |
34 | +{ | |
35 | + unsigned long end, host_start, host_end, addr; | |
36 | + int prot1, ret; | |
37 | + | |
38 | +#ifdef DEBUG_MMAP | |
39 | + printf("mprotect: start=0x%lx len=0x%lx prot=%c%c%c\n", start, len, | |
40 | + prot & PROT_READ ? 'r' : '-', | |
41 | + prot & PROT_WRITE ? 'w' : '-', | |
42 | + prot & PROT_EXEC ? 'x' : '-'); | |
43 | +#endif | |
44 | + | |
45 | + if ((start & ~TARGET_PAGE_MASK) != 0) | |
46 | + return -EINVAL; | |
47 | + len = TARGET_PAGE_ALIGN(len); | |
48 | + end = start + len; | |
49 | + if (end < start) | |
50 | + return -EINVAL; | |
51 | + if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) | |
52 | + return -EINVAL; | |
53 | + if (len == 0) | |
54 | + return 0; | |
55 | + | |
56 | + host_start = start & host_page_mask; | |
57 | + host_end = HOST_PAGE_ALIGN(end); | |
58 | + if (start > host_start) { | |
59 | + /* handle host page containing start */ | |
60 | + prot1 = prot; | |
61 | + for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) { | |
62 | + prot1 |= page_get_flags(addr); | |
63 | + } | |
64 | + ret = mprotect((void *)host_start, host_page_size, prot1 & PAGE_BITS); | |
65 | + if (ret != 0) | |
66 | + return ret; | |
67 | + host_start += host_page_size; | |
68 | + } | |
69 | + if (end < host_end) { | |
70 | + /* handle host page containing end (can be the same as first page) */ | |
71 | + prot1 = prot; | |
72 | + for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) { | |
73 | + prot1 |= page_get_flags(addr); | |
74 | + } | |
75 | + ret = mprotect((void *)(host_end - host_page_size), host_page_size, | |
76 | + prot1 & PAGE_BITS); | |
77 | + if (ret != 0) | |
78 | + return ret; | |
79 | + host_end -= host_page_size; | |
80 | + } | |
81 | + | |
82 | + /* handle the pages in the middle */ | |
83 | + if (host_start < host_end) { | |
84 | + ret = mprotect((void *)host_start, host_end - host_start, prot); | |
85 | + if (ret != 0) | |
86 | + return ret; | |
87 | + } | |
88 | + | |
89 | + page_set_flags(start, start + len, prot | PAGE_VALID); | |
90 | + return 0; | |
91 | +} | |
92 | + | |
93 | +/* map an incomplete host page */ | |
94 | +int mmap_frag(unsigned long host_start, | |
95 | + unsigned long start, unsigned long end, | |
96 | + int prot, int flags, int fd, unsigned long offset) | |
97 | +{ | |
98 | + unsigned long host_end, ret, addr; | |
99 | + int prot1, prot_new; | |
100 | + | |
101 | + host_end = host_start + host_page_size; | |
102 | + | |
103 | + /* get the protection of the target pages outside the mapping */ | |
104 | + prot1 = 0; | |
105 | + for(addr = host_start; addr < host_end; addr++) { | |
106 | + if (addr < start || addr >= end) | |
107 | + prot1 |= page_get_flags(addr); | |
108 | + } | |
109 | + | |
110 | + if (prot1 == 0) { | |
111 | + /* no page was there, so we allocate one */ | |
112 | + ret = (long)mmap((void *)host_start, host_page_size, prot, | |
113 | + flags | MAP_ANONYMOUS, -1, 0); | |
114 | + if (ret == -1) | |
115 | + return ret; | |
116 | + } | |
117 | + prot1 &= PAGE_BITS; | |
118 | + | |
119 | + prot_new = prot | prot1; | |
120 | + if (!(flags & MAP_ANONYMOUS)) { | |
121 | + /* msync() won't work here, so we return an error if write is | |
122 | + possible while it is a shared mapping */ | |
123 | + if ((flags & MAP_TYPE) == MAP_SHARED && | |
124 | + (prot & PROT_WRITE)) | |
125 | + return -EINVAL; | |
126 | + | |
127 | + /* adjust protection to be able to read */ | |
128 | + if (!(prot1 & PROT_WRITE)) | |
129 | + mprotect((void *)host_start, host_page_size, prot1 | PROT_WRITE); | |
130 | + | |
131 | + /* read the corresponding file data */ | |
132 | + pread(fd, (void *)start, end - start, offset); | |
133 | + | |
134 | + /* put final protection */ | |
135 | + if (prot_new != (prot1 | PROT_WRITE)) | |
136 | + mprotect((void *)host_start, host_page_size, prot_new); | |
137 | + } else { | |
138 | + /* just update the protection */ | |
139 | + if (prot_new != prot1) { | |
140 | + mprotect((void *)host_start, host_page_size, prot_new); | |
141 | + } | |
142 | + } | |
143 | + return 0; | |
144 | +} | |
145 | + | |
146 | +/* NOTE: all the constants are the HOST ones */ | |
147 | +long target_mmap(unsigned long start, unsigned long len, int prot, | |
148 | + int flags, int fd, unsigned long offset) | |
149 | +{ | |
150 | + unsigned long ret, end, host_start, host_end, retaddr, host_offset, host_len; | |
151 | + | |
152 | +#ifdef DEBUG_MMAP | |
153 | + { | |
154 | + printf("mmap: start=0x%lx len=0x%lx prot=%c%c%c flags=", | |
155 | + start, len, | |
156 | + prot & PROT_READ ? 'r' : '-', | |
157 | + prot & PROT_WRITE ? 'w' : '-', | |
158 | + prot & PROT_EXEC ? 'x' : '-'); | |
159 | + if (flags & MAP_FIXED) | |
160 | + printf("MAP_FIXED "); | |
161 | + if (flags & MAP_ANONYMOUS) | |
162 | + printf("MAP_ANON "); | |
163 | + switch(flags & MAP_TYPE) { | |
164 | + case MAP_PRIVATE: | |
165 | + printf("MAP_PRIVATE "); | |
166 | + break; | |
167 | + case MAP_SHARED: | |
168 | + printf("MAP_SHARED "); | |
169 | + break; | |
170 | + default: | |
171 | + printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE); | |
172 | + break; | |
173 | + } | |
174 | + printf("fd=%d offset=%lx\n", fd, offset); | |
175 | + } | |
176 | +#endif | |
177 | + | |
178 | + if (offset & ~TARGET_PAGE_MASK) | |
179 | + return -EINVAL; | |
180 | + | |
181 | + len = TARGET_PAGE_ALIGN(len); | |
182 | + if (len == 0) | |
183 | + return start; | |
184 | + host_start = start & host_page_mask; | |
185 | + | |
186 | + if (!(flags & MAP_FIXED)) { | |
187 | + if (host_page_size != real_host_page_size) { | |
188 | + /* NOTE: this code is only for debugging with '-p' option */ | |
189 | + /* reserve a memory area */ | |
190 | + host_len = HOST_PAGE_ALIGN(len) + host_page_size - TARGET_PAGE_SIZE; | |
191 | + host_start = (long)mmap((void *)host_start, host_len, PROT_NONE, | |
192 | + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | |
193 | + if (host_start == -1) | |
194 | + return host_start; | |
195 | + host_end = host_start + host_len; | |
196 | + start = HOST_PAGE_ALIGN(host_start); | |
197 | + end = start + HOST_PAGE_ALIGN(len); | |
198 | + if (start > host_start) | |
199 | + munmap((void *)host_start, start - host_start); | |
200 | + if (end < host_end) | |
201 | + munmap((void *)end, host_end - end); | |
202 | + /* use it as a fixed mapping */ | |
203 | + flags |= MAP_FIXED; | |
204 | + } else { | |
205 | + /* if not fixed, no need to do anything */ | |
206 | + host_offset = offset & host_page_mask; | |
207 | + host_len = len + offset - host_offset; | |
208 | + start = (long)mmap((void *)host_start, host_len, | |
209 | + prot, flags, fd, host_offset); | |
210 | + if (start == -1) | |
211 | + return start; | |
212 | + /* update start so that it points to the file position at 'offset' */ | |
213 | + if (!(flags & MAP_ANONYMOUS)) | |
214 | + start += offset - host_offset; | |
215 | + goto the_end1; | |
216 | + } | |
217 | + } | |
218 | + | |
219 | + if (start & ~TARGET_PAGE_MASK) | |
220 | + return -EINVAL; | |
221 | + end = start + len; | |
222 | + host_end = HOST_PAGE_ALIGN(end); | |
223 | + | |
224 | + /* worst case: we cannot map the file because the offset is not | |
225 | + aligned, so we read it */ | |
226 | + if (!(flags & MAP_ANONYMOUS) && | |
227 | + (offset & ~host_page_mask) != (start & ~host_page_mask)) { | |
228 | + /* msync() won't work here, so we return an error if write is | |
229 | + possible while it is a shared mapping */ | |
230 | + if ((flags & MAP_TYPE) == MAP_SHARED && | |
231 | + (prot & PROT_WRITE)) | |
232 | + return -EINVAL; | |
233 | + retaddr = target_mmap(start, len, prot | PROT_WRITE, | |
234 | + MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, | |
235 | + -1, 0); | |
236 | + if (retaddr == -1) | |
237 | + return retaddr; | |
238 | + pread(fd, (void *)start, len, offset); | |
239 | + if (!(prot & PROT_WRITE)) { | |
240 | + ret = target_mprotect(start, len, prot); | |
241 | + if (ret != 0) | |
242 | + return ret; | |
243 | + } | |
244 | + goto the_end; | |
245 | + } | |
246 | + | |
247 | + /* handle the start of the mapping */ | |
248 | + if (start > host_start) { | |
249 | + if (host_end == host_start + host_page_size) { | |
250 | + /* one single host page */ | |
251 | + ret = mmap_frag(host_start, start, end, | |
252 | + prot, flags, fd, offset); | |
253 | + if (ret == -1) | |
254 | + return ret; | |
255 | + goto the_end1; | |
256 | + } | |
257 | + ret = mmap_frag(host_start, start, host_start + host_page_size, | |
258 | + prot, flags, fd, offset); | |
259 | + if (ret == -1) | |
260 | + return ret; | |
261 | + host_start += host_page_size; | |
262 | + } | |
263 | + /* handle the end of the mapping */ | |
264 | + if (end < host_end) { | |
265 | + ret = mmap_frag(host_end - host_page_size, | |
266 | + host_end - host_page_size, host_end, | |
267 | + prot, flags, fd, | |
268 | + offset + host_end - host_page_size - start); | |
269 | + if (ret == -1) | |
270 | + return ret; | |
271 | + host_end -= host_page_size; | |
272 | + } | |
273 | + | |
274 | + /* map the middle (easier) */ | |
275 | + if (host_start < host_end) { | |
276 | + ret = (long)mmap((void *)host_start, host_end - host_start, | |
277 | + prot, flags, fd, offset + host_start - start); | |
278 | + if (ret == -1) | |
279 | + return ret; | |
280 | + } | |
281 | + the_end1: | |
282 | + page_set_flags(start, start + len, prot | PAGE_VALID); | |
283 | + the_end: | |
284 | +#ifdef DEBUG_MMAP | |
285 | + page_dump(stdout); | |
286 | + printf("\n"); | |
287 | +#endif | |
288 | + return start; | |
289 | +} | |
290 | + | |
291 | +int target_munmap(unsigned long start, unsigned long len) | |
292 | +{ | |
293 | + unsigned long end, host_start, host_end, addr; | |
294 | + int prot, ret; | |
295 | + | |
296 | +#ifdef DEBUG_MMAP | |
297 | + printf("munmap: start=0x%lx len=0x%lx\n", start, len); | |
298 | +#endif | |
299 | + if (start & ~TARGET_PAGE_MASK) | |
300 | + return -EINVAL; | |
301 | + len = TARGET_PAGE_ALIGN(len); | |
302 | + if (len == 0) | |
303 | + return -EINVAL; | |
304 | + end = start + len; | |
305 | + host_start = start & host_page_mask; | |
306 | + host_end = HOST_PAGE_ALIGN(end); | |
307 | + | |
308 | + if (start > host_start) { | |
309 | + /* handle host page containing start */ | |
310 | + prot = 0; | |
311 | + for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) { | |
312 | + prot |= page_get_flags(addr); | |
313 | + } | |
314 | + if (prot != 0) | |
315 | + host_start += host_page_size; | |
316 | + } | |
317 | + if (end < host_end) { | |
318 | + /* handle host page containing end (can be the same as first page) */ | |
319 | + prot = 0; | |
320 | + for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) { | |
321 | + prot |= page_get_flags(addr); | |
322 | + } | |
323 | + if (prot != 0) | |
324 | + host_end -= host_page_size; | |
325 | + } | |
326 | + | |
327 | + /* unmap what we can */ | |
328 | + if (host_start < host_end) { | |
329 | + ret = munmap((void *)host_start, host_end - host_start); | |
330 | + if (ret != 0) | |
331 | + return ret; | |
332 | + } | |
333 | + | |
334 | + page_set_flags(start, start + len, 0); | |
335 | + return 0; | |
336 | +} | |
337 | + | |
338 | +/* XXX: currently, we only handle MAP_ANONYMOUS and not MAP_FIXED | |
339 | + blocks which have been allocated starting on a host page */ | |
340 | +long target_mremap(unsigned long old_addr, unsigned long old_size, | |
341 | + unsigned long new_size, unsigned long flags, | |
342 | + unsigned long new_addr) | |
343 | +{ | |
344 | + int prot; | |
345 | + | |
346 | + /* XXX: use 5 args syscall */ | |
347 | + new_addr = (long)mremap((void *)old_addr, old_size, new_size, flags); | |
348 | + if (new_addr == -1) | |
349 | + return new_addr; | |
350 | + prot = page_get_flags(old_addr); | |
351 | + page_set_flags(old_addr, old_addr + old_size, 0); | |
352 | + page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID); | |
353 | + return new_addr; | |
354 | +} | |
355 | + | |
356 | +int target_msync(unsigned long start, unsigned long len, int flags) | |
357 | +{ | |
358 | + unsigned long end; | |
359 | + | |
360 | + if (start & ~TARGET_PAGE_MASK) | |
361 | + return -EINVAL; | |
362 | + len = TARGET_PAGE_ALIGN(len); | |
363 | + if (len == 0) | |
364 | + return 0; | |
365 | + end = start + len; | |
366 | + | |
367 | + start &= host_page_mask; | |
368 | + return msync((void *)start, len, flags); | |
369 | +} | |
370 | + | ... | ... |
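When host pages are larger than target pages, target_mmap() cannot hand a fixed request straight to the host mmap(): it maps whole host pages for the middle of the range and lets mmap_frag() merge the partially covered host pages at either end with whatever target pages already live there. A worked sketch of that split (all addresses are made up; 8 KB host pages, 4 KB target pages):

    #include <stdio.h>

    int main(void)
    {
        unsigned long host_page_size = 0x2000;                 /* 8 KB host pages */
        unsigned long host_page_mask = ~(host_page_size - 1);
        unsigned long start = 0x7000, len = 0x4000;            /* target-page aligned request */
        unsigned long end = start + len;                                      /* 0xb000 */
        unsigned long host_start = start & host_page_mask;                    /* 0x6000 */
        unsigned long host_end = (end + host_page_size - 1) & host_page_mask; /* 0xc000 */

        if (start > host_start)   /* head: mmap_frag() merges [0x6000,0x8000) */
            host_start += host_page_size;
        if (end < host_end)       /* tail: mmap_frag() merges [0xa000,0xc000) */
            host_end -= host_page_size;
        /* middle: one plain host mmap() covers [0x8000,0xa000) */
        printf("middle: 0x%lx-0x%lx\n", host_start, host_end);
        return 0;
    }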
linux-user/qemu.h
... | ... | @@ -88,4 +88,14 @@ void handle_vm86_fault(CPUX86State *env); |
88 | 88 | int do_vm86(CPUX86State *env, long subfunction, |
89 | 89 | struct target_vm86plus_struct * target_v86); |
90 | 90 | |
91 | +/* mmap.c */ | |
92 | +int target_mprotect(unsigned long start, unsigned long len, int prot); | |
93 | +long target_mmap(unsigned long start, unsigned long len, int prot, | |
94 | + int flags, int fd, unsigned long offset); | |
95 | +int target_munmap(unsigned long start, unsigned long len); | |
96 | +long target_mremap(unsigned long old_addr, unsigned long old_size, | |
97 | + unsigned long new_size, unsigned long flags, | |
98 | + unsigned long new_addr); | |
99 | +int target_msync(unsigned long start, unsigned long len, int flags); | |
100 | + | |
91 | 101 | #endif | ... | ... |
linux-user/syscall.c
... | ... | @@ -64,11 +64,6 @@ |
64 | 64 | |
65 | 65 | //#define DEBUG |
66 | 66 | |
67 | -#ifndef PAGE_SIZE | |
68 | -#define PAGE_SIZE 4096 | |
69 | -#define PAGE_MASK ~(PAGE_SIZE - 1) | |
70 | -#endif | |
71 | - | |
72 | 67 | //#include <linux/msdos_fs.h> |
73 | 68 | #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct dirent [2]) |
74 | 69 | #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct dirent [2]) |
... | ... | @@ -153,7 +148,7 @@ static long do_brk(char *new_brk) |
153 | 148 | if (new_brk < target_original_brk) |
154 | 149 | return -ENOMEM; |
155 | 150 | |
156 | - brk_page = (char *)(((unsigned long)target_brk + PAGE_SIZE - 1) & PAGE_MASK); | |
151 | + brk_page = (char *)HOST_PAGE_ALIGN((unsigned long)target_brk); | |
157 | 152 | |
158 | 153 | /* If the new brk is less than this, set it and we're done... */ |
159 | 154 | if (new_brk < brk_page) { |
... | ... | @@ -162,11 +157,10 @@ static long do_brk(char *new_brk) |
162 | 157 | } |
163 | 158 | |
164 | 159 | /* We need to allocate more memory after the brk... */ |
165 | - new_alloc_size = ((new_brk - brk_page + 1)+(PAGE_SIZE-1)) & PAGE_MASK; | |
166 | - mapped_addr = get_errno((long)mmap((caddr_t)brk_page, new_alloc_size, | |
167 | - PROT_READ|PROT_WRITE, | |
168 | - MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0)); | |
169 | - | |
160 | + new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1); | |
161 | + mapped_addr = get_errno(target_mmap((unsigned long)brk_page, new_alloc_size, | |
162 | + PROT_READ|PROT_WRITE, | |
163 | + MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0)); | |
170 | 164 | if (is_error(mapped_addr)) { |
171 | 165 | return mapped_addr; |
172 | 166 | } else { |
... | ... | @@ -1709,7 +1703,7 @@ long do_syscall(void *cpu_env, int num, long arg1, long arg2, long arg3, |
1709 | 1703 | v4 = tswap32(vptr[3]); |
1710 | 1704 | v5 = tswap32(vptr[4]); |
1711 | 1705 | v6 = tswap32(vptr[5]); |
1712 | - ret = get_errno((long)mmap((void *)v1, v2, v3, v4, v5, v6)); | |
1706 | + ret = get_errno(target_mmap(v1, v2, v3, v4, v5, v6)); | |
1713 | 1707 | } |
1714 | 1708 | break; |
1715 | 1709 | #endif |
... | ... | @@ -1718,16 +1712,16 @@ long do_syscall(void *cpu_env, int num, long arg1, long arg2, long arg3, |
1718 | 1712 | #else |
1719 | 1713 | case TARGET_NR_mmap: |
1720 | 1714 | #endif |
1721 | - ret = get_errno((long)mmap((void *)arg1, arg2, arg3, arg4, arg5, arg6)); | |
1715 | + ret = get_errno(target_mmap(arg1, arg2, arg3, arg4, arg5, arg6)); | |
1722 | 1716 | break; |
1723 | 1717 | case TARGET_NR_munmap: |
1724 | - ret = get_errno(munmap((void *)arg1, arg2)); | |
1718 | + ret = get_errno(target_munmap(arg1, arg2)); | |
1725 | 1719 | break; |
1726 | 1720 | case TARGET_NR_mprotect: |
1727 | - ret = get_errno(mprotect((void *)arg1, arg2, arg3)); | |
1721 | + ret = get_errno(target_mprotect(arg1, arg2, arg3)); | |
1728 | 1722 | break; |
1729 | 1723 | case TARGET_NR_mremap: |
1730 | - ret = get_errno((long)mremap((void *)arg1, arg2, arg3, arg4)); | |
1724 | + ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); | |
1731 | 1725 | break; |
1732 | 1726 | case TARGET_NR_msync: |
1733 | 1727 | ret = get_errno(msync((void *)arg1, arg2, arg3)); | ... | ... |