Commit c8a706fe6242a553960ccc3071a4e75ceba6f3d2
1 parent 30813cea
Multithreaded locking for mmap().
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4654 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 3 changed files with 98 additions and 28 deletions
exec.c
@@ -234,6 +234,7 @@ static void page_init(void)
         FILE *f;
         int n;
 
+        mmap_lock();
         last_brk = (unsigned long)sbrk(0);
         f = fopen("/proc/self/maps", "r");
         if (f) {
@@ -251,6 +252,7 @@ static void page_init(void)
             } while (!feof(f));
             fclose(f);
         }
+        mmap_unlock();
     }
 #endif
 }
@@ -326,6 +328,8 @@ static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
 static void tlb_protect_code(ram_addr_t ram_addr);
 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr);
+#define mmap_lock() do { } while(0)
+#define mmap_unlock() do { } while(0)
 #endif
 
 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
@@ -1049,6 +1053,9 @@ void tb_link_phys(TranslationBlock *tb,
     unsigned int h;
     TranslationBlock **ptb;
 
+    /* Grab the mmap lock to stop another thread invalidating this TB
+       before we are done.  */
+    mmap_lock();
     /* add in the physical hash table */
     h = tb_phys_hash_func(phys_pc);
     ptb = &tb_phys_hash[h];
@@ -1075,6 +1082,7 @@ void tb_link_phys(TranslationBlock *tb,
 #ifdef DEBUG_TB_CHECK
     tb_page_check();
 #endif
+    mmap_unlock();
 }
 
 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
@@ -2002,6 +2010,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
     PageDesc *p;
     target_ulong addr;
 
+    /* mmap_lock should already be held.  */
     start = start & TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);
     if (flags & PAGE_WRITE)
@@ -2065,11 +2074,18 @@ int page_unprotect(target_ulong address, unsigned long pc, void *puc)
     PageDesc *p, *p1;
     target_ulong host_start, host_end, addr;
 
+    /* Technically this isn't safe inside a signal handler.  However we
+       know this only ever happens in a synchronous SEGV handler, so in
+       practice it seems to be ok.  */
+    mmap_lock();
+
     host_start = address & qemu_host_page_mask;
     page_index = host_start >> TARGET_PAGE_BITS;
     p1 = page_find(page_index);
-    if (!p1)
+    if (!p1) {
+        mmap_unlock();
         return 0;
+    }
     host_end = host_start + qemu_host_page_size;
     p = p1;
     prot = 0;
@@ -2091,9 +2107,11 @@ int page_unprotect(target_ulong address, unsigned long pc, void *puc)
 #ifdef DEBUG_TB_CHECK
             tb_invalidate_check(address);
 #endif
+            mmap_unlock();
             return 1;
         }
     }
+    mmap_unlock();
     return 0;
 }
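Note on the exec.c hunks: in the non-user-mode (softmmu) branch, the hunk at @@ -326,6 +328,8 @@ defines mmap_lock() and mmap_unlock() as empty do { } while(0) macros, so call sites such as tb_link_phys() and page_unprotect() compile unchanged whether or not a real lock exists. The following standalone sketch shows that pattern; it is illustrative only and not part of the commit.

/* sketch_noop_lock.c -- illustrative only.  Shows how empty
 * do { } while (0) macros let the same call sites compile whether or
 * not a real lock exists, the pattern exec.c uses for softmmu builds. */
#include <stdio.h>

/* In a user-mode build these would be real functions around a pthread
 * mutex; only the no-op branch is shown here. */
#define mmap_lock()   do { } while (0)
#define mmap_unlock() do { } while (0)

static void link_tb(int id)
{
    mmap_lock();                /* a no-op in this configuration */
    printf("linking TB %d\n", id);
    mmap_unlock();
}

int main(void)
{
    /* The do/while form keeps each macro a single statement, so it is
     * still safe inside an unbraced if/else. */
    if (1)
        mmap_lock();
    else
        mmap_unlock();

    link_tb(42);
    return 0;
}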
linux-user/mmap.c
@@ -29,6 +29,34 @@
 
 //#define DEBUG_MMAP
 
+#if defined(USE_NPTL)
+pthread_mutex_t mmap_mutex;
+static int __thread mmap_lock_count;
+
+void mmap_lock(void)
+{
+    if (mmap_lock_count++ == 0) {
+        pthread_mutex_lock(&mmap_mutex);
+    }
+}
+
+void mmap_unlock(void)
+{
+    if (--mmap_lock_count == 0) {
+        pthread_mutex_unlock(&mmap_mutex);
+    }
+}
+#else
+/* We aren't threadsafe to start with, so no need to worry about locking.  */
+void mmap_lock(void)
+{
+}
+
+void mmap_unlock(void)
+{
+}
+#endif
+
 /* NOTE: all the constants are the HOST ones, but addresses are target.  */
 int target_mprotect(abi_ulong start, abi_ulong len, int prot)
 {
@@ -53,6 +81,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
     if (len == 0)
         return 0;
 
+    mmap_lock();
     host_start = start & qemu_host_page_mask;
     host_end = HOST_PAGE_ALIGN(end);
     if (start > host_start) {
@@ -69,7 +98,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
         }
         ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
         if (ret != 0)
-            return ret;
+            goto error;
         host_start += qemu_host_page_size;
     }
     if (end < host_end) {
@@ -80,7 +109,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
         ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                        prot1 & PAGE_BITS);
         if (ret != 0)
-            return ret;
+            goto error;
         host_end -= qemu_host_page_size;
     }
 
@@ -88,10 +117,14 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
     if (host_start < host_end) {
         ret = mprotect(g2h(host_start), host_end - host_start, prot);
         if (ret != 0)
-            return ret;
+            goto error;
     }
     page_set_flags(start, start + len, prot | PAGE_VALID);
+    mmap_unlock();
     return 0;
+error:
+    mmap_unlock();
+    return ret;
 }
 
 /* map an incomplete host page */
@@ -214,6 +247,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
     abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
     unsigned long host_start;
 
+    mmap_lock();
 #ifdef DEBUG_MMAP
     {
         printf("mmap: start=0x" TARGET_FMT_lx
@@ -243,12 +277,12 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
 
     if (offset & ~TARGET_PAGE_MASK) {
         errno = EINVAL;
-        return -1;
+        goto fail;
     }
 
     len = TARGET_PAGE_ALIGN(len);
     if (len == 0)
-        return start;
+        goto the_end;
     real_start = start & qemu_host_page_mask;
 
     if (!(flags & MAP_FIXED)) {
@@ -260,7 +294,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
         mmap_start = mmap_find_vma(real_start, host_len);
         if (mmap_start == (abi_ulong)-1) {
             errno = ENOMEM;
-            return -1;
+            goto fail;
         }
         /* Note: we prefer to control the mapping address. It is
            especially important if qemu_host_page_size >
@@ -268,7 +302,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
         p = mmap(g2h(mmap_start),
                  host_len, prot, flags | MAP_FIXED, fd, host_offset);
         if (p == MAP_FAILED)
-            return -1;
+            goto fail;
         /* update start so that it points to the file position at 'offset' */
         host_start = (unsigned long)p;
         if (!(flags & MAP_ANONYMOUS))
@@ -280,7 +314,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
 
     if (start & ~TARGET_PAGE_MASK) {
         errno = EINVAL;
-        return -1;
+        goto fail;
     }
     end = start + len;
     real_end = HOST_PAGE_ALIGN(end);
@@ -289,7 +323,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
         flg = page_get_flags(addr);
         if (flg & PAGE_RESERVED) {
             errno = ENXIO;
-            return -1;
+            goto fail;
         }
     }
 
@@ -302,18 +336,20 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
         if ((flags & MAP_TYPE) == MAP_SHARED &&
             (prot & PROT_WRITE)) {
             errno = EINVAL;
-            return -1;
+            goto fail;
         }
         retaddr = target_mmap(start, len, prot | PROT_WRITE,
                               MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                               -1, 0);
         if (retaddr == -1)
-            return -1;
+            goto fail;
         pread(fd, g2h(start), len, offset);
         if (!(prot & PROT_WRITE)) {
             ret = target_mprotect(start, len, prot);
-            if (ret != 0)
-                return ret;
+            if (ret != 0) {
+                start = ret;
+                goto the_end;
+            }
         }
         goto the_end;
     }
@@ -325,13 +361,13 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
             ret = mmap_frag(real_start, start, end,
                             prot, flags, fd, offset);
             if (ret == -1)
-                return ret;
+                goto fail;
             goto the_end1;
         }
         ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                         prot, flags, fd, offset);
         if (ret == -1)
-            return ret;
+            goto fail;
         real_start += qemu_host_page_size;
     }
     /* handle the end of the mapping */
@@ -341,7 +377,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                         prot, flags, fd,
                         offset + real_end - qemu_host_page_size - start);
         if (ret == -1)
-            return -1;
+            goto fail;
         real_end -= qemu_host_page_size;
     }
 
@@ -356,7 +392,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
             p = mmap(g2h(real_start), real_end - real_start,
                      prot, flags, fd, offset1);
             if (p == MAP_FAILED)
-                return -1;
+                goto fail;
         }
     }
  the_end1:
@@ -367,7 +403,11 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
     page_dump(stdout);
     printf("\n");
 #endif
+    mmap_unlock();
     return start;
+fail:
+    mmap_unlock();
+    return -1;
 }
 
 int target_munmap(abi_ulong start, abi_ulong len)
@@ -383,6 +423,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
     len = TARGET_PAGE_ALIGN(len);
     if (len == 0)
         return -EINVAL;
+    mmap_lock();
     end = start + len;
     real_start = start & qemu_host_page_mask;
     real_end = HOST_PAGE_ALIGN(end);
@@ -411,15 +452,16 @@ int target_munmap(abi_ulong start, abi_ulong len)
         real_end -= qemu_host_page_size;
     }
 
+    ret = 0;
     /* unmap what we can */
     if (real_start < real_end) {
         ret = munmap(g2h(real_start), real_end - real_start);
-        if (ret != 0)
-            return ret;
     }
 
-    page_set_flags(start, start + len, 0);
-    return 0;
+    if (ret == 0)
+        page_set_flags(start, start + len, 0);
+    mmap_unlock();
+    return ret;
 }
 
 /* XXX: currently, we only handle MAP_ANONYMOUS and not MAP_FIXED
@@ -431,14 +473,18 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
     int prot;
     unsigned long host_addr;
 
+    mmap_lock();
     /* XXX: use 5 args syscall */
    host_addr = (long)mremap(g2h(old_addr), old_size, new_size, flags);
-    if (host_addr == -1)
-        return -1;
-    new_addr = h2g(host_addr);
-    prot = page_get_flags(old_addr);
-    page_set_flags(old_addr, old_addr + old_size, 0);
-    page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
+    if (host_addr == -1) {
+        new_addr = -1;
+    } else {
+        new_addr = h2g(host_addr);
+        prot = page_get_flags(old_addr);
+        page_set_flags(old_addr, old_addr + old_size, 0);
+        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
+    }
+    mmap_unlock();
     return new_addr;
 }
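The locking scheme added at the top of linux-user/mmap.c is re-entrant: target_mmap() calls target_mprotect() and can call itself recursively, and each of those takes the lock again. The thread-local mmap_lock_count ensures that only the outermost acquisition touches the pthread mutex. The following standalone sketch reproduces that counting pattern; the names are illustrative, and the sketch initializes its mutex with PTHREAD_MUTEX_INITIALIZER, whereas the commit relies on the global mmap_mutex being zero-initialized.

/* sketch_recursive_lock.c -- illustrative only.  Mirrors the
 * mmap_lock()/mmap_unlock() pattern above: a pthread mutex plus a
 * per-thread nesting counter, so nested calls on the same thread do
 * not deadlock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int demo_lock_count;

static void demo_lock(void)
{
    /* Only the outermost call on this thread takes the mutex. */
    if (demo_lock_count++ == 0) {
        pthread_mutex_lock(&demo_mutex);
    }
}

static void demo_unlock(void)
{
    /* The mutex is dropped only when the outermost call returns. */
    if (--demo_lock_count == 0) {
        pthread_mutex_unlock(&demo_mutex);
    }
}

static void inner(void)            /* think target_mprotect() */
{
    demo_lock();                   /* counter goes 1 -> 2, no deadlock */
    /* ... update shared page flags ... */
    demo_unlock();
}

static void outer(void)            /* think target_mmap() */
{
    demo_lock();                   /* outermost call takes the mutex */
    inner();
    demo_unlock();                 /* counter back to 0, mutex released */
}

int main(void)
{
    outer();
    printf("nested lock/unlock completed\n");
    return 0;
}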
linux-user/qemu.h
@@ -233,6 +233,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                        abi_ulong new_addr);
 int target_msync(abi_ulong start, abi_ulong len, int flags);
 extern unsigned long last_brk;
+void mmap_lock(void);
+void mmap_unlock(void);
 
 /* user access */
 
@@ -423,4 +425,8 @@ static inline void *lock_user_string(abi_ulong guest_addr)
 #define unlock_user_struct(host_ptr, guest_addr, copy) \
     unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0)
 
+#if defined(USE_NPTL)
+#include <pthread.h>
+#endif
+
 #endif /* QEMU_H */
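A final observation on the mmap.c hunks: because the lock must be released on every exit path, most of the churn in target_mprotect() and target_mmap() is the conversion of early return statements into goto error, goto fail, or goto the_end jumps that funnel through a single unlock point. The sketch below illustrates that single-exit style with a hypothetical function; it is not the QEMU code.

/* sketch_goto_unlock.c -- illustrative only.  Shows the single-exit
 * style target_mmap() adopts: every failure path jumps to a label that
 * releases the lock, so no early return can leak it. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

static long do_map(unsigned long start, unsigned long len)
{
    long ret;

    pthread_mutex_lock(&demo_mutex);

    if (len == 0) {
        ret = start;            /* trivial success, still unlocks below */
        goto the_end;
    }
    if (start & 0xfff) {        /* stand-in for an alignment check */
        errno = EINVAL;
        goto fail;
    }

    ret = start;                /* pretend the mapping succeeded */
the_end:
    pthread_mutex_unlock(&demo_mutex);
    return ret;
fail:
    pthread_mutex_unlock(&demo_mutex);
    return -1;
}

int main(void)
{
    printf("aligned request    -> %ld\n", do_map(0x10000, 4096));
    printf("misaligned request -> %ld\n", do_map(0x10001, 4096));
    return 0;
}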