Commit 94a6b54fd6d2d3321066cb4db7abeeb417af9365
1 parent b0457b69

Implement dynamic guest ram allocation.

Signed-off-by: Paul Brook <paul@codesourcery.com>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@7088 c046a42c-6fe2-441c-8c8c-71466251a162

Showing 5 changed files with 149 additions and 52 deletions
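
In outline, the patch drops the single phys_ram_base/phys_ram_size allocation and gives every qemu_ram_alloc() call its own host allocation, with exec.c translating guest ram offsets into host pointers on demand. From a board's point of view the flow looks roughly like this sketch (not part of the diff; cpu_register_physical_memory() is the pre-existing registration API and the constants are illustrative only):

    /* Allocate a ram region and register it at guest physical address 0;
       host access now goes through qemu_get_ram_ptr() instead of
       phys_ram_base + offset. */
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

    uint8_t *p = qemu_get_ram_ptr(ram_offset);   /* host view of the region  */
    p[0] = 0x90;                                 /* e.g. poke a byte into it */
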
cpu-all.h
... | ... | @@ -854,11 +854,10 @@ typedef unsigned long ram_addr_t; |
854 | 854 | |
855 | 855 | /* memory API */ |
856 | 856 | |
857 | -extern ram_addr_t phys_ram_size; | |
858 | 857 | extern int phys_ram_fd; |
859 | -extern uint8_t *phys_ram_base; | |
860 | 858 | extern uint8_t *phys_ram_dirty; |
861 | 859 | extern ram_addr_t ram_size; |
860 | +extern ram_addr_t last_ram_offset; | |
862 | 861 | |
863 | 862 | /* physical memory access */ |
864 | 863 |
exec-all.h
... | ... | @@ -370,6 +370,9 @@ void kqemu_record_dump(void); |
370 | 370 | |
371 | 371 | extern uint32_t kqemu_comm_base; |
372 | 372 | |
373 | +extern ram_addr_t kqemu_phys_ram_size; | |
374 | +extern uint8_t *kqemu_phys_ram_base; | |
375 | + | |
373 | 376 | static inline int kqemu_is_ok(CPUState *env) |
374 | 377 | { |
375 | 378 | return(env->kqemu_enabled &&
exec.c
... | ... | @@ -107,12 +107,22 @@ static unsigned long code_gen_buffer_max_size; |
107 | 107 | uint8_t *code_gen_ptr; |
108 | 108 | |
109 | 109 | #if !defined(CONFIG_USER_ONLY) |
110 | -ram_addr_t phys_ram_size; | |
111 | 110 | int phys_ram_fd; |
112 | -uint8_t *phys_ram_base; | |
113 | 111 | uint8_t *phys_ram_dirty; |
114 | 112 | static int in_migration; |
115 | -static ram_addr_t phys_ram_alloc_offset = 0; | |
113 | + | |
114 | +typedef struct RAMBlock { | |
115 | + uint8_t *host; | |
116 | + ram_addr_t offset; | |
117 | + ram_addr_t length; | |
118 | + struct RAMBlock *next; | |
119 | +} RAMBlock; | |
120 | + | |
121 | +static RAMBlock *ram_blocks; | |
122 | +/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug) | |
123 | + then we can no longer assume contiguous ram offsets, and external uses | |
124 | + of this variable will break. */ | |
125 | +ram_addr_t last_ram_offset; | |
116 | 126 | #endif |
117 | 127 | |
118 | 128 | CPUState *first_cpu; |
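
The RAMBlock list above is the heart of the change: every allocation adds one node, offsets grow monotonically, and last_ram_offset doubles as the total amount of registered ram for as long as nothing is ever freed (hence the TODO). A minimal standalone sketch of that bookkeeping, using hypothetical demo_* names rather than the real QEMU symbols:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_PAGE_SIZE  4096ULL            /* assume 4K target pages */
    #define DEMO_PAGE_ALIGN(x) (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

    typedef struct DemoRAMBlock {
        uint8_t *host;                 /* host backing for this block            */
        uint64_t offset;               /* start of the block in ram-offset space */
        uint64_t length;
        struct DemoRAMBlock *next;
    } DemoRAMBlock;

    static DemoRAMBlock *demo_ram_blocks;
    static uint64_t demo_last_ram_offset;

    /* Allocate host memory for a new block, link it in and return its guest
       ram offset -- the shape of the new qemu_ram_alloc(). */
    static uint64_t demo_ram_alloc(uint64_t size)
    {
        DemoRAMBlock *block = malloc(sizeof(*block));

        size = DEMO_PAGE_ALIGN(size);
        block->host = calloc(size, 1);
        block->offset = demo_last_ram_offset;
        block->length = size;
        block->next = demo_ram_blocks;
        demo_ram_blocks = block;
        demo_last_ram_offset += size;
        return block->offset;
    }

    int main(void)
    {
        uint64_t a = demo_ram_alloc(8 << 20);      /* e.g. main memory */
        uint64_t b = demo_ram_alloc(256 << 10);    /* e.g. video ram   */
        printf("a at 0x%llx, b at 0x%llx, last offset 0x%llx\n",
               (unsigned long long)a, (unsigned long long)b,
               (unsigned long long)demo_last_ram_offset);
        return 0;
    }
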
... | ... | @@ -411,7 +421,7 @@ static void code_gen_alloc(unsigned long tb_size) |
411 | 421 | code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE; |
412 | 422 | #else |
413 | 423 | /* XXX: needs adjustments */ |
414 | - code_gen_buffer_size = (unsigned long)(phys_ram_size / 4); | |
424 | + code_gen_buffer_size = (unsigned long)(ram_size / 4); | |
415 | 425 | #endif |
416 | 426 | } |
417 | 427 | if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE) |
... | ... | @@ -2419,22 +2429,55 @@ void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) |
2419 | 2429 | kvm_uncoalesce_mmio_region(addr, size); |
2420 | 2430 | } |
2421 | 2431 | |
2432 | +#ifdef USE_KQEMU | |
2422 | 2433 | /* XXX: better than nothing */ |
2423 | -ram_addr_t qemu_ram_alloc(ram_addr_t size) | |
2434 | +static ram_addr_t kqemu_ram_alloc(ram_addr_t size) | |
2424 | 2435 | { |
2425 | 2436 | ram_addr_t addr; |
2426 | - if ((phys_ram_alloc_offset + size) > phys_ram_size) { | |
2437 | + if ((last_ram_offset + size) > kqemu_phys_ram_size) { | |
2427 | 2438 | fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n", |
2428 | - (uint64_t)size, (uint64_t)phys_ram_size); | |
2439 | + (uint64_t)size, (uint64_t)kqemu_phys_ram_size); | |
2429 | 2440 | abort(); |
2430 | 2441 | } |
2431 | - addr = phys_ram_alloc_offset; | |
2432 | - phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size); | |
2442 | + addr = last_ram_offset; | |
2443 | + last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size); | |
2433 | 2444 | return addr; |
2434 | 2445 | } |
2446 | +#endif | |
2447 | + | |
2448 | +ram_addr_t qemu_ram_alloc(ram_addr_t size) | |
2449 | +{ | |
2450 | + RAMBlock *new_block; | |
2451 | + | |
2452 | +#ifdef USE_KQEMU | |
2453 | + if (kqemu_phys_ram_base) { | |
2454 | + return kqemu_ram_alloc(size); | |
2455 | + } | |
2456 | +#endif | |
2457 | + | |
2458 | + size = TARGET_PAGE_ALIGN(size); | |
2459 | + new_block = qemu_malloc(sizeof(*new_block)); | |
2460 | + | |
2461 | + new_block->host = qemu_vmalloc(size); | |
2462 | + new_block->offset = last_ram_offset; | |
2463 | + new_block->length = size; | |
2464 | + | |
2465 | + new_block->next = ram_blocks; | |
2466 | + ram_blocks = new_block; | |
2467 | + | |
2468 | + phys_ram_dirty = qemu_realloc(phys_ram_dirty, | |
2469 | + (last_ram_offset + size) >> TARGET_PAGE_BITS); | |
2470 | + memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS), | |
2471 | + 0xff, size >> TARGET_PAGE_BITS); | |
2472 | + | |
2473 | + last_ram_offset += size; | |
2474 | + | |
2475 | + return new_block->offset; | |
2476 | +} | |
2435 | 2477 | |
2436 | 2478 | void qemu_ram_free(ram_addr_t addr) |
2437 | 2479 | { |
2480 | + /* TODO: implement this. */ | |
2438 | 2481 | } |
2439 | 2482 | |
2440 | 2483 | /* Return a host pointer to ram allocated with qemu_ram_alloc. |
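
One detail of the new qemu_ram_alloc() worth calling out: the dirty-page array is no longer sized once from phys_ram_size but grown with qemu_realloc() each time a block is added, and the new pages start out fully dirty (0xff). The kqemu path above keeps the old bump-allocator behaviour because the kernel module needs one contiguous region. A standalone sketch of just the bitmap growth, with hypothetical demo_* names and one flag byte per page as in exec.c:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define DEMO_PAGE_BITS 12                  /* assume 4K target pages */

    static uint8_t *demo_ram_dirty;            /* one flag byte per page */
    static uint64_t demo_last_ram_offset;

    /* Grow the dirty array to cover a freshly allocated (page-aligned) block
       and mark its pages dirty, mirroring the realloc/memset pair in
       qemu_ram_alloc(). */
    static void demo_grow_dirty_bitmap(uint64_t size)
    {
        demo_ram_dirty = realloc(demo_ram_dirty,
                                 (demo_last_ram_offset + size) >> DEMO_PAGE_BITS);
        memset(demo_ram_dirty + (demo_last_ram_offset >> DEMO_PAGE_BITS),
               0xff, size >> DEMO_PAGE_BITS);
        demo_last_ram_offset += size;
    }

    int main(void)
    {
        demo_grow_dirty_bitmap(8 << 20);       /* first block: 8 MB */
        demo_grow_dirty_bitmap(1 << 20);       /* later block: 1 MB */
        free(demo_ram_dirty);
        return 0;
    }
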
... | ... | @@ -2447,14 +2490,69 @@ void qemu_ram_free(ram_addr_t addr) |
2447 | 2490 | */ |
2448 | 2491 | void *qemu_get_ram_ptr(ram_addr_t addr) |
2449 | 2492 | { |
2450 | - return phys_ram_base + addr; | |
2493 | + RAMBlock *prev; | |
2494 | + RAMBlock **prevp; | |
2495 | + RAMBlock *block; | |
2496 | + | |
2497 | +#ifdef USE_KQEMU | |
2498 | + if (kqemu_phys_ram_base) { | |
2499 | + return kqemu_phys_ram_base + addr; | |
2500 | + } | |
2501 | +#endif | |
2502 | + | |
2503 | + prev = NULL; | |
2504 | + prevp = &ram_blocks; | |
2505 | + block = ram_blocks; | |
2506 | + while (block && (block->offset > addr | |
2507 | + || block->offset + block->length <= addr)) { | |
2508 | + if (prev) | |
2509 | + prevp = &prev->next; | |
2510 | + prev = block; | |
2511 | + block = block->next; | |
2512 | + } | |
2513 | + if (!block) { | |
2514 | + fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); | |
2515 | + abort(); | |
2516 | + } | |
2517 | + /* Move this entry to the start of the list. */ |
2518 | + if (prev) { | |
2519 | + prev->next = block->next; | |
2520 | + block->next = *prevp; | |
2521 | + *prevp = block; | |
2522 | + } | |
2523 | + return block->host + (addr - block->offset); | |
2451 | 2524 | } |
2452 | 2525 | |
2453 | 2526 | /* Some of the softmmu routines need to translate from a host pointer |
2454 | 2527 | (typically a TLB entry) back to a ram offset. */ |
2455 | 2528 | ram_addr_t qemu_ram_addr_from_host(void *ptr) |
2456 | 2529 | { |
2457 | - return (uint8_t *)ptr - phys_ram_base; | |
2530 | + RAMBlock *prev; | |
2531 | + RAMBlock **prevp; | |
2532 | + RAMBlock *block; | |
2533 | + uint8_t *host = ptr; | |
2534 | + | |
2535 | +#ifdef USE_KQEMU | |
2536 | + if (kqemu_phys_ram_base) { | |
2537 | + return host - kqemu_phys_ram_base; | |
2538 | + } | |
2539 | +#endif | |
2540 | + | |
2541 | + prev = NULL; | |
2542 | + prevp = &ram_blocks; | |
2543 | + block = ram_blocks; | |
2544 | + while (block && (block->host > host | |
2545 | + || block->host + block->length <= host)) { | |
2546 | + if (prev) | |
2547 | + prevp = &prev->next; | |
2548 | + prev = block; | |
2549 | + block = block->next; | |
2550 | + } | |
2551 | + if (!block) { | |
2552 | + fprintf(stderr, "Bad ram pointer %p\n", ptr); | |
2553 | + abort(); | |
2554 | + } | |
2555 | + return block->offset + (host - block->host); | |
2458 | 2556 | } |
2459 | 2557 | |
2460 | 2558 | static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) |
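
Both new lookups (ram offset to host pointer above, and host pointer back to ram offset for the softmmu path) walk the block list linearly and reorder it on a hit so that recently used blocks are found quickly; with a single main-ram block the walk ends on the first comparison. A standalone sketch of the offset-to-pointer direction, simplified to a plain move-to-front of the matching block and using hypothetical demo_* names:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct DemoRAMBlock {
        uint8_t *host;
        uint64_t offset;
        uint64_t length;
        struct DemoRAMBlock *next;
    } DemoRAMBlock;

    static DemoRAMBlock *demo_ram_blocks;      /* filled in by the allocator */

    /* Translate a guest ram offset to a host pointer.  On a hit the block is
       unlinked and pushed to the head of the list, so the next lookup for the
       same block succeeds immediately. */
    static uint8_t *demo_get_ram_ptr(uint64_t addr)
    {
        DemoRAMBlock *prev = NULL;
        DemoRAMBlock *block = demo_ram_blocks;

        while (block && (block->offset > addr
                         || block->offset + block->length <= addr)) {
            prev = block;
            block = block->next;
        }
        if (!block) {
            fprintf(stderr, "Bad ram offset %llx\n", (unsigned long long)addr);
            abort();
        }
        if (prev) {
            prev->next = block->next;          /* unlink ...               */
            block->next = demo_ram_blocks;     /* ... and push to the head */
            demo_ram_blocks = block;
        }
        return block->host + (addr - block->offset);
    }

    int main(void)
    {
        /* Hand-build two blocks: [0,4096) -> ram_a, [4096,8192) -> ram_b. */
        static uint8_t ram_a[4096], ram_b[4096];
        DemoRAMBlock b = { ram_b, 4096, 4096, NULL };
        DemoRAMBlock a = { ram_a, 0, 4096, &b };

        demo_ram_blocks = &a;
        *demo_get_ram_ptr(5000) = 0x42;            /* resolves into ram_b */
        printf("ram_b[904] = %d\n", ram_b[904]);   /* prints 66 (0x42)    */
        return 0;
    }
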
... | ... | @@ -2895,9 +2993,13 @@ static void io_mem_init(void) |
2895 | 2993 | |
2896 | 2994 | io_mem_watch = cpu_register_io_memory(0, watch_mem_read, |
2897 | 2995 | watch_mem_write, NULL); |
2898 | - /* alloc dirty bits array */ | |
2899 | - phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS); | |
2900 | - memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS); | |
2996 | +#ifdef USE_KQEMU | |
2997 | + if (kqemu_phys_ram_base) { | |
2998 | + /* alloc dirty bits array */ | |
2999 | + phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS); | |
3000 | + memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS); | |
3001 | + } | |
3002 | +#endif | |
2901 | 3003 | } |
2902 | 3004 | |
2903 | 3005 | /* mem_read and mem_write are arrays of functions containing the
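
io_mem_init() now pre-sizes the dirty array only for the kqemu case, where kqemu_phys_ram_size is fixed up front; in the normal case the array grows per allocation as shown earlier. Either way the array keeps its one flag byte per page, which is what the migration loops in vl.c index into through cpu_physical_memory_get_dirty()/set_dirty(). A sketch of that indexing with hypothetical demo_* names and an illustrative flag value:

    #include <stdint.h>

    #define DEMO_PAGE_BITS 12                     /* assume 4K target pages  */
    #define DEMO_MIGRATION_DIRTY_FLAG 0x08        /* illustrative flag value */

    static uint8_t *demo_ram_dirty;               /* one flag byte per page  */

    static int demo_get_dirty(uint64_t addr, uint8_t flags)
    {
        return demo_ram_dirty[addr >> DEMO_PAGE_BITS] & flags;
    }

    static void demo_set_dirty(uint64_t addr)
    {
        demo_ram_dirty[addr >> DEMO_PAGE_BITS] = 0xff;   /* dirty for all clients */
    }

    int main(void)
    {
        static uint8_t dirty[1];                  /* enough for one demo page */
        demo_ram_dirty = dirty;
        demo_set_dirty(0);
        return !demo_get_dirty(0, DEMO_MIGRATION_DIRTY_FLAG);
    }
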
kqemu.c
... | ... | @@ -91,6 +91,8 @@ unsigned int nb_modified_ram_pages; |
91 | 91 | uint8_t *modified_ram_pages_table; |
92 | 92 | int qpi_io_memory; |
93 | 93 | uint32_t kqemu_comm_base; /* physical address of the QPI communication page */ |
94 | +ram_addr_t kqemu_phys_ram_size; | |
95 | +uint8_t *kqemu_phys_ram_base; | |
94 | 96 | |
95 | 97 | #define cpuid(index, eax, ebx, ecx, edx) \ |
96 | 98 | asm volatile ("cpuid" \ |
... | ... | @@ -214,13 +216,14 @@ int kqemu_init(CPUState *env) |
214 | 216 | sizeof(uint64_t)); |
215 | 217 | if (!modified_ram_pages) |
216 | 218 | goto fail; |
217 | - modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS); | |
219 | + modified_ram_pages_table = | |
220 | + qemu_mallocz(kqemu_phys_ram_size >> TARGET_PAGE_BITS); | |
218 | 221 | if (!modified_ram_pages_table) |
219 | 222 | goto fail; |
220 | 223 | |
221 | 224 | memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */ |
222 | - kinit.ram_base = phys_ram_base; | |
223 | - kinit.ram_size = phys_ram_size; | |
225 | + kinit.ram_base = kqemu_phys_ram_base; | |
226 | + kinit.ram_size = kqemu_phys_ram_size; | |
224 | 227 | kinit.ram_dirty = phys_ram_dirty; |
225 | 228 | kinit.pages_to_flush = pages_to_flush; |
226 | 229 | kinit.ram_pages_to_update = ram_pages_to_update;
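
kqemu itself is otherwise untouched: kinit.ram_base/ram_size now point at the dedicated kqemu_phys_ram_base region that vl.c preallocates, because the kernel module still expects one contiguous host mapping for all guest ram. Inside that region, kqemu_ram_alloc() in exec.c remains a simple bump allocator; a standalone sketch of that pattern, with hypothetical demo_* names:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_PAGE_SIZE  4096ULL
    #define DEMO_PAGE_ALIGN(x) (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

    static uint8_t *demo_contig_base;   /* one big region, allocated up front */
    static uint64_t demo_contig_size;
    static uint64_t demo_next_offset;   /* bump pointer within the region     */

    /* Carve the next chunk out of the preallocated region, aborting when it
       is exhausted -- the shape of kqemu_ram_alloc(). */
    static uint64_t demo_bump_alloc(uint64_t size)
    {
        uint64_t addr;

        if (demo_next_offset + size > demo_contig_size) {
            fprintf(stderr, "Not enough memory (requested %llu)\n",
                    (unsigned long long)size);
            abort();
        }
        addr = demo_next_offset;
        demo_next_offset = DEMO_PAGE_ALIGN(demo_next_offset + size);
        return addr;
    }

    int main(void)
    {
        uint64_t a, b;

        demo_contig_size = 16ULL << 20;            /* 16 MB demo region */
        demo_contig_base = malloc(demo_contig_size);
        a = demo_bump_alloc(8 << 20);
        b = demo_bump_alloc(1 << 20);
        printf("first chunk at 0x%llx, second at 0x%llx\n",
               (unsigned long long)a, (unsigned long long)b);
        free(demo_contig_base);
        return 0;
    }
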
vl.c
... | ... | @@ -3094,9 +3094,9 @@ static int ram_load_v1(QEMUFile *f, void *opaque) |
3094 | 3094 | int ret; |
3095 | 3095 | ram_addr_t i; |
3096 | 3096 | |
3097 | - if (qemu_get_be32(f) != phys_ram_size) | |
3097 | + if (qemu_get_be32(f) != last_ram_offset) | |
3098 | 3098 | return -EINVAL; |
3099 | - for(i = 0; i < phys_ram_size; i+= TARGET_PAGE_SIZE) { | |
3099 | + for(i = 0; i < last_ram_offset; i+= TARGET_PAGE_SIZE) { | |
3100 | 3100 | ret = ram_get_page(f, qemu_get_ram_ptr(i), TARGET_PAGE_SIZE); |
3101 | 3101 | if (ret) |
3102 | 3102 | return ret; |
... | ... | @@ -3182,7 +3182,7 @@ static int ram_save_block(QEMUFile *f) |
3182 | 3182 | ram_addr_t addr = 0; |
3183 | 3183 | int found = 0; |
3184 | 3184 | |
3185 | - while (addr < phys_ram_size) { | |
3185 | + while (addr < last_ram_offset) { | |
3186 | 3186 | if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) { |
3187 | 3187 | uint8_t *p; |
3188 | 3188 | |
... | ... | @@ -3204,7 +3204,7 @@ static int ram_save_block(QEMUFile *f) |
3204 | 3204 | break; |
3205 | 3205 | } |
3206 | 3206 | addr += TARGET_PAGE_SIZE; |
3207 | - current_addr = (saved_addr + addr) % phys_ram_size; | |
3207 | + current_addr = (saved_addr + addr) % last_ram_offset; | |
3208 | 3208 | } |
3209 | 3209 | |
3210 | 3210 | return found; |
... | ... | @@ -3217,7 +3217,7 @@ static ram_addr_t ram_save_remaining(void) |
3217 | 3217 | ram_addr_t addr; |
3218 | 3218 | ram_addr_t count = 0; |
3219 | 3219 | |
3220 | - for (addr = 0; addr < phys_ram_size; addr += TARGET_PAGE_SIZE) { | |
3220 | + for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) { | |
3221 | 3221 | if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) |
3222 | 3222 | count++; |
3223 | 3223 | } |
... | ... | @@ -3231,7 +3231,7 @@ static int ram_save_live(QEMUFile *f, int stage, void *opaque) |
3231 | 3231 | |
3232 | 3232 | if (stage == 1) { |
3233 | 3233 | /* Make sure all dirty bits are set */ |
3234 | - for (addr = 0; addr < phys_ram_size; addr += TARGET_PAGE_SIZE) { | |
3234 | + for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) { | |
3235 | 3235 | if (!cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) |
3236 | 3236 | cpu_physical_memory_set_dirty(addr); |
3237 | 3237 | } |
... | ... | @@ -3239,7 +3239,7 @@ static int ram_save_live(QEMUFile *f, int stage, void *opaque) |
3239 | 3239 | /* Enable dirty memory tracking */ |
3240 | 3240 | cpu_physical_memory_set_dirty_tracking(1); |
3241 | 3241 | |
3242 | - qemu_put_be64(f, phys_ram_size | RAM_SAVE_FLAG_MEM_SIZE); | |
3242 | + qemu_put_be64(f, last_ram_offset | RAM_SAVE_FLAG_MEM_SIZE); | |
3243 | 3243 | } |
3244 | 3244 | |
3245 | 3245 | while (!qemu_file_rate_limit(f)) { |
... | ... | @@ -3272,7 +3272,7 @@ static int ram_load_dead(QEMUFile *f, void *opaque) |
3272 | 3272 | |
3273 | 3273 | if (ram_decompress_open(s, f) < 0) |
3274 | 3274 | return -EINVAL; |
3275 | - for(i = 0; i < phys_ram_size; i+= BDRV_HASH_BLOCK_SIZE) { | |
3275 | + for(i = 0; i < last_ram_offset; i+= BDRV_HASH_BLOCK_SIZE) { | |
3276 | 3276 | if (ram_decompress_buf(s, buf, 1) < 0) { |
3277 | 3277 | fprintf(stderr, "Error while reading ram block header\n"); |
3278 | 3278 | goto error; |
... | ... | @@ -3303,7 +3303,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) |
3303 | 3303 | return ram_load_v1(f, opaque); |
3304 | 3304 | |
3305 | 3305 | if (version_id == 2) { |
3306 | - if (qemu_get_be32(f) != phys_ram_size) | |
3306 | + if (qemu_get_be32(f) != last_ram_offset) | |
3307 | 3307 | return -EINVAL; |
3308 | 3308 | return ram_load_dead(f, opaque); |
3309 | 3309 | } |
... | ... | @@ -3318,7 +3318,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) |
3318 | 3318 | addr &= TARGET_PAGE_MASK; |
3319 | 3319 | |
3320 | 3320 | if (flags & RAM_SAVE_FLAG_MEM_SIZE) { |
3321 | - if (addr != phys_ram_size) | |
3321 | + if (addr != last_ram_offset) | |
3322 | 3322 | return -EINVAL; |
3323 | 3323 | } |
3324 | 3324 | |
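
All of the migration loops above keep their shape: they simply bound the scan with last_ram_offset instead of phys_ram_size and fetch a host pointer per page via qemu_get_ram_ptr() rather than indexing phys_ram_base. A sketch of that iteration pattern as the save/load paths use it (this would live inside QEMU; TARGET_PAGE_SIZE, last_ram_offset and qemu_get_ram_ptr() are the real symbols from this patch, demo_handle_page() is not):

    /* Walk every registered guest ram page.  With several RAMBlocks the pages
       are no longer guaranteed to be contiguous in host memory, so the host
       pointer must be looked up per page. */
    static void demo_walk_ram_pages(void (*demo_handle_page)(ram_addr_t, uint8_t *))
    {
        ram_addr_t addr;

        for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
            demo_handle_page(addr, qemu_get_ram_ptr(addr));
        }
    }
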
... | ... | @@ -5132,31 +5132,21 @@ int main(int argc, char **argv, char **envp) |
5132 | 5132 | exit(1); |
5133 | 5133 | |
5134 | 5134 | /* init the memory */ |
5135 | - phys_ram_size = machine->ram_require & ~RAMSIZE_FIXED; | |
5136 | - | |
5137 | - if (machine->ram_require & RAMSIZE_FIXED) { | |
5138 | - if (ram_size > 0) { | |
5139 | - if (ram_size < phys_ram_size) { | |
5140 | - fprintf(stderr, "Machine `%s' requires %llu bytes of memory\n", | |
5141 | - machine->name, (unsigned long long) phys_ram_size); | |
5142 | - exit(-1); | |
5143 | - } | |
5144 | - | |
5145 | - phys_ram_size = ram_size; | |
5146 | - } else | |
5147 | - ram_size = phys_ram_size; | |
5148 | - } else { | |
5149 | - if (ram_size == 0) | |
5150 | - ram_size = DEFAULT_RAM_SIZE * 1024 * 1024; | |
5151 | - | |
5152 | - phys_ram_size += ram_size; | |
5153 | - } | |
5135 | + if (ram_size == 0) | |
5136 | + ram_size = DEFAULT_RAM_SIZE * 1024 * 1024; | |
5154 | 5137 | |
5155 | - phys_ram_base = qemu_vmalloc(phys_ram_size); | |
5156 | - if (!phys_ram_base) { | |
5157 | - fprintf(stderr, "Could not allocate physical memory\n"); | |
5158 | - exit(1); | |
5138 | +#ifdef USE_KQEMU | |
5139 | + /* FIXME: This is a nasty hack because kqemu can't cope with dynamic | |
5140 | + guest ram allocation. It needs to go away. */ | |
5141 | + if (kqemu_allowed) { | |
5142 | + kqemu_phys_ram_size = ram_size + VGA_RAM_SIZE + 4 * 1024 * 1024; | |
5143 | + kqemu_phys_ram_base = qemu_vmalloc(kqemu_phys_ram_size); | |
5144 | + if (!kqemu_phys_ram_base) { | |
5145 | + fprintf(stderr, "Could not allocate physical memory\n"); | |
5146 | + exit(1); | |
5147 | + } | |
5159 | 5148 | } |
5149 | +#endif | |
5160 | 5150 | |
5161 | 5151 | /* init the dynamic translator */ |
5162 | 5152 | cpu_exec_init_all(tb_size * 1024 * 1024);