Commit 151f7749f26e8b0eaca517376a89c01430c584e5

Authored by Jan Kiszka
Committed by Anthony Liguori
1 parent 62518b8b

kvm: Rework dirty bitmap synchronization

Extend kvm_physical_sync_dirty_bitmap() so that it can sync across
multiple slots. Useful for updating the whole dirty log during
migration. Moreover, properly pass errors down the whole call chain.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
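For illustration only (not part of this commit), a minimal sketch of how a migration-side caller could consume the new error return instead of silently ignoring sync failures; the wrapper name ram_save_sync_dirty_log and its error policy are assumptions:

/* Hypothetical caller sketch: with this commit,
 * cpu_physical_sync_dirty_bitmap() returns 0 on success and -1 on
 * failure, so the caller can report the error instead of ignoring it. */
static int ram_save_sync_dirty_log(target_phys_addr_t last_ram_offset)
{
    /* One call now syncs across every overlapping KVM slot. */
    if (cpu_physical_sync_dirty_bitmap(0, last_ram_offset) != 0) {
        fprintf(stderr, "migration: dirty bitmap sync failed\n");
        return -1;
    }
    return 0;
}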
cpu-all.h
@@ -901,7 +901,8 @@ int cpu_physical_memory_set_dirty_tracking(int enable);
 
 int cpu_physical_memory_get_dirty_tracking(void);
 
-void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);
+int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
+                                   target_phys_addr_t end_addr);
 
 void dump_exec_info(FILE *f,
                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
@@ -1931,10 +1931,14 @@ int cpu_physical_memory_get_dirty_tracking(void)
     return in_migration;
 }
 
-void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
+int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
+                                   target_phys_addr_t end_addr)
 {
+    int ret = 0;
+
     if (kvm_enabled())
-        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+    return ret;
 }
 
 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
kvm-all.c
@@ -272,47 +272,58 @@ int kvm_set_migration_log(int enable)
  * @start_add: start of logged region.
  * @end_addr: end of logged region.
  */
-void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-                                    target_phys_addr_t end_addr)
+int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
+                                   target_phys_addr_t end_addr)
 {
     KVMState *s = kvm_state;
-    KVMDirtyLog d;
-    KVMSlot *mem = kvm_lookup_matching_slot(s, start_addr, end_addr);
-    unsigned long alloc_size;
+    unsigned long size, allocated_size = 0;
+    target_phys_addr_t phys_addr;
     ram_addr_t addr;
-    target_phys_addr_t phys_addr = start_addr;
+    KVMDirtyLog d;
+    KVMSlot *mem;
+    int ret = 0;
 
-    dprintf("sync addr: " TARGET_FMT_lx " into %lx\n", start_addr,
-            mem->phys_offset);
-    if (mem == NULL) {
-            fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
-                    TARGET_FMT_plx "\n", __func__, phys_addr, end_addr - 1);
-            return;
-    }
+    d.dirty_bitmap = NULL;
+    while (start_addr < end_addr) {
+        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
+        if (mem == NULL) {
+            break;
+        }
 
-    alloc_size = ((mem->memory_size >> TARGET_PAGE_BITS) + 7) / 8;
-    d.dirty_bitmap = qemu_mallocz(alloc_size);
+        size = ((mem->memory_size >> TARGET_PAGE_BITS) + 7) / 8;
+        if (!d.dirty_bitmap) {
+            d.dirty_bitmap = qemu_malloc(size);
+        } else if (size > allocated_size) {
+            d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
+        }
+        allocated_size = size;
+        memset(d.dirty_bitmap, 0, allocated_size);
 
-    d.slot = mem->slot;
-    dprintf("slot %d, phys_addr %llx, uaddr: %llx\n",
-            d.slot, mem->start_addr, mem->phys_offset);
+        d.slot = mem->slot;
 
-    if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
-        dprintf("ioctl failed %d\n", errno);
-        goto out;
-    }
+        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
+            dprintf("ioctl failed %d\n", errno);
+            ret = -1;
+            break;
+        }
+
+        for (phys_addr = mem->start_addr, addr = mem->phys_offset;
+             phys_addr < mem->start_addr + mem->memory_size;
+             phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
+            unsigned long *bitmap = (unsigned long *)d.dirty_bitmap;
+            unsigned nr = (phys_addr - mem->start_addr) >> TARGET_PAGE_BITS;
+            unsigned word = nr / (sizeof(*bitmap) * 8);
+            unsigned bit = nr % (sizeof(*bitmap) * 8);
 
-    phys_addr = start_addr;
-    for (addr = mem->phys_offset; phys_addr < end_addr; phys_addr+= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
-        unsigned long *bitmap = (unsigned long *)d.dirty_bitmap;
-        unsigned nr = (phys_addr - start_addr) >> TARGET_PAGE_BITS;
-        unsigned word = nr / (sizeof(*bitmap) * 8);
-        unsigned bit = nr % (sizeof(*bitmap) * 8);
-        if ((bitmap[word] >> bit) & 1)
-            cpu_physical_memory_set_dirty(addr);
+            if ((bitmap[word] >> bit) & 1) {
+                cpu_physical_memory_set_dirty(addr);
+            }
+        }
+        start_addr = phys_addr;
     }
-out:
     qemu_free(d.dirty_bitmap);
+
+    return ret;
 }
 
 int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
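To make the bitmap indexing in the per-slot loop above concrete, here is a small standalone C demo (not from the commit; TARGET_PAGE_BITS is assumed to be 12, i.e. 4 KiB pages) that reproduces the word/bit arithmetic used to test whether a page within a slot is marked dirty:

#include <stdio.h>
#include <stdlib.h>

#define TARGET_PAGE_BITS 12                 /* assumption: 4 KiB pages */
#define TARGET_PAGE_SIZE (1UL << TARGET_PAGE_BITS)

/* Same arithmetic as the loop above: the page number within the slot
 * selects a word and a bit in the KVM-provided dirty bitmap. */
static int test_dirty(const unsigned long *bitmap,
                      unsigned long slot_start, unsigned long phys_addr)
{
    unsigned nr   = (phys_addr - slot_start) >> TARGET_PAGE_BITS;
    unsigned word = nr / (sizeof(*bitmap) * 8);
    unsigned bit  = nr % (sizeof(*bitmap) * 8);

    return (bitmap[word] >> bit) & 1;
}

int main(void)
{
    unsigned long slot_start = 0x100000;    /* example slot base address */
    unsigned long slot_pages = 64;
    /* One bit per page, rounded up to whole bytes, as in the commit:
     * size = ((memory_size >> TARGET_PAGE_BITS) + 7) / 8 */
    unsigned long *bitmap = calloc((slot_pages + 7) / 8, 1);

    /* Pretend KVM reported page 5 of the slot as dirty. */
    bitmap[5 / (sizeof(*bitmap) * 8)] |= 1UL << (5 % (sizeof(*bitmap) * 8));

    printf("page 5 dirty: %d\n",
           test_dirty(bitmap, slot_start, slot_start + 5 * TARGET_PAGE_SIZE));
    printf("page 6 dirty: %d\n",
           test_dirty(bitmap, slot_start, slot_start + 6 * TARGET_PAGE_SIZE));
    free(bitmap);
    return 0;
}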
@@ -40,8 +40,8 @@ void kvm_set_phys_mem(target_phys_addr_t start_addr,
                       ram_addr_t size,
                       ram_addr_t phys_offset);
 
-void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-                                    target_phys_addr_t end_addr);
+int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
+                                   target_phys_addr_t end_addr);
 
 int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size);
 int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size);