Commit ff4b91c2f7e51dab148aba4bf43c2f39f219e495

Authored by aliguori
1 parent ab5ccbd6

qcow2: Fix cluster allocation (Kevin Wolf)

When allocating multiple clusters at once, the qcow2 implementation
tries to find as many physically contiguous clusters as possible to
allow larger writes. This search includes allocated clusters which are
in the right place and still free clusters. If the range to allocate
spans clusters in patterns like "10 allocated, then 10 free, then again
10 allocated" it is only checked that the chunks of allocated clusters
are contiguous for themselves.

However, what is actually needed is to have _all_ allocated clusters
contiguous, starting at the first cluster of the allocation and spanning
multiple such chunks. This patch changes the check so that each offset
is not compared to the offset of the first cluster in its own chunk but
to the first cluster in the whole allocation.

I haven't seen it happen, but without this fix data corruption on qcow2
images is possible.

Signed-off-by: Kevin Wolf <kwolf@suse.de>
Acked-by: Gleb Natapov <gleb@redhat.com>



git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6213 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 6 additions and 6 deletions
block-qcow2.c
@@ -615,7 +615,7 @@ static int size_to_clusters(BDRVQcowState *s, int64_t size)
 }
 
 static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
-        uint64_t *l2_table, uint64_t mask)
+        uint64_t *l2_table, uint64_t start, uint64_t mask)
 {
     int i;
     uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;
@@ -623,11 +623,11 @@ static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
     if (!offset)
         return 0;
 
-    for (i = 0; i < nb_clusters; i++)
+    for (i = start; i < start + nb_clusters; i++)
         if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
             break;
 
-    return i;
+    return (i - start);
 }
 
 static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
@@ -714,7 +714,7 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
     } else {
         /* how many allocated clusters ? */
         c = count_contiguous_clusters(nb_clusters, s->cluster_size,
-                &l2_table[l2_index], QCOW_OFLAG_COPIED);
+                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
     }
 
     nb_available = (c * s->cluster_sectors);
@@ -968,7 +968,7 @@ static uint64_t alloc_cluster_offset(BlockDriverState *bs,
 
     if (cluster_offset & QCOW_OFLAG_COPIED) {
         nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
-                &l2_table[l2_index], 0);
+                &l2_table[l2_index], 0, 0);
 
         cluster_offset &= ~QCOW_OFLAG_COPIED;
         m->nb_clusters = 0;
@@ -985,7 +985,7 @@ static uint64_t alloc_cluster_offset(BlockDriverState *bs,
 
     while (i < nb_clusters) {
         i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
-                &l2_table[l2_index + i], 0);
+                &l2_table[l2_index], i, 0);
 
         if(be64_to_cpu(l2_table[l2_index + i]))
             break;