Commit 6db6c63826a4a2c06cb02bec2bfce3f506eacda8
1 parent 643e5399
Cleanup {alloc|get}_cluster_offset() (Gleb Natapov)
Move duplicated code into helper functions.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5860 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 73 additions and 89 deletions
block-qcow2.c
@@ -601,6 +601,34 @@ static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index)
     return l2_table;
 }
 
+static int size_to_clusters(BDRVQcowState *s, int64_t size)
+{
+    return (size + (s->cluster_size - 1)) >> s->cluster_bits;
+}
+
+static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
+        uint64_t *l2_table, uint64_t mask)
+{
+    int i;
+    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;
+
+    for (i = 0; i < nb_clusters; i++)
+        if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
+            break;
+
+    return i;
+}
+
+static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
+{
+    int i = 0;
+
+    while(nb_clusters-- && l2_table[i] == 0)
+        i++;
+
+    return i;
+}
+
 /*
  * get_cluster_offset
  *
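For readers skimming the diff, here is a standalone toy that exercises the two counting helpers and the size_to_clusters() rounding. Everything in it is illustrative rather than taken from the commit: the table contents, the 64 KiB cluster size, the TOY_OFLAG_COPIED bit, and the be64_to_cpu stub (the real helper byte-swaps big-endian table entries; the toy keeps its table in host order).

/* Hypothetical, self-contained sketch -- not part of the commit. */
#include <stdint.h>
#include <stdio.h>

#define be64_to_cpu(x) (x)              /* stand-in: toy table is host-endian */
#define TOY_OFLAG_COPIED (1ULL << 63)   /* stands in for QCOW_OFLAG_COPIED */

static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    for (i = 0; i < nb_clusters; i++)
        if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
            break;

    return i;
}

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i = 0;

    while (nb_clusters-- && l2_table[i] == 0)
        i++;

    return i;
}

int main(void)
{
    /* 64 KiB clusters: entries 0-2 are physically contiguous, entry 3 is not,
     * entries 4-5 are unallocated */
    uint64_t l2[] = {
        0x10000 | TOY_OFLAG_COPIED,
        0x20000 | TOY_OFLAG_COPIED,
        0x30000,
        0x90000,
        0, 0, 0x40000,
    };

    /* run length of contiguous clusters, ignoring the COPIED bit: prints 3 */
    printf("%d\n", count_contiguous_clusters(4, 0x10000, l2, TOY_OFLAG_COPIED));

    /* run length of zero (free) entries starting at index 4: prints 2 */
    printf("%d\n", count_contiguous_free_clusters(3, &l2[4]));

    /* size_to_clusters() is the usual round-up division by the cluster size:
     * 130 KiB with 64 KiB clusters -> prints 3 */
    printf("%d\n", (int)((130 * 1024 + 0x10000 - 1) >> 16));

    return 0;
}

Note the mask argument: get_cluster_offset() below passes QCOW_OFLAG_COPIED so the flag is stripped before the contiguity check, while alloc_cluster_offset() passes 0 and therefore requires the stored entries to match exactly, flag bits included.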
@@ -622,9 +650,9 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
 {
     BDRVQcowState *s = bs->opaque;
     int l1_index, l2_index;
-    uint64_t l2_offset, *l2_table, cluster_offset, next;
-    int l1_bits;
-    int index_in_cluster, nb_available, nb_needed;
+    uint64_t l2_offset, *l2_table, cluster_offset;
+    int l1_bits, c;
+    int index_in_cluster, nb_available, nb_needed, nb_clusters;
 
     index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
     nb_needed = *num + index_in_cluster;
@@ -632,7 +660,7 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
     l1_bits = s->l2_bits + s->cluster_bits;
 
     /* compute how many bytes there are between the offset and
-     * and the end of the l1 entry
+     * the end of the l1 entry
      */
 
     nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1));
@@ -667,38 +695,25 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
 
     l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
     cluster_offset = be64_to_cpu(l2_table[l2_index]);
-    nb_available = s->cluster_sectors;
-    l2_index++;
+    nb_clusters = size_to_clusters(s, nb_needed << 9);
 
     if (!cluster_offset) {
-
-        /* how many empty clusters ? */
-
-        while (nb_available < nb_needed && !l2_table[l2_index]) {
-            l2_index++;
-            nb_available += s->cluster_sectors;
-        }
+        /* how many empty clusters ? */
+        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
     } else {
+        /* how many allocated clusters ? */
+        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
+                &l2_table[l2_index], QCOW_OFLAG_COPIED);
+    }
 
-        /* how many allocated clusters ? */
-
-        cluster_offset &= ~QCOW_OFLAG_COPIED;
-        while (nb_available < nb_needed) {
-            next = be64_to_cpu(l2_table[l2_index]) & ~QCOW_OFLAG_COPIED;
-            if (next != cluster_offset + (nb_available << 9))
-                break;
-            l2_index++;
-            nb_available += s->cluster_sectors;
-        }
-    }
-
+    nb_available = (c * s->cluster_sectors);
 out:
     if (nb_available > nb_needed)
        nb_available = nb_needed;
 
     *num = nb_available - index_in_cluster;
 
-    return cluster_offset;
+    return cluster_offset & ~QCOW_OFLAG_COPIED;
 }
 
 /*
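A rough worked example of the rewritten tail, with made-up numbers (64 KiB clusters, so cluster_bits = 16 and cluster_sectors = 128; the request size and the run length c are invented):

/* Hypothetical numbers, not from the commit. */
#include <stdio.h>

int main(void)
{
    int cluster_bits = 16, cluster_size = 1 << cluster_bits;
    int cluster_sectors = cluster_size >> 9;                  /* 128 sectors */

    int index_in_cluster = 10, num = 300;                     /* requested sectors */
    int nb_needed = num + index_in_cluster;                   /* 310 */

    /* nb_clusters = size_to_clusters(s, nb_needed << 9) */
    int nb_clusters = ((nb_needed << 9) + cluster_size - 1) >> cluster_bits;
    printf("nb_clusters = %d\n", nb_clusters);                 /* 3 */

    int c = 2;                     /* suppose the contiguous run ends after 2 */
    int nb_available = c * cluster_sectors;                    /* 256 */
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    printf("*num = %d\n", nb_available - index_in_cluster);    /* 246 */
    return 0;
}

Here c is whatever count_contiguous_free_clusters() or count_contiguous_clusters() returned; the existing clamp at the out: label then trims the result to the caller's request, as before.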
@@ -862,15 +877,15 @@ static uint64_t alloc_cluster_offset(BlockDriverState *bs,
     BDRVQcowState *s = bs->opaque;
     int l2_index, ret;
     uint64_t l2_offset, *l2_table, cluster_offset;
-    int nb_available, nb_clusters, i, j;
-    uint64_t start_sect, current;
+    int nb_available, nb_clusters, i = 0;
+    uint64_t start_sect;
 
     ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
     if (ret == 0)
         return 0;
 
-    nb_clusters = ((n_end << 9) + s->cluster_size - 1) >>
-                  s->cluster_bits;
+    nb_clusters = size_to_clusters(s, n_end << 9);
+
     if (nb_clusters > s->l2_size - l2_index)
         nb_clusters = s->l2_size - l2_index;
 
@@ -879,13 +894,8 @@ static uint64_t alloc_cluster_offset(BlockDriverState *bs,
     /* We keep all QCOW_OFLAG_COPIED clusters */
 
     if (cluster_offset & QCOW_OFLAG_COPIED) {
-
-        for (i = 1; i < nb_clusters; i++) {
-            current = be64_to_cpu(l2_table[l2_index + i]);
-            if (cluster_offset + (i << s->cluster_bits) != current)
-                break;
-        }
-        nb_clusters = i;
+        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
+                &l2_table[l2_index], 0);
 
         nb_available = nb_clusters << (s->cluster_bits - 9);
         if (nb_available > n_end)
@@ -903,46 +913,27 @@ static uint64_t alloc_cluster_offset(BlockDriverState *bs,
 
     /* how many available clusters ? */
 
-    i = 0;
     while (i < nb_clusters) {
+        int j;
+        i += count_contiguous_free_clusters(nb_clusters - i,
+                &l2_table[l2_index + i]);
 
-        i++;
-
-        if (!cluster_offset) {
-
-            /* how many free clusters ? */
-
-            while (i < nb_clusters) {
-                cluster_offset = be64_to_cpu(l2_table[l2_index + i]);
-                if (cluster_offset != 0)
-                    break;
-                i++;
-            }
+        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);
 
-            if ((cluster_offset & QCOW_OFLAG_COPIED) ||
+        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                 (cluster_offset & QCOW_OFLAG_COMPRESSED))
-                break;
-
-        } else {
+            break;
 
-            /* how many contiguous clusters ? */
+        j = count_contiguous_clusters(nb_clusters - i, s->cluster_size,
+                &l2_table[l2_index + i], 0);
 
-            j = 1;
-            current = 0;
-            while (i < nb_clusters) {
-                current = be64_to_cpu(l2_table[l2_index + i]);
-                if (cluster_offset + (j << s->cluster_bits) != current)
-                    break;
+        if (j)
+            free_any_clusters(bs, cluster_offset, j);
 
-                i++;
-                j++;
-            }
+        i += j;
 
-            free_any_clusters(bs, cluster_offset, j);
-            if (current)
-                break;
-            cluster_offset = current;
-        }
+        if(be64_to_cpu(l2_table[l2_index + i]))
+            break;
     }
     nb_clusters = i;
 
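In the rewritten scan, each pass first skips a run of unallocated entries, stops as soon as the next allocated entry carries QCOW_OFLAG_COPIED or QCOW_OFLAG_COMPRESSED, and otherwise counts the contiguous allocated run, releases it with free_any_clusters() (as the old code did), and continues until the entry after the run is non-zero. The effect is the same free/allocated bookkeeping as the two nested loops it replaces, just expressed through the shared helpers.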
@@ -2194,26 +2185,19 @@ static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
     BDRVQcowState *s = bs->opaque;
     int i, nb_clusters;
 
-    nb_clusters = (size + s->cluster_size - 1) >> s->cluster_bits;
-    for(;;) {
-        if (get_refcount(bs, s->free_cluster_index) == 0) {
-            s->free_cluster_index++;
-            for(i = 1; i < nb_clusters; i++) {
-                if (get_refcount(bs, s->free_cluster_index) != 0)
-                    goto not_found;
-                s->free_cluster_index++;
-            }
+    nb_clusters = size_to_clusters(s, size);
+retry:
+    for(i = 0; i < nb_clusters; i++) {
+        int64_t i = s->free_cluster_index++;
+        if (get_refcount(bs, i) != 0)
+            goto retry;
+    }
 #ifdef DEBUG_ALLOC2
-            printf("alloc_clusters: size=%lld -> %lld\n",
-                   size,
-                   (s->free_cluster_index - nb_clusters) << s->cluster_bits);
+    printf("alloc_clusters: size=%lld -> %lld\n",
+            size,
+            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
 #endif
-            return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
-        } else {
-        not_found:
-            s->free_cluster_index++;
-        }
-    }
+    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
 }
 
 static int64_t alloc_clusters(BlockDriverState *bs, int64_t size)
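The new alloc_clusters_noref() body is short, but the goto-driven restart is easy to misread. The toy below mirrors its control flow on a fixed refcount array; the array contents and request size are invented, and the inner index that the commit also names i is called c here only to avoid the shadowing.

/* Hypothetical, self-contained sketch -- not part of the commit. */
#include <stdint.h>
#include <stdio.h>

/* toy refcount table: clusters 0-2 and 4 are in use, the rest are free */
static int refcounts[] = { 1, 1, 2, 0, 1, 0, 0, 0, 0, 0 };

static int get_refcount(int64_t idx)
{
    return refcounts[idx];
}

int main(void)
{
    int nb_clusters = 3;                 /* request spanning three clusters */
    int64_t free_cluster_index = 0;
    int i;

retry:
    for (i = 0; i < nb_clusters; i++) {
        int64_t c = free_cluster_index++;
        if (get_refcount(c) != 0)
            goto retry;                  /* used cluster: restart the count */
    }

    /* first free run long enough for the request: prints "clusters 5..7" */
    printf("clusters %lld..%lld\n",
           (long long)(free_cluster_index - nb_clusters),
           (long long)(free_cluster_index - 1));
    return 0;
}

free_cluster_index has already advanced past the conflicting cluster when the goto fires, so the restarted loop begins counting a fresh run immediately after it.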
@@ -2548,7 +2532,7 @@ static void check_refcounts(BlockDriverState *bs)
     uint16_t *refcount_table;
 
     size = bdrv_getlength(s->hd);
-    nb_clusters = (size + s->cluster_size - 1) >> s->cluster_bits;
+    nb_clusters = size_to_clusters(s, size);
     refcount_table = qemu_mallocz(nb_clusters * sizeof(uint16_t));
 
     /* header */
@@ -2600,7 +2584,7 @@ static void dump_refcounts(BlockDriverState *bs)
     int refcount;
 
     size = bdrv_getlength(s->hd);
-    nb_clusters = (size + s->cluster_size - 1) >> s->cluster_bits;
+    nb_clusters = size_to_clusters(s, size);
     for(k = 0; k < nb_clusters;) {
         k1 = k;
         refcount = get_refcount(bs, k);