Commit 45aba42fba7ae2a768606e08cece87a4aed987a6

Authored by Kevin Wolf
Committed by Anthony Liguori
1 parent f7d0fe02

qcow2: Split out guest cluster functions

qcow2-cluster.c contains all functions related to the management of guest
clusters, i.e. what the guest sees on its virtual disk. This code is about
mapping these guest clusters to host clusters in the image file using the
two-level lookup tables.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Makefile
@@ -68,7 +68,7 @@ recurse-all: $(SUBDIR_RULES) @@ -68,7 +68,7 @@ recurse-all: $(SUBDIR_RULES)
68 BLOCK_OBJS=cutils.o cache-utils.o qemu-malloc.o qemu-option.o module.o 68 BLOCK_OBJS=cutils.o cache-utils.o qemu-malloc.o qemu-option.o module.o
69 BLOCK_OBJS+=block/cow.o block/qcow.o aes.o block/vmdk.o block/cloop.o 69 BLOCK_OBJS+=block/cow.o block/qcow.o aes.o block/vmdk.o block/cloop.o
70 BLOCK_OBJS+=block/dmg.o block/bochs.o block/vpc.o block/vvfat.o 70 BLOCK_OBJS+=block/dmg.o block/bochs.o block/vpc.o block/vvfat.o
71 -BLOCK_OBJS+=block/qcow2.o block/qcow2-refcount.o 71 +BLOCK_OBJS+=block/qcow2.o block/qcow2-refcount.o block/qcow2-cluster.o
72 BLOCK_OBJS+=block/parallels.o block/nbd.o 72 BLOCK_OBJS+=block/parallels.o block/nbd.o
73 BLOCK_OBJS+=nbd.o block.o aio.o 73 BLOCK_OBJS+=nbd.o block.o aio.o
74 74
block/qcow2-cluster.c 0 → 100644
  1 +/*
  2 + * Block driver for the QCOW version 2 format
  3 + *
  4 + * Copyright (c) 2004-2006 Fabrice Bellard
  5 + *
  6 + * Permission is hereby granted, free of charge, to any person obtaining a copy
  7 + * of this software and associated documentation files (the "Software"), to deal
  8 + * in the Software without restriction, including without limitation the rights
  9 + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 + * copies of the Software, and to permit persons to whom the Software is
  11 + * furnished to do so, subject to the following conditions:
  12 + *
  13 + * The above copyright notice and this permission notice shall be included in
  14 + * all copies or substantial portions of the Software.
  15 + *
  16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 + * THE SOFTWARE.
  23 + */
  24 +
  25 +#include <zlib.h>
  26 +
  27 +#include "qemu-common.h"
  28 +#include "block_int.h"
  29 +#include "block/qcow2.h"
  30 +
  31 +int grow_l1_table(BlockDriverState *bs, int min_size)
  32 +{
  33 + BDRVQcowState *s = bs->opaque;
  34 + int new_l1_size, new_l1_size2, ret, i;
  35 + uint64_t *new_l1_table;
  36 + uint64_t new_l1_table_offset;
  37 + uint8_t data[12];
  38 +
  39 + new_l1_size = s->l1_size;
  40 + if (min_size <= new_l1_size)
  41 + return 0;
  42 + while (min_size > new_l1_size) {
  43 + new_l1_size = (new_l1_size * 3 + 1) / 2;
  44 + }
  45 +#ifdef DEBUG_ALLOC2
  46 + printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
  47 +#endif
  48 +
  49 + new_l1_size2 = sizeof(uint64_t) * new_l1_size;
  50 + new_l1_table = qemu_mallocz(new_l1_size2);
  51 + memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
  52 +
  53 + /* write new table (align to cluster) */
  54 + new_l1_table_offset = alloc_clusters(bs, new_l1_size2);
  55 +
  56 + for(i = 0; i < s->l1_size; i++)
  57 + new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
  58 + ret = bdrv_pwrite(s->hd, new_l1_table_offset, new_l1_table, new_l1_size2);
  59 + if (ret != new_l1_size2)
  60 + goto fail;
  61 + for(i = 0; i < s->l1_size; i++)
  62 + new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
  63 +
  64 + /* set new table */
  65 + cpu_to_be32w((uint32_t*)data, new_l1_size);
  66 + cpu_to_be64w((uint64_t*)(data + 4), new_l1_table_offset);
  67 + if (bdrv_pwrite(s->hd, offsetof(QCowHeader, l1_size), data,
  68 + sizeof(data)) != sizeof(data))
  69 + goto fail;
  70 + qemu_free(s->l1_table);
  71 + free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
  72 + s->l1_table_offset = new_l1_table_offset;
  73 + s->l1_table = new_l1_table;
  74 + s->l1_size = new_l1_size;
  75 + return 0;
  76 + fail:
  77 + qemu_free(s->l1_table);
  78 + return -EIO;
  79 +}
  80 +
  81 +void l2_cache_reset(BlockDriverState *bs)
  82 +{
  83 + BDRVQcowState *s = bs->opaque;
  84 +
  85 + memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
  86 + memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));
  87 + memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));
  88 +}
  89 +
  90 +static inline int l2_cache_new_entry(BlockDriverState *bs)
  91 +{
  92 + BDRVQcowState *s = bs->opaque;
  93 + uint32_t min_count;
  94 + int min_index, i;
  95 +
  96 + /* find a new entry in the least used one */
  97 + min_index = 0;
  98 + min_count = 0xffffffff;
  99 + for(i = 0; i < L2_CACHE_SIZE; i++) {
  100 + if (s->l2_cache_counts[i] < min_count) {
  101 + min_count = s->l2_cache_counts[i];
  102 + min_index = i;
  103 + }
  104 + }
  105 + return min_index;
  106 +}
  107 +
  108 +/*
  109 + * seek_l2_table
  110 + *
  111 + * seek l2_offset in the l2_cache table
  112 + * if not found, return NULL,
  113 + * if found,
  114 + * increments the l2 cache hit count of the entry,
  115 + * if counter overflow, divide by two all counters
  116 + * return the pointer to the l2 cache entry
  117 + *
  118 + */
  119 +
  120 +static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)
  121 +{
  122 + int i, j;
  123 +
  124 + for(i = 0; i < L2_CACHE_SIZE; i++) {
  125 + if (l2_offset == s->l2_cache_offsets[i]) {
  126 + /* increment the hit count */
  127 + if (++s->l2_cache_counts[i] == 0xffffffff) {
  128 + for(j = 0; j < L2_CACHE_SIZE; j++) {
  129 + s->l2_cache_counts[j] >>= 1;
  130 + }
  131 + }
  132 + return s->l2_cache + (i << s->l2_bits);
  133 + }
  134 + }
  135 + return NULL;
  136 +}
  137 +
  138 +/*
  139 + * l2_load
  140 + *
  141 + * Loads a L2 table into memory. If the table is in the cache, the cache
  142 + * is used; otherwise the L2 table is loaded from the image file.
  143 + *
  144 + * Returns a pointer to the L2 table on success, or NULL if the read from
  145 + * the image file failed.
  146 + */
  147 +
  148 +static uint64_t *l2_load(BlockDriverState *bs, uint64_t l2_offset)
  149 +{
  150 + BDRVQcowState *s = bs->opaque;
  151 + int min_index;
  152 + uint64_t *l2_table;
  153 +
  154 + /* seek if the table for the given offset is in the cache */
  155 +
  156 + l2_table = seek_l2_table(s, l2_offset);
  157 + if (l2_table != NULL)
  158 + return l2_table;
  159 +
  160 + /* not found: load a new entry in the least used one */
  161 +
  162 + min_index = l2_cache_new_entry(bs);
  163 + l2_table = s->l2_cache + (min_index << s->l2_bits);
  164 + if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
  165 + s->l2_size * sizeof(uint64_t))
  166 + return NULL;
  167 + s->l2_cache_offsets[min_index] = l2_offset;
  168 + s->l2_cache_counts[min_index] = 1;
  169 +
  170 + return l2_table;
  171 +}
  172 +
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 * Returns a pointer to the in-cache copy of the new L2 table, or NULL on
 * any read/write error.
 */

static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    uint64_t old_l2_offset, tmp;
    uint64_t *l2_table, l2_offset;

    old_l2_offset = s->l1_table[l1_index];

    /* allocate a new l2 entry */

    l2_offset = alloc_clusters(bs, s->l2_size * sizeof(uint64_t));

    /* update the L1 entry; QCOW_OFLAG_COPIED marks the new table as
     * exclusively owned (writable in place) */

    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;

    /* write the updated L1 entry to disk (big-endian on disk) */
    tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED);
    if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp),
                    &tmp, sizeof(tmp)) != sizeof(tmp))
        return NULL;

    /* allocate a new entry in the l2 cache */

    min_index = l2_cache_new_entry(bs);
    l2_table = s->l2_cache + (min_index << s->l2_bits);

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        /* if there was an old l2 table, read it from the disk
         * (copy-on-write of the L2 table itself) */
        if (bdrv_pread(s->hd, old_l2_offset,
                       l2_table, s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t))
            return NULL;
    }
    /* write the l2 table to the file */
    if (bdrv_pwrite(s->hd, l2_offset,
                    l2_table, s->l2_size * sizeof(uint64_t)) !=
        s->l2_size * sizeof(uint64_t))
        return NULL;

    /* update the l2 cache entry so future lookups hit the new table */

    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    return l2_table;
}
  233 +
/* Count how many of the L2 entries at l2_table[start..start+nb_clusters)
 * map physically contiguous clusters relative to l2_table[0] (flag bits
 * in mask are ignored).  Returns 0 if entry 0 is unallocated. */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t base = be64_to_cpu(l2_table[0]) & ~mask;

    if (!base)
        return 0;

    for (i = start; i < start + nb_clusters; i++) {
        if ((be64_to_cpu(l2_table[i]) & ~mask) != base + i * cluster_size) {
            break;
        }
    }

    return (i - start);
}
  249 +
/* Count the number of leading zero (unallocated) L2 entries in l2_table,
 * looking at no more than nb_clusters entries. */
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int count = 0;

    while (nb_clusters != 0 && l2_table[count] == 0) {
        count++;
        nb_clusters--;
    }

    return count;
}
  259 +
/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported (in-place operation). */
void encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                     uint8_t *out_buf, const uint8_t *in_buf,
                     int nb_sectors, int enc,
                     const AES_KEY *key)
{
    /* 16-byte AES-CBC initialization vector */
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    /* each 512-byte sector is processed independently, using the
     * little-endian sector number (zero padded) as the IV */
    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        /* enc: non-zero to encrypt, 0 to decrypt; passed straight
         * through to AES_cbc_encrypt */
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}
  284 +
  285 +
/*
 * qcow_read
 *
 * Read nb_sectors guest sectors starting at sector_num into buf.  Each
 * iteration maps a run of contiguous sectors via get_cluster_offset()
 * and handles it by cluster type:
 *  - unallocated: read from the backing file if present, else zero-fill
 *  - compressed: decompress into the cluster cache and copy from there
 *  - normal: read from the image file, decrypting if the image is
 *    encrypted
 *
 * Returns 0 on success, -1 on any read/decompression error.
 */
static int qcow_read(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n, n1;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        n = nb_sectors;
        /* n is updated to the number of contiguous sectors available */
        cluster_offset = get_cluster_offset(bs, sector_num << 9, &n);
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        if (!cluster_offset) {
            if (bs->backing_hd) {
                /* read from the base image */
                n1 = backing_read1(bs->backing_hd, sector_num, buf, n);
                if (n1 > 0) {
                    ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
                    if (ret < 0)
                        return -1;
                }
            } else {
                /* unallocated with no backing file: reads as zeros */
                memset(buf, 0, 512 * n);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            if (decompress_cluster(s, cluster_offset) < 0)
                return -1;
            /* decompress_cluster() left the whole cluster in
             * s->cluster_cache */
            memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
            if (ret != n * 512)
                return -1;
            if (s->crypt_method) {
                /* decrypt in place (enc == 0) */
                encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                &s->aes_decrypt_key);
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}
  328 +
/*
 * copy_sectors
 *
 * Copy guest sectors [n_start, n_end) relative to start_sect into the
 * host cluster at cluster_offset (copy-on-write helper).  The data is
 * read through qcow_read(), so backing files, compression and
 * decryption are handled; it is re-encrypted before being written if
 * the image is encrypted.
 *
 * Returns 0 on success (including the empty range), a negative value
 * on read/write error.
 */
static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
                        uint64_t cluster_offset, int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0)
        return 0;
    /* s->cluster_data is used as a bounce buffer */
    ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);
    if (ret < 0)
        return ret;
    if (s->crypt_method) {
        encrypt_sectors(s, start_sect + n_start,
                        s->cluster_data,
                        s->cluster_data, n, 1,
                        &s->aes_encrypt_key);
    }
    ret = bdrv_write(s->hd, (cluster_offset >> 9) + n_start,
                     s->cluster_data, n);
    if (ret < 0)
        return ret;
    return 0;
}
  353 +
  354 +
/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, return the host cluster offset
 * in the qcow2 file.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster offset (QCOW_OFLAG_COPIED stripped), or 0 if the
 * range is unallocated or the L2 table could not be read.
 */

uint64_t get_cluster_offset(BlockDriverState *bs, uint64_t offset, int *num)
{
    BDRVQcowState *s = bs->opaque;
    int l1_index, l2_index;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int l1_bits, c;
    int index_in_cluster, nb_available, nb_needed, nb_clusters;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    /* sectors needed, counted from the start of the cluster */
    nb_needed = *num + index_in_cluster;

    /* bits of the image offset covered by one L1 entry (one full L2
     * table worth of clusters) */
    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size)
        goto out;

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset)
        goto out;

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    l2_table = l2_load(bs, l2_offset);
    if (l2_table == NULL)
        return 0;

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    /* convert the contiguous cluster count back into sectors */
    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return cluster_offset & ~QCOW_OFLAG_COPIED;
}
  444 +
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 1 on success, 0 on failure (L1 growth, L2 load or L2
 * allocation failed).
 */

static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             uint64_t *new_l2_offset,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    int l1_index, l2_index, ret;
    uint64_t l2_offset, *l2_table;

    /* seek the l2 offset in the l1 table, growing the L1 table first if
     * the offset lies beyond its current end */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = grow_l1_table(bs, l1_index + 1);
        if (ret < 0)
            return 0;
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* COPIED set: the L2 table is writable in place — just load it */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        l2_table = l2_load(bs, l2_offset);
        if (l2_table == NULL)
            return 0;
    } else {
        /* COPIED clear: the table is shared (e.g. with a snapshot) or
         * absent; drop our reference and allocate a fresh, writable copy
         * (copy-on-write of the L2 table itself) */
        if (l2_offset)
            free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        l2_table = l2_allocate(bs, l1_index);
        if (l2_table == NULL)
            return 0;
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_offset = l2_offset;
    *new_l2_index = l2_index;

    return 1;
}
  502 +
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 *
 */

uint64_t alloc_compressed_cluster_offset(BlockDriverState *bs,
                                         uint64_t offset,
                                         int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret == 0)
        return 0;

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    /* cluster already allocated and writable: keep it as-is */
    if (cluster_offset & QCOW_OFLAG_COPIED)
        return cluster_offset & ~QCOW_OFLAG_COPIED;

    if (cluster_offset)
        free_any_clusters(bs, cluster_offset, 1);

    /* compressed data is allocated at byte granularity */
    cluster_offset = alloc_bytes(bs, compressed_size);
    /* number of 512-byte sectors the compressed data spans */
    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    /* encode the sector count in the high bits of the L2 entry */
    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    if (bdrv_pwrite(s->hd,
                    l2_offset + l2_index * sizeof(uint64_t),
                    l2_table + l2_index,
                    sizeof(uint64_t)) != sizeof(uint64_t))
        return 0;

    return cluster_offset;
}
  556 +
/*
 * alloc_cluster_link_l2
 *
 * Commit a cluster allocation described by m (filled in by
 * alloc_cluster_offset()) into the L2 table: copy the unmodified head
 * and tail sectors of the affected clusters (copy-on-write), write the
 * new L2 entries, and free any clusters that a concurrent allocation
 * made obsolete.
 *
 * Returns 0 on success, a negative value (-EIO or a copy error) on
 * failure.
 */
int alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,
                          QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, l2_offset, *l2_table;

    if (m->nb_clusters == 0)
        return 0;

    /* collects the L2 entries replaced below so their clusters can be
     * freed afterwards */
    old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        /* head of the first cluster, before the written range */
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        /* tail of the last cluster, after the written range */
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        if (ret < 0)
            goto err;
    }

    ret = -EIO;
    /* update L2 table */
    if (!get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index))
        goto err;

    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster
         * each write allocates separate cluster and writes data concurrently.
         * The first one to complete updates l2 table with pointer to its
         * cluster the second one has to do RMW (which is done above by
         * copy_sectors()), update l2 table with its cluster pointer and free
         * old cluster. This is what this loop does */
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    if (bdrv_pwrite(s->hd, l2_offset + l2_index * sizeof(uint64_t),
                l2_table + l2_index, m->nb_clusters * sizeof(uint64_t)) !=
            m->nb_clusters * sizeof(uint64_t))
        goto err;

    /* free the clusters whose L2 entries we just replaced */
    for (i = 0; i < j; i++)
        free_any_clusters(bs, be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED,
                          1);

    ret = 0;
err:
    qemu_free(old_cluster);
    return ret;
}
  618 +
/*
 * alloc_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new cluster.
 *
 * On success, m describes the allocation so that a later
 * alloc_cluster_link_l2() call can commit it to the L2 table; *num is
 * set to the number of sectors covered (starting at n_start).
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 *
 */

uint64_t alloc_cluster_offset(BlockDriverState *bs,
                              uint64_t offset,
                              int n_start, int n_end,
                              int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int nb_clusters, i = 0;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret == 0)
        return 0;

    nb_clusters = size_to_clusters(s, n_end << 9);

    /* never allocate past the end of this L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* We keep all QCOW_OFLAG_COPIED clusters */

    if (cluster_offset & QCOW_OFLAG_COPIED) {
        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, 0);

        cluster_offset &= ~QCOW_OFLAG_COPIED;
        /* nothing to link later: the clusters are already in place */
        m->nb_clusters = 0;

        goto out;
    }

    /* for the moment, multiple compressed clusters are not managed */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED)
        nb_clusters = 1;

    /* how many available clusters ? */

    while (i < nb_clusters) {
        /* skip allocated (non-COPIED) entries, then free entries, and
         * stop the run at the first COPIED or compressed entry */
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);

        if(be64_to_cpu(l2_table[l2_index + i]))
            break;

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
            (cluster_offset & QCOW_OFLAG_COMPRESSED))
            break;
    }
    nb_clusters = i;

    /* allocate a new cluster */

    cluster_offset = alloc_clusters(bs, nb_clusters * s->cluster_size);

    /* save info needed for meta data update */
    m->offset = offset;
    m->n_start = n_start;
    m->nb_clusters = nb_clusters;

out:
    /* sectors actually covered, capped at what the caller asked for */
    m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);

    *num = m->nb_available - n_start;

    return cluster_offset;
}
  705 +
/* Inflate a raw deflate stream of buf_size bytes into out_buf.  The
 * output must decompress to exactly out_buf_size bytes.  Returns 0 on
 * success, -1 on any zlib error or size mismatch. */
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm;
    int ret, out_len;

    memset(&strm, 0, sizeof(strm));
    strm.next_in = (uint8_t *)buf;
    strm.avail_in = buf_size;
    strm.next_out = out_buf;
    strm.avail_out = out_buf_size;

    /* negative windowBits (-12): raw deflate data, no zlib header */
    if (inflateInit2(&strm, -12) != Z_OK) {
        return -1;
    }
    ret = inflate(&strm, Z_FINISH);
    out_len = strm.next_out - out_buf;
    inflateEnd(&strm);

    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        return -1;
    }
    return 0;
}
  732 +
/*
 * decompress_cluster
 *
 * Decompress the compressed cluster described by the L2 entry
 * cluster_offset into s->cluster_cache.  The result is cached: nothing
 * is done if that cluster is already the cached one.
 *
 * Returns 0 on success, -1 on read or decompression failure.
 */
int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
{
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    /* byte offset of the compressed data within the image file */
    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        /* sector count of the compressed data, encoded in the high bits
         * of the L2 entry */
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        /* the data may start unaligned within the first sector */
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        ret = bdrv_read(s->hd, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return -1;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -1;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
block/qcow2-refcount.c
@@ -389,6 +389,34 @@ void free_clusters(BlockDriverState *bs, @@ -389,6 +389,34 @@ void free_clusters(BlockDriverState *bs,
389 update_refcount(bs, offset, size, -1); 389 update_refcount(bs, offset, size, -1);
390 } 390 }
391 391
  392 +/*
  393 + * free_any_clusters
  394 + *
  395 + * free clusters according to its type: compressed or not
  396 + *
  397 + */
  398 +
  399 +void free_any_clusters(BlockDriverState *bs,
  400 + uint64_t cluster_offset, int nb_clusters)
  401 +{
  402 + BDRVQcowState *s = bs->opaque;
  403 +
  404 + /* free the cluster */
  405 +
  406 + if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
  407 + int nb_csectors;
  408 + nb_csectors = ((cluster_offset >> s->csize_shift) &
  409 + s->csize_mask) + 1;
  410 + free_clusters(bs, (cluster_offset & s->cluster_offset_mask) & ~511,
  411 + nb_csectors * 512);
  412 + return;
  413 + }
  414 +
  415 + free_clusters(bs, cluster_offset, nb_clusters << s->cluster_bits);
  416 +
  417 + return;
  418 +}
  419 +
392 420
393 421
394 /*********************************************************/ 422 /*********************************************************/
block/qcow2.c
@@ -78,9 +78,6 @@ typedef struct __attribute__((packed)) QCowSnapshotHeader { @@ -78,9 +78,6 @@ typedef struct __attribute__((packed)) QCowSnapshotHeader {
78 } QCowSnapshotHeader; 78 } QCowSnapshotHeader;
79 79
80 80
81 -static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset);  
82 -static int qcow_read(BlockDriverState *bs, int64_t sector_num,  
83 - uint8_t *buf, int nb_sectors);  
84 static int qcow_read_snapshots(BlockDriverState *bs); 81 static int qcow_read_snapshots(BlockDriverState *bs);
85 static void qcow_free_snapshots(BlockDriverState *bs); 82 static void qcow_free_snapshots(BlockDriverState *bs);
86 83
@@ -334,679 +331,12 @@ static int qcow_set_key(BlockDriverState *bs, const char *key) @@ -334,679 +331,12 @@ static int qcow_set_key(BlockDriverState *bs, const char *key)
334 return 0; 331 return 0;
335 } 332 }
336 333
337 -/* The crypt function is compatible with the linux cryptoloop  
338 - algorithm for < 4 GB images. NOTE: out_buf == in_buf is  
339 - supported */  
340 -static void encrypt_sectors(BDRVQcowState *s, int64_t sector_num,  
341 - uint8_t *out_buf, const uint8_t *in_buf,  
342 - int nb_sectors, int enc,  
343 - const AES_KEY *key)  
344 -{  
345 - union {  
346 - uint64_t ll[2];  
347 - uint8_t b[16];  
348 - } ivec;  
349 - int i;  
350 -  
351 - for(i = 0; i < nb_sectors; i++) {  
352 - ivec.ll[0] = cpu_to_le64(sector_num);  
353 - ivec.ll[1] = 0;  
354 - AES_cbc_encrypt(in_buf, out_buf, 512, key,  
355 - ivec.b, enc);  
356 - sector_num++;  
357 - in_buf += 512;  
358 - out_buf += 512;  
359 - }  
360 -}  
361 -  
362 -static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,  
363 - uint64_t cluster_offset, int n_start, int n_end)  
364 -{  
365 - BDRVQcowState *s = bs->opaque;  
366 - int n, ret;  
367 -  
368 - n = n_end - n_start;  
369 - if (n <= 0)  
370 - return 0;  
371 - ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);  
372 - if (ret < 0)  
373 - return ret;  
374 - if (s->crypt_method) {  
375 - encrypt_sectors(s, start_sect + n_start,  
376 - s->cluster_data,  
377 - s->cluster_data, n, 1,  
378 - &s->aes_encrypt_key);  
379 - }  
380 - ret = bdrv_write(s->hd, (cluster_offset >> 9) + n_start,  
381 - s->cluster_data, n);  
382 - if (ret < 0)  
383 - return ret;  
384 - return 0;  
385 -}  
386 -  
387 -void l2_cache_reset(BlockDriverState *bs)  
388 -{  
389 - BDRVQcowState *s = bs->opaque;  
390 -  
391 - memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));  
392 - memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));  
393 - memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));  
394 -}  
395 -  
396 -static inline int l2_cache_new_entry(BlockDriverState *bs)  
397 -{  
398 - BDRVQcowState *s = bs->opaque;  
399 - uint32_t min_count;  
400 - int min_index, i;  
401 -  
402 - /* find a new entry in the least used one */  
403 - min_index = 0;  
404 - min_count = 0xffffffff;  
405 - for(i = 0; i < L2_CACHE_SIZE; i++) {  
406 - if (s->l2_cache_counts[i] < min_count) {  
407 - min_count = s->l2_cache_counts[i];  
408 - min_index = i;  
409 - }  
410 - }  
411 - return min_index;  
412 -}  
413 -  
414 static int64_t align_offset(int64_t offset, int n) 334 static int64_t align_offset(int64_t offset, int n)
415 { 335 {
416 offset = (offset + n - 1) & ~(n - 1); 336 offset = (offset + n - 1) & ~(n - 1);
417 return offset; 337 return offset;
418 } 338 }
419 339
420 -static int grow_l1_table(BlockDriverState *bs, int min_size)  
421 -{  
422 - BDRVQcowState *s = bs->opaque;  
423 - int new_l1_size, new_l1_size2, ret, i;  
424 - uint64_t *new_l1_table;  
425 - uint64_t new_l1_table_offset;  
426 - uint8_t data[12];  
427 -  
428 - new_l1_size = s->l1_size;  
429 - if (min_size <= new_l1_size)  
430 - return 0;  
431 - while (min_size > new_l1_size) {  
432 - new_l1_size = (new_l1_size * 3 + 1) / 2;  
433 - }  
434 -#ifdef DEBUG_ALLOC2  
435 - printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);  
436 -#endif  
437 -  
438 - new_l1_size2 = sizeof(uint64_t) * new_l1_size;  
439 - new_l1_table = qemu_mallocz(new_l1_size2);  
440 - memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));  
441 -  
442 - /* write new table (align to cluster) */  
443 - new_l1_table_offset = alloc_clusters(bs, new_l1_size2);  
444 -  
445 - for(i = 0; i < s->l1_size; i++)  
446 - new_l1_table[i] = cpu_to_be64(new_l1_table[i]);  
447 - ret = bdrv_pwrite(s->hd, new_l1_table_offset, new_l1_table, new_l1_size2);  
448 - if (ret != new_l1_size2)  
449 - goto fail;  
450 - for(i = 0; i < s->l1_size; i++)  
451 - new_l1_table[i] = be64_to_cpu(new_l1_table[i]);  
452 -  
453 - /* set new table */  
454 - cpu_to_be32w((uint32_t*)data, new_l1_size);  
455 - cpu_to_be64w((uint64_t*)(data + 4), new_l1_table_offset);  
456 - if (bdrv_pwrite(s->hd, offsetof(QCowHeader, l1_size), data,  
457 - sizeof(data)) != sizeof(data))  
458 - goto fail;  
459 - qemu_free(s->l1_table);  
460 - free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));  
461 - s->l1_table_offset = new_l1_table_offset;  
462 - s->l1_table = new_l1_table;  
463 - s->l1_size = new_l1_size;  
464 - return 0;  
465 - fail:  
466 - qemu_free(s->l1_table);  
467 - return -EIO;  
468 -}  
469 -  
470 -/*  
471 - * seek_l2_table  
472 - *  
473 - * seek l2_offset in the l2_cache table  
474 - * if not found, return NULL,  
475 - * if found,  
476 - * increments the l2 cache hit count of the entry,  
477 - * if counter overflow, divide by two all counters  
478 - * return the pointer to the l2 cache entry  
479 - *  
480 - */  
481 -  
482 -static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)  
483 -{  
484 - int i, j;  
485 -  
486 - for(i = 0; i < L2_CACHE_SIZE; i++) {  
487 - if (l2_offset == s->l2_cache_offsets[i]) {  
488 - /* increment the hit count */  
489 - if (++s->l2_cache_counts[i] == 0xffffffff) {  
490 - for(j = 0; j < L2_CACHE_SIZE; j++) {  
491 - s->l2_cache_counts[j] >>= 1;  
492 - }  
493 - }  
494 - return s->l2_cache + (i << s->l2_bits);  
495 - }  
496 - }  
497 - return NULL;  
498 -}  
499 -  
500 -/*  
501 - * l2_load  
502 - *  
503 - * Loads a L2 table into memory. If the table is in the cache, the cache  
504 - * is used; otherwise the L2 table is loaded from the image file.  
505 - *  
506 - * Returns a pointer to the L2 table on success, or NULL if the read from  
507 - * the image file failed.  
508 - */  
509 -  
510 -static uint64_t *l2_load(BlockDriverState *bs, uint64_t l2_offset)  
511 -{  
512 - BDRVQcowState *s = bs->opaque;  
513 - int min_index;  
514 - uint64_t *l2_table;  
515 -  
516 - /* seek if the table for the given offset is in the cache */  
517 -  
518 - l2_table = seek_l2_table(s, l2_offset);  
519 - if (l2_table != NULL)  
520 - return l2_table;  
521 -  
522 - /* not found: load a new entry in the least used one */  
523 -  
524 - min_index = l2_cache_new_entry(bs);  
525 - l2_table = s->l2_cache + (min_index << s->l2_bits);  
526 - if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=  
527 - s->l2_size * sizeof(uint64_t))  
528 - return NULL;  
529 - s->l2_cache_offsets[min_index] = l2_offset;  
530 - s->l2_cache_counts[min_index] = 1;  
531 -  
532 - return l2_table;  
533 -}  
534 -  
535 -/*  
536 - * l2_allocate  
537 - *  
538 - * Allocate a new l2 entry in the file. If l1_index points to an already  
539 - * used entry in the L2 table (i.e. we are doing a copy on write for the L2  
540 - * table) copy the contents of the old L2 table into the newly allocated one.  
541 - * Otherwise the new table is initialized with zeros.  
542 - *  
543 - */  
544 -  
545 -static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index)  
546 -{  
547 - BDRVQcowState *s = bs->opaque;  
548 - int min_index;  
549 - uint64_t old_l2_offset, tmp;  
550 - uint64_t *l2_table, l2_offset;  
551 -  
552 - old_l2_offset = s->l1_table[l1_index];  
553 -  
554 - /* allocate a new l2 entry */  
555 -  
556 - l2_offset = alloc_clusters(bs, s->l2_size * sizeof(uint64_t));  
557 -  
558 - /* update the L1 entry */  
559 -  
560 - s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;  
561 -  
562 - tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED);  
563 - if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp),  
564 - &tmp, sizeof(tmp)) != sizeof(tmp))  
565 - return NULL;  
566 -  
567 - /* allocate a new entry in the l2 cache */  
568 -  
569 - min_index = l2_cache_new_entry(bs);  
570 - l2_table = s->l2_cache + (min_index << s->l2_bits);  
571 -  
572 - if (old_l2_offset == 0) {  
573 - /* if there was no old l2 table, clear the new table */  
574 - memset(l2_table, 0, s->l2_size * sizeof(uint64_t));  
575 - } else {  
576 - /* if there was an old l2 table, read it from the disk */  
577 - if (bdrv_pread(s->hd, old_l2_offset,  
578 - l2_table, s->l2_size * sizeof(uint64_t)) !=  
579 - s->l2_size * sizeof(uint64_t))  
580 - return NULL;  
581 - }  
582 - /* write the l2 table to the file */  
583 - if (bdrv_pwrite(s->hd, l2_offset,  
584 - l2_table, s->l2_size * sizeof(uint64_t)) !=  
585 - s->l2_size * sizeof(uint64_t))  
586 - return NULL;  
587 -  
588 - /* update the l2 cache entry */  
589 -  
590 - s->l2_cache_offsets[min_index] = l2_offset;  
591 - s->l2_cache_counts[min_index] = 1;  
592 -  
593 - return l2_table;  
594 -}  
595 -  
596 -static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,  
597 - uint64_t *l2_table, uint64_t start, uint64_t mask)  
598 -{  
599 - int i;  
600 - uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;  
601 -  
602 - if (!offset)  
603 - return 0;  
604 -  
605 - for (i = start; i < start + nb_clusters; i++)  
606 - if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))  
607 - break;  
608 -  
609 - return (i - start);  
610 -}  
611 -  
612 -static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)  
613 -{  
614 - int i = 0;  
615 -  
616 - while(nb_clusters-- && l2_table[i] == 0)  
617 - i++;  
618 -  
619 - return i;  
620 -}  
621 -  
622 -/*  
623 - * get_cluster_offset  
624 - *  
625 - * For a given offset of the disk image, return cluster offset in  
626 - * qcow2 file.  
627 - *  
628 - * on entry, *num is the number of contiguous clusters we'd like to  
629 - * access following offset.  
630 - *  
631 - * on exit, *num is the number of contiguous clusters we can read.  
632 - *  
633 - * Return 1, if the offset is found  
634 - * Return 0, otherwise.  
635 - *  
636 - */  
637 -  
638 -static uint64_t get_cluster_offset(BlockDriverState *bs,  
639 - uint64_t offset, int *num)  
640 -{  
641 - BDRVQcowState *s = bs->opaque;  
642 - int l1_index, l2_index;  
643 - uint64_t l2_offset, *l2_table, cluster_offset;  
644 - int l1_bits, c;  
645 - int index_in_cluster, nb_available, nb_needed, nb_clusters;  
646 -  
647 - index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);  
648 - nb_needed = *num + index_in_cluster;  
649 -  
650 - l1_bits = s->l2_bits + s->cluster_bits;  
651 -  
652 - /* compute how many bytes there are between the offset and  
653 - * the end of the l1 entry  
654 - */  
655 -  
656 - nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1));  
657 -  
658 - /* compute the number of available sectors */  
659 -  
660 - nb_available = (nb_available >> 9) + index_in_cluster;  
661 -  
662 - if (nb_needed > nb_available) {  
663 - nb_needed = nb_available;  
664 - }  
665 -  
666 - cluster_offset = 0;  
667 -  
668 - /* seek the the l2 offset in the l1 table */  
669 -  
670 - l1_index = offset >> l1_bits;  
671 - if (l1_index >= s->l1_size)  
672 - goto out;  
673 -  
674 - l2_offset = s->l1_table[l1_index];  
675 -  
676 - /* seek the l2 table of the given l2 offset */  
677 -  
678 - if (!l2_offset)  
679 - goto out;  
680 -  
681 - /* load the l2 table in memory */  
682 -  
683 - l2_offset &= ~QCOW_OFLAG_COPIED;  
684 - l2_table = l2_load(bs, l2_offset);  
685 - if (l2_table == NULL)  
686 - return 0;  
687 -  
688 - /* find the cluster offset for the given disk offset */  
689 -  
690 - l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);  
691 - cluster_offset = be64_to_cpu(l2_table[l2_index]);  
692 - nb_clusters = size_to_clusters(s, nb_needed << 9);  
693 -  
694 - if (!cluster_offset) {  
695 - /* how many empty clusters ? */  
696 - c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);  
697 - } else {  
698 - /* how many allocated clusters ? */  
699 - c = count_contiguous_clusters(nb_clusters, s->cluster_size,  
700 - &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);  
701 - }  
702 -  
703 - nb_available = (c * s->cluster_sectors);  
704 -out:  
705 - if (nb_available > nb_needed)  
706 - nb_available = nb_needed;  
707 -  
708 - *num = nb_available - index_in_cluster;  
709 -  
710 - return cluster_offset & ~QCOW_OFLAG_COPIED;  
711 -}  
712 -  
713 -/*  
714 - * free_any_clusters  
715 - *  
716 - * free clusters according to its type: compressed or not  
717 - *  
718 - */  
719 -  
720 -static void free_any_clusters(BlockDriverState *bs,  
721 - uint64_t cluster_offset, int nb_clusters)  
722 -{  
723 - BDRVQcowState *s = bs->opaque;  
724 -  
725 - /* free the cluster */  
726 -  
727 - if (cluster_offset & QCOW_OFLAG_COMPRESSED) {  
728 - int nb_csectors;  
729 - nb_csectors = ((cluster_offset >> s->csize_shift) &  
730 - s->csize_mask) + 1;  
731 - free_clusters(bs, (cluster_offset & s->cluster_offset_mask) & ~511,  
732 - nb_csectors * 512);  
733 - return;  
734 - }  
735 -  
736 - free_clusters(bs, cluster_offset, nb_clusters << s->cluster_bits);  
737 -  
738 - return;  
739 -}  
740 -  
741 -/*  
742 - * get_cluster_table  
743 - *  
744 - * for a given disk offset, load (and allocate if needed)  
745 - * the l2 table.  
746 - *  
747 - * the l2 table offset in the qcow2 file and the cluster index  
748 - * in the l2 table are given to the caller.  
749 - *  
750 - */  
751 -  
752 -static int get_cluster_table(BlockDriverState *bs, uint64_t offset,  
753 - uint64_t **new_l2_table,  
754 - uint64_t *new_l2_offset,  
755 - int *new_l2_index)  
756 -{  
757 - BDRVQcowState *s = bs->opaque;  
758 - int l1_index, l2_index, ret;  
759 - uint64_t l2_offset, *l2_table;  
760 -  
761 - /* seek the the l2 offset in the l1 table */  
762 -  
763 - l1_index = offset >> (s->l2_bits + s->cluster_bits);  
764 - if (l1_index >= s->l1_size) {  
765 - ret = grow_l1_table(bs, l1_index + 1);  
766 - if (ret < 0)  
767 - return 0;  
768 - }  
769 - l2_offset = s->l1_table[l1_index];  
770 -  
771 - /* seek the l2 table of the given l2 offset */  
772 -  
773 - if (l2_offset & QCOW_OFLAG_COPIED) {  
774 - /* load the l2 table in memory */  
775 - l2_offset &= ~QCOW_OFLAG_COPIED;  
776 - l2_table = l2_load(bs, l2_offset);  
777 - if (l2_table == NULL)  
778 - return 0;  
779 - } else {  
780 - if (l2_offset)  
781 - free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));  
782 - l2_table = l2_allocate(bs, l1_index);  
783 - if (l2_table == NULL)  
784 - return 0;  
785 - l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;  
786 - }  
787 -  
788 - /* find the cluster offset for the given disk offset */  
789 -  
790 - l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);  
791 -  
792 - *new_l2_table = l2_table;  
793 - *new_l2_offset = l2_offset;  
794 - *new_l2_index = l2_index;  
795 -  
796 - return 1;  
797 -}  
798 -  
799 -/*  
800 - * alloc_compressed_cluster_offset  
801 - *  
802 - * For a given offset of the disk image, return cluster offset in  
803 - * qcow2 file.  
804 - *  
805 - * If the offset is not found, allocate a new compressed cluster.  
806 - *  
807 - * Return the cluster offset if successful,  
808 - * Return 0, otherwise.  
809 - *  
810 - */  
811 -  
812 -static uint64_t alloc_compressed_cluster_offset(BlockDriverState *bs,  
813 - uint64_t offset,  
814 - int compressed_size)  
815 -{  
816 - BDRVQcowState *s = bs->opaque;  
817 - int l2_index, ret;  
818 - uint64_t l2_offset, *l2_table, cluster_offset;  
819 - int nb_csectors;  
820 -  
821 - ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);  
822 - if (ret == 0)  
823 - return 0;  
824 -  
825 - cluster_offset = be64_to_cpu(l2_table[l2_index]);  
826 - if (cluster_offset & QCOW_OFLAG_COPIED)  
827 - return cluster_offset & ~QCOW_OFLAG_COPIED;  
828 -  
829 - if (cluster_offset)  
830 - free_any_clusters(bs, cluster_offset, 1);  
831 -  
832 - cluster_offset = alloc_bytes(bs, compressed_size);  
833 - nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -  
834 - (cluster_offset >> 9);  
835 -  
836 - cluster_offset |= QCOW_OFLAG_COMPRESSED |  
837 - ((uint64_t)nb_csectors << s->csize_shift);  
838 -  
839 - /* update L2 table */  
840 -  
841 - /* compressed clusters never have the copied flag */  
842 -  
843 - l2_table[l2_index] = cpu_to_be64(cluster_offset);  
844 - if (bdrv_pwrite(s->hd,  
845 - l2_offset + l2_index * sizeof(uint64_t),  
846 - l2_table + l2_index,  
847 - sizeof(uint64_t)) != sizeof(uint64_t))  
848 - return 0;  
849 -  
850 - return cluster_offset;  
851 -}  
852 -  
853 -typedef struct QCowL2Meta  
854 -{  
855 - uint64_t offset;  
856 - int n_start;  
857 - int nb_available;  
858 - int nb_clusters;  
859 -} QCowL2Meta;  
860 -  
861 -static int alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,  
862 - QCowL2Meta *m)  
863 -{  
864 - BDRVQcowState *s = bs->opaque;  
865 - int i, j = 0, l2_index, ret;  
866 - uint64_t *old_cluster, start_sect, l2_offset, *l2_table;  
867 -  
868 - if (m->nb_clusters == 0)  
869 - return 0;  
870 -  
871 - old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t));  
872 -  
873 - /* copy content of unmodified sectors */  
874 - start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;  
875 - if (m->n_start) {  
876 - ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);  
877 - if (ret < 0)  
878 - goto err;  
879 - }  
880 -  
881 - if (m->nb_available & (s->cluster_sectors - 1)) {  
882 - uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);  
883 - ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),  
884 - m->nb_available - end, s->cluster_sectors);  
885 - if (ret < 0)  
886 - goto err;  
887 - }  
888 -  
889 - ret = -EIO;  
890 - /* update L2 table */  
891 - if (!get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index))  
892 - goto err;  
893 -  
894 - for (i = 0; i < m->nb_clusters; i++) {  
895 - /* if two concurrent writes happen to the same unallocated cluster  
896 - * each write allocates separate cluster and writes data concurrently.  
897 - * The first one to complete updates l2 table with pointer to its  
898 - * cluster the second one has to do RMW (which is done above by  
899 - * copy_sectors()), update l2 table with its cluster pointer and free  
900 - * old cluster. This is what this loop does */  
901 - if(l2_table[l2_index + i] != 0)  
902 - old_cluster[j++] = l2_table[l2_index + i];  
903 -  
904 - l2_table[l2_index + i] = cpu_to_be64((cluster_offset +  
905 - (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);  
906 - }  
907 -  
908 - if (bdrv_pwrite(s->hd, l2_offset + l2_index * sizeof(uint64_t),  
909 - l2_table + l2_index, m->nb_clusters * sizeof(uint64_t)) !=  
910 - m->nb_clusters * sizeof(uint64_t))  
911 - goto err;  
912 -  
913 - for (i = 0; i < j; i++)  
914 - free_any_clusters(bs, be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED,  
915 - 1);  
916 -  
917 - ret = 0;  
918 -err:  
919 - qemu_free(old_cluster);  
920 - return ret;  
921 - }  
922 -  
923 -/*  
924 - * alloc_cluster_offset  
925 - *  
926 - * For a given offset of the disk image, return cluster offset in  
927 - * qcow2 file.  
928 - *  
929 - * If the offset is not found, allocate a new cluster.  
930 - *  
931 - * Return the cluster offset if successful,  
932 - * Return 0, otherwise.  
933 - *  
934 - */  
935 -  
936 -static uint64_t alloc_cluster_offset(BlockDriverState *bs,  
937 - uint64_t offset,  
938 - int n_start, int n_end,  
939 - int *num, QCowL2Meta *m)  
940 -{  
941 - BDRVQcowState *s = bs->opaque;  
942 - int l2_index, ret;  
943 - uint64_t l2_offset, *l2_table, cluster_offset;  
944 - int nb_clusters, i = 0;  
945 -  
946 - ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);  
947 - if (ret == 0)  
948 - return 0;  
949 -  
950 - nb_clusters = size_to_clusters(s, n_end << 9);  
951 -  
952 - nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);  
953 -  
954 - cluster_offset = be64_to_cpu(l2_table[l2_index]);  
955 -  
956 - /* We keep all QCOW_OFLAG_COPIED clusters */  
957 -  
958 - if (cluster_offset & QCOW_OFLAG_COPIED) {  
959 - nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,  
960 - &l2_table[l2_index], 0, 0);  
961 -  
962 - cluster_offset &= ~QCOW_OFLAG_COPIED;  
963 - m->nb_clusters = 0;  
964 -  
965 - goto out;  
966 - }  
967 -  
968 - /* for the moment, multiple compressed clusters are not managed */  
969 -  
970 - if (cluster_offset & QCOW_OFLAG_COMPRESSED)  
971 - nb_clusters = 1;  
972 -  
973 - /* how many available clusters ? */  
974 -  
975 - while (i < nb_clusters) {  
976 - i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,  
977 - &l2_table[l2_index], i, 0);  
978 -  
979 - if(be64_to_cpu(l2_table[l2_index + i]))  
980 - break;  
981 -  
982 - i += count_contiguous_free_clusters(nb_clusters - i,  
983 - &l2_table[l2_index + i]);  
984 -  
985 - cluster_offset = be64_to_cpu(l2_table[l2_index + i]);  
986 -  
987 - if ((cluster_offset & QCOW_OFLAG_COPIED) ||  
988 - (cluster_offset & QCOW_OFLAG_COMPRESSED))  
989 - break;  
990 - }  
991 - nb_clusters = i;  
992 -  
993 - /* allocate a new cluster */  
994 -  
995 - cluster_offset = alloc_clusters(bs, nb_clusters * s->cluster_size);  
996 -  
997 - /* save info needed for meta data update */  
998 - m->offset = offset;  
999 - m->n_start = n_start;  
1000 - m->nb_clusters = nb_clusters;  
1001 -  
1002 -out:  
1003 - m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);  
1004 -  
1005 - *num = m->nb_available - n_start;  
1006 -  
1007 - return cluster_offset;  
1008 -}  
1009 -  
1010 static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num, 340 static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
1011 int nb_sectors, int *pnum) 341 int nb_sectors, int *pnum)
1012 { 342 {
@@ -1018,59 +348,9 @@ static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num, @@ -1018,59 +348,9 @@ static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
1018 return (cluster_offset != 0); 348 return (cluster_offset != 0);
1019 } 349 }
1020 350
1021 -static int decompress_buffer(uint8_t *out_buf, int out_buf_size,  
1022 - const uint8_t *buf, int buf_size)  
1023 -{  
1024 - z_stream strm1, *strm = &strm1;  
1025 - int ret, out_len;  
1026 -  
1027 - memset(strm, 0, sizeof(*strm));  
1028 -  
1029 - strm->next_in = (uint8_t *)buf;  
1030 - strm->avail_in = buf_size;  
1031 - strm->next_out = out_buf;  
1032 - strm->avail_out = out_buf_size;  
1033 -  
1034 - ret = inflateInit2(strm, -12);  
1035 - if (ret != Z_OK)  
1036 - return -1;  
1037 - ret = inflate(strm, Z_FINISH);  
1038 - out_len = strm->next_out - out_buf;  
1039 - if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||  
1040 - out_len != out_buf_size) {  
1041 - inflateEnd(strm);  
1042 - return -1;  
1043 - }  
1044 - inflateEnd(strm);  
1045 - return 0;  
1046 -}  
1047 -  
1048 -static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)  
1049 -{  
1050 - int ret, csize, nb_csectors, sector_offset;  
1051 - uint64_t coffset;  
1052 -  
1053 - coffset = cluster_offset & s->cluster_offset_mask;  
1054 - if (s->cluster_cache_offset != coffset) {  
1055 - nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;  
1056 - sector_offset = coffset & 511;  
1057 - csize = nb_csectors * 512 - sector_offset;  
1058 - ret = bdrv_read(s->hd, coffset >> 9, s->cluster_data, nb_csectors);  
1059 - if (ret < 0) {  
1060 - return -1;  
1061 - }  
1062 - if (decompress_buffer(s->cluster_cache, s->cluster_size,  
1063 - s->cluster_data + sector_offset, csize) < 0) {  
1064 - return -1;  
1065 - }  
1066 - s->cluster_cache_offset = coffset;  
1067 - }  
1068 - return 0;  
1069 -}  
1070 -  
1071 /* handle reading after the end of the backing file */ 351 /* handle reading after the end of the backing file */
1072 -static int backing_read1(BlockDriverState *bs,  
1073 - int64_t sector_num, uint8_t *buf, int nb_sectors) 352 +int backing_read1(BlockDriverState *bs,
  353 + int64_t sector_num, uint8_t *buf, int nb_sectors)
1074 { 354 {
1075 int n1; 355 int n1;
1076 if ((sector_num + nb_sectors) <= bs->total_sectors) 356 if ((sector_num + nb_sectors) <= bs->total_sectors)
@@ -1083,49 +363,6 @@ static int backing_read1(BlockDriverState *bs, @@ -1083,49 +363,6 @@ static int backing_read1(BlockDriverState *bs,
1083 return n1; 363 return n1;
1084 } 364 }
1085 365
1086 -static int qcow_read(BlockDriverState *bs, int64_t sector_num,  
1087 - uint8_t *buf, int nb_sectors)  
1088 -{  
1089 - BDRVQcowState *s = bs->opaque;  
1090 - int ret, index_in_cluster, n, n1;  
1091 - uint64_t cluster_offset;  
1092 -  
1093 - while (nb_sectors > 0) {  
1094 - n = nb_sectors;  
1095 - cluster_offset = get_cluster_offset(bs, sector_num << 9, &n);  
1096 - index_in_cluster = sector_num & (s->cluster_sectors - 1);  
1097 - if (!cluster_offset) {  
1098 - if (bs->backing_hd) {  
1099 - /* read from the base image */  
1100 - n1 = backing_read1(bs->backing_hd, sector_num, buf, n);  
1101 - if (n1 > 0) {  
1102 - ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);  
1103 - if (ret < 0)  
1104 - return -1;  
1105 - }  
1106 - } else {  
1107 - memset(buf, 0, 512 * n);  
1108 - }  
1109 - } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {  
1110 - if (decompress_cluster(s, cluster_offset) < 0)  
1111 - return -1;  
1112 - memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);  
1113 - } else {  
1114 - ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);  
1115 - if (ret != n * 512)  
1116 - return -1;  
1117 - if (s->crypt_method) {  
1118 - encrypt_sectors(s, sector_num, buf, buf, n, 0,  
1119 - &s->aes_decrypt_key);  
1120 - }  
1121 - }  
1122 - nb_sectors -= n;  
1123 - sector_num += n;  
1124 - buf += n * 512;  
1125 - }  
1126 - return 0;  
1127 -}  
1128 -  
1129 typedef struct QCowAIOCB { 366 typedef struct QCowAIOCB {
1130 BlockDriverAIOCB common; 367 BlockDriverAIOCB common;
1131 int64_t sector_num; 368 int64_t sector_num;
block/qcow2.h
@@ -124,7 +124,16 @@ typedef struct QCowCreateState { @@ -124,7 +124,16 @@ typedef struct QCowCreateState {
124 int64_t refcount_block_offset; 124 int64_t refcount_block_offset;
125 } QCowCreateState; 125 } QCowCreateState;
126 126
127 -static int size_to_clusters(BDRVQcowState *s, int64_t size) 127 +/* XXX This could be private for qcow2-cluster.c */
  128 +typedef struct QCowL2Meta
  129 +{
  130 + uint64_t offset;
  131 + int n_start;
  132 + int nb_available;
  133 + int nb_clusters;
  134 +} QCowL2Meta;
  135 +
  136 +static inline int size_to_clusters(BDRVQcowState *s, int64_t size)
128 { 137 {
129 return (size + (s->cluster_size - 1)) >> s->cluster_bits; 138 return (size + (s->cluster_size - 1)) >> s->cluster_bits;
130 } 139 }
@@ -133,6 +142,8 @@ static int size_to_clusters(BDRVQcowState *s, int64_t size) @@ -133,6 +142,8 @@ static int size_to_clusters(BDRVQcowState *s, int64_t size)
133 142
134 /* qcow2.c functions */ 143 /* qcow2.c functions */
135 void l2_cache_reset(BlockDriverState *bs); 144 void l2_cache_reset(BlockDriverState *bs);
  145 +int backing_read1(BlockDriverState *bs,
  146 + int64_t sector_num, uint8_t *buf, int nb_sectors);
136 147
137 /* qcow2-refcount.c functions */ 148 /* qcow2-refcount.c functions */
138 int refcount_init(BlockDriverState *bs); 149 int refcount_init(BlockDriverState *bs);
@@ -141,7 +152,9 @@ void refcount_close(BlockDriverState *bs); @@ -141,7 +152,9 @@ void refcount_close(BlockDriverState *bs);
141 int64_t alloc_clusters(BlockDriverState *bs, int64_t size); 152 int64_t alloc_clusters(BlockDriverState *bs, int64_t size);
142 int64_t alloc_bytes(BlockDriverState *bs, int size); 153 int64_t alloc_bytes(BlockDriverState *bs, int size);
143 void free_clusters(BlockDriverState *bs, 154 void free_clusters(BlockDriverState *bs,
144 - int64_t offset, int64_t size); 155 + int64_t offset, int64_t size);
  156 +void free_any_clusters(BlockDriverState *bs,
  157 + uint64_t cluster_offset, int nb_clusters);
145 158
146 void create_refcount_update(QCowCreateState *s, int64_t offset, int64_t size); 159 void create_refcount_update(QCowCreateState *s, int64_t offset, int64_t size);
147 int update_snapshot_refcount(BlockDriverState *bs, 160 int update_snapshot_refcount(BlockDriverState *bs,
@@ -151,4 +164,24 @@ int update_snapshot_refcount(BlockDriverState *bs, @@ -151,4 +164,24 @@ int update_snapshot_refcount(BlockDriverState *bs,
151 164
152 int check_refcounts(BlockDriverState *bs); 165 int check_refcounts(BlockDriverState *bs);
153 166
  167 +/* qcow2-cluster.c functions */
  168 +int grow_l1_table(BlockDriverState *bs, int min_size);
  169 +int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset);
  170 +void encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
  171 + uint8_t *out_buf, const uint8_t *in_buf,
  172 + int nb_sectors, int enc,
  173 + const AES_KEY *key);
  174 +
  175 +uint64_t get_cluster_offset(BlockDriverState *bs, uint64_t offset, int *num);
  176 +uint64_t alloc_cluster_offset(BlockDriverState *bs,
  177 + uint64_t offset,
  178 + int n_start, int n_end,
  179 + int *num, QCowL2Meta *m);
  180 +uint64_t alloc_compressed_cluster_offset(BlockDriverState *bs,
  181 + uint64_t offset,
  182 + int compressed_size);
  183 +
  184 +int alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,
  185 + QCowL2Meta *m);
  186 +
154 #endif 187 #endif