Commit 108534b9681579002f46942139a77732a2510436

Authored by aliguori
1 parent 16b98a97

qcow2: Extract code from get_cluster_offset() (Laurent Vivier)

Extract code from get_cluster_offset() into new functions:

- seek_l2_table()

Search an l2 offset in the l2_cache table.

- l2_load()

Read the l2 entry from disk.

- l2_allocate()

Allocate a new l2 entry.

Some comment fixups from Kevin Wolf

Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Kevin Wolf <kwolf@suse.de>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>



git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5003 c046a42c-6fe2-441c-8c8c-71466251a162
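
For readers skimming the diff below: the hit-counter aging kept by the new seek_l2_table() (increment a counter on every cache hit, halve all counters when one would overflow) can be tried in isolation. The following is a minimal standalone sketch of that idea only; CACHE_SIZE, the plain hit_counts array and the main() driver are illustrative placeholders, not qcow2 structures.

    /* Minimal demo of the L2-cache hit-counter aging used by seek_l2_table():
     * each hit bumps a 32-bit counter; when a counter saturates, every counter
     * is halved so the relative usage ordering is preserved. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_SIZE 16

    static uint32_t hit_counts[CACHE_SIZE];

    static void record_hit(int i)
    {
        if (++hit_counts[i] == 0xffffffff) {
            int j;
            /* counter saturated: age all entries, keeping their relative order */
            for (j = 0; j < CACHE_SIZE; j++) {
                hit_counts[j] >>= 1;
            }
        }
    }

    int main(void)
    {
        hit_counts[3] = 0xfffffffe;   /* one hit away from saturation */
        hit_counts[5] = 100;
        record_hit(3);                /* triggers the halving of all counters */
        printf("entry 3: %" PRIu32 ", entry 5: %" PRIu32 "\n",
               hit_counts[3], hit_counts[5]);
        return 0;
    }
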
Showing 1 changed file with 153 additions and 62 deletions
block-qcow2.c
@@ -480,101 +480,191 @@ static int grow_l1_table(BlockDriverState *bs, int min_size)
     return -EIO;
 }
 
-/* 'allocate' is:
+/*
+ * seek_l2_table
+ *
+ * seek l2_offset in the l2_cache table
+ * if not found, return NULL,
+ * if found,
+ *   increments the l2 cache hit count of the entry,
+ *   if counter overflow, divide by two all counters
+ *   return the pointer to the l2 cache entry
+ *
+ */
+
+static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)
+{
+    int i, j;
+
+    for(i = 0; i < L2_CACHE_SIZE; i++) {
+        if (l2_offset == s->l2_cache_offsets[i]) {
+            /* increment the hit count */
+            if (++s->l2_cache_counts[i] == 0xffffffff) {
+                for(j = 0; j < L2_CACHE_SIZE; j++) {
+                    s->l2_cache_counts[j] >>= 1;
+                }
+            }
+            return s->l2_cache + (i << s->l2_bits);
+        }
+    }
+    return NULL;
+}
+
+/*
+ * l2_load
  *
- * 0 not to allocate.
+ * Loads a L2 table into memory. If the table is in the cache, the cache
+ * is used; otherwise the L2 table is loaded from the image file.
  *
- * 1 to allocate a normal cluster (for sector indexes 'n_start' to
- * 'n_end')
+ * Returns a pointer to the L2 table on success, or NULL if the read from
+ * the image file failed.
+ */
+
+static uint64_t *l2_load(BlockDriverState *bs, uint64_t l2_offset)
+{
+    BDRVQcowState *s = bs->opaque;
+    int min_index;
+    uint64_t *l2_table;
+
+    /* seek if the table for the given offset is in the cache */
+
+    l2_table = seek_l2_table(s, l2_offset);
+    if (l2_table != NULL)
+        return l2_table;
+
+    /* not found: load a new entry in the least used one */
+
+    min_index = l2_cache_new_entry(bs);
+    l2_table = s->l2_cache + (min_index << s->l2_bits);
+    if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
+        s->l2_size * sizeof(uint64_t))
+        return NULL;
+    s->l2_cache_offsets[min_index] = l2_offset;
+    s->l2_cache_counts[min_index] = 1;
+
+    return l2_table;
+}
+
+/*
+ * l2_allocate
  *
- * 2 to allocate a compressed cluster of size
- * 'compressed_size'. 'compressed_size' must be > 0 and <
- * cluster_size
+ * Allocate a new l2 entry in the file. If l1_index points to an already
+ * used entry in the L2 table (i.e. we are doing a copy on write for the L2
+ * table) copy the contents of the old L2 table into the newly allocated one.
+ * Otherwise the new table is initialized with zeros.
  *
- * return 0 if not allocated.
  */
+
+static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index)
+{
+    BDRVQcowState *s = bs->opaque;
+    int min_index;
+    uint64_t old_l2_offset, tmp;
+    uint64_t *l2_table, l2_offset;
+
+    old_l2_offset = s->l1_table[l1_index];
+
+    /* allocate a new l2 entry */
+
+    l2_offset = alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
+
+    /* update the L1 entry */
+
+    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
+
+    tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED);
+    if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp),
+                    &tmp, sizeof(tmp)) != sizeof(tmp))
+        return NULL;
+
+    /* allocate a new entry in the l2 cache */
+
+    min_index = l2_cache_new_entry(bs);
+    l2_table = s->l2_cache + (min_index << s->l2_bits);
+
+    if (old_l2_offset == 0) {
+        /* if there was no old l2 table, clear the new table */
+        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
+    } else {
+        /* if there was an old l2 table, read it from the disk */
+        if (bdrv_pread(s->hd, old_l2_offset,
+                       l2_table, s->l2_size * sizeof(uint64_t)) !=
+            s->l2_size * sizeof(uint64_t))
+            return NULL;
+    }
+    /* write the l2 table to the file */
+    if (bdrv_pwrite(s->hd, l2_offset,
+                    l2_table, s->l2_size * sizeof(uint64_t)) !=
+        s->l2_size * sizeof(uint64_t))
+        return NULL;
+
+    /* update the l2 cache entry */
+
+    s->l2_cache_offsets[min_index] = l2_offset;
+    s->l2_cache_counts[min_index] = 1;
+
+    return l2_table;
+}
+
 static uint64_t get_cluster_offset(BlockDriverState *bs,
                                    uint64_t offset, int allocate,
                                    int compressed_size,
                                    int n_start, int n_end)
 {
     BDRVQcowState *s = bs->opaque;
-    int min_index, i, j, l1_index, l2_index, ret;
-    uint64_t l2_offset, *l2_table, cluster_offset, tmp, old_l2_offset;
+    int l1_index, l2_index, ret;
+    uint64_t l2_offset, *l2_table, cluster_offset, tmp;
+
+    /* seek the the l2 offset in the l1 table */
 
     l1_index = offset >> (s->l2_bits + s->cluster_bits);
     if (l1_index >= s->l1_size) {
         /* outside l1 table is allowed: we grow the table if needed */
         if (!allocate)
             return 0;
-        if (grow_l1_table(bs, l1_index + 1) < 0)
+        ret = grow_l1_table(bs, l1_index + 1);
+        if (ret < 0)
             return 0;
     }
     l2_offset = s->l1_table[l1_index];
+
+    /* seek the l2 table of the given l2 offset */
+
     if (!l2_offset) {
+        /* the l2 table doesn't exist */
        if (!allocate)
             return 0;
- l2_allocate:
-        old_l2_offset = l2_offset;
-        /* allocate a new l2 entry */
-        l2_offset = alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
-        /* update the L1 entry */
-        s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
-        tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED);
-        if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp),
-                        &tmp, sizeof(tmp)) != sizeof(tmp))
-            return 0;
-        min_index = l2_cache_new_entry(bs);
-        l2_table = s->l2_cache + (min_index << s->l2_bits);
-
-        if (old_l2_offset == 0) {
-            memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
-        } else {
-            if (bdrv_pread(s->hd, old_l2_offset,
-                           l2_table, s->l2_size * sizeof(uint64_t)) !=
-                s->l2_size * sizeof(uint64_t))
-                return 0;
-        }
-        if (bdrv_pwrite(s->hd, l2_offset,
-                        l2_table, s->l2_size * sizeof(uint64_t)) !=
-            s->l2_size * sizeof(uint64_t))
+        /* allocate a new l2 table for this offset */
+        l2_table = l2_allocate(bs, l1_index);
+        if (l2_table == NULL)
             return 0;
+        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
     } else {
-        if (!(l2_offset & QCOW_OFLAG_COPIED)) {
-            if (allocate) {
-                free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
-                goto l2_allocate;
-            }
+        /* the l2 table exists */
+        if (!(l2_offset & QCOW_OFLAG_COPIED) && allocate) {
+            /* duplicate the l2 table, and free the old table */
+            free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
+            l2_table = l2_allocate(bs, l1_index);
+            if (l2_table == NULL)
+                return 0;
+            l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
         } else {
+            /* load the l2 table in memory */
             l2_offset &= ~QCOW_OFLAG_COPIED;
+            l2_table = l2_load(bs, l2_offset);
+            if (l2_table == NULL)
+                return 0;
         }
-        for(i = 0; i < L2_CACHE_SIZE; i++) {
-            if (l2_offset == s->l2_cache_offsets[i]) {
-                /* increment the hit count */
-                if (++s->l2_cache_counts[i] == 0xffffffff) {
-                    for(j = 0; j < L2_CACHE_SIZE; j++) {
-                        s->l2_cache_counts[j] >>= 1;
-                    }
-                }
-                l2_table = s->l2_cache + (i << s->l2_bits);
-                goto found;
-            }
-        }
-        /* not found: load a new entry in the least used one */
-        min_index = l2_cache_new_entry(bs);
-        l2_table = s->l2_cache + (min_index << s->l2_bits);
-        if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
-            s->l2_size * sizeof(uint64_t))
-            return 0;
     }
-    s->l2_cache_offsets[min_index] = l2_offset;
-    s->l2_cache_counts[min_index] = 1;
- found:
+
+    /* find the cluster offset for the given disk offset */
+
     l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
     cluster_offset = be64_to_cpu(l2_table[l2_index]);
     if (!cluster_offset) {
+        /* cluster doesn't exist */
         if (!allocate)
-            return cluster_offset;
+            return 0;
     } else if (!(cluster_offset & QCOW_OFLAG_COPIED)) {
         if (!allocate)
             return cluster_offset;
@@ -592,6 +682,7 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
         cluster_offset &= ~QCOW_OFLAG_COPIED;
         return cluster_offset;
     }
+
     if (allocate == 1) {
         /* allocate a new cluster */
         cluster_offset = alloc_clusters(bs, s->cluster_size);
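
A side note on the indexing that get_cluster_offset() performs before it touches the L2 table: the guest offset is split into an L1 index, an L2 index and an intra-cluster offset using cluster_bits and l2_bits. Below is a self-contained sketch of that arithmetic with example bit widths; cluster_bits = 12 and l2_bits = 9 are illustrative values, not read from a real image header.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int cluster_bits = 12;              /* example: 4096-byte clusters */
        int l2_bits = 9;                    /* example: 512 entries per L2 table */
        uint64_t offset = 0x12345678ULL;    /* arbitrary guest offset */

        /* same arithmetic as get_cluster_offset(); l2_size == 1 << l2_bits */
        uint64_t l1_index = offset >> (l2_bits + cluster_bits);
        uint64_t l2_index = (offset >> cluster_bits) & ((1ULL << l2_bits) - 1);
        uint64_t in_cluster = offset & ((1ULL << cluster_bits) - 1);

        printf("l1_index=%" PRIu64 " l2_index=%" PRIu64 " in_cluster=%" PRIu64 "\n",
               l1_index, l2_index, in_cluster);
        return 0;
    }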