Commit ce1a14dc0d94cf85393356f56f197c5e8b6a7f60

Authored by pbrook
1 parent 51d6bae7

Dynamically allocate AIO Completion Blocks.


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@2098 c046a42c-6fe2-441c-8c8c-71466251a162
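The change below removes the bdrv_aio_new()/bdrv_aio_delete() pair: instead of pre-allocating one completion block per device, each bdrv_aio_read()/bdrv_aio_write() call now obtains a driver-sized AIOCB from qemu_aio_get() (the size comes from the new BlockDriver.aiocb_size field) and returns it to a per-driver free list with qemu_aio_release() when the request completes or is cancelled. Every driver's private AIOCB embeds BlockDriverAIOCB as its first member, so the generic handle and the driver state are one allocation. A minimal sketch of the driver-side pattern, with hypothetical names (MyAIOCB, my_aio_read, my_aio_complete) standing in for the qcow/raw code below; it is illustrative, not a compilable excerpt of the tree:

    /* Sketch only: condensed from the converted drivers below. */
    typedef struct MyAIOCB {
        BlockDriverAIOCB common;     /* must be first: callers hold &acb->common */
        BlockDriverAIOCB *hd_aiocb;  /* nested request on the underlying file    */
        /* ... per-request driver state ... */
    } MyAIOCB;

    static BlockDriverAIOCB *my_aio_read(BlockDriverState *bs,
        int64_t sector_num, uint8_t *buf, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
    {
        MyAIOCB *acb = qemu_aio_get(bs, cb, opaque);  /* sized via .aiocb_size */
        if (!acb)
            return NULL;
        acb->hd_aiocb = NULL;        /* recycled blocks are not re-zeroed */
        /* submit the first nested request here, passing acb as the opaque */
        return &acb->common;
    }

    static void my_aio_complete(MyAIOCB *acb, int ret)
    {
        acb->common.cb(acb->common.opaque, ret);   /* notify the caller        */
        qemu_aio_release(acb);                     /* back onto the free list  */
    }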
block-qcow.c
@@ -522,7 +522,8 @@ static int qcow_write(BlockDriverState *bs, int64_t sector_num,
522 return 0; 522 return 0;
523 } 523 }
524 524
525 -typedef struct { 525 +typedef struct QCowAIOCB {
  526 + BlockDriverAIOCB common;
526 int64_t sector_num; 527 int64_t sector_num;
527 uint8_t *buf; 528 uint8_t *buf;
528 int nb_sectors; 529 int nb_sectors;
@@ -530,223 +531,198 @@ typedef struct {
530 uint64_t cluster_offset; 531 uint64_t cluster_offset;
531 uint8_t *cluster_data; 532 uint8_t *cluster_data;
532 BlockDriverAIOCB *hd_aiocb; 533 BlockDriverAIOCB *hd_aiocb;
533 - BlockDriverAIOCB *backing_hd_aiocb;  
534 } QCowAIOCB; 534 } QCowAIOCB;
535 535
536 -static void qcow_aio_delete(BlockDriverAIOCB *acb);  
537 -  
538 -static int qcow_aio_new(BlockDriverAIOCB *acb)  
539 -{  
540 - BlockDriverState *bs = acb->bs;  
541 - BDRVQcowState *s = bs->opaque;  
542 - QCowAIOCB *acb1;  
543 - acb1 = qemu_mallocz(sizeof(QCowAIOCB));  
544 - if (!acb1)  
545 - return -1;  
546 - acb->opaque = acb1;  
547 - acb1->hd_aiocb = bdrv_aio_new(s->hd);  
548 - if (!acb1->hd_aiocb)  
549 - goto fail;  
550 - if (bs->backing_hd) {  
551 - acb1->backing_hd_aiocb = bdrv_aio_new(bs->backing_hd);  
552 - if (!acb1->backing_hd_aiocb)  
553 - goto fail;  
554 - }  
555 - return 0;  
556 - fail:  
557 - qcow_aio_delete(acb);  
558 - return -1;  
559 -}  
560 -  
561 static void qcow_aio_read_cb(void *opaque, int ret) 536 static void qcow_aio_read_cb(void *opaque, int ret)
562 { 537 {
563 - BlockDriverAIOCB *acb = opaque;  
564 - BlockDriverState *bs = acb->bs; 538 + QCowAIOCB *acb = opaque;
  539 + BlockDriverState *bs = acb->common.bs;
565 BDRVQcowState *s = bs->opaque; 540 BDRVQcowState *s = bs->opaque;
566 - QCowAIOCB *acb1 = acb->opaque;  
567 int index_in_cluster; 541 int index_in_cluster;
568 542
  543 + acb->hd_aiocb = NULL;
569 if (ret < 0) { 544 if (ret < 0) {
570 fail: 545 fail:
571 - acb->cb(acb->cb_opaque, ret); 546 + acb->common.cb(acb->common.opaque, ret);
  547 + qemu_aio_release(acb);
572 return; 548 return;
573 } 549 }
574 550
575 redo: 551 redo:
576 /* post process the read buffer */ 552 /* post process the read buffer */
577 - if (!acb1->cluster_offset) { 553 + if (!acb->cluster_offset) {
578 /* nothing to do */ 554 /* nothing to do */
579 - } else if (acb1->cluster_offset & QCOW_OFLAG_COMPRESSED) { 555 + } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
580 /* nothing to do */ 556 /* nothing to do */
581 } else { 557 } else {
582 if (s->crypt_method) { 558 if (s->crypt_method) {
583 - encrypt_sectors(s, acb1->sector_num, acb1->buf, acb1->buf,  
584 - acb1->n, 0, 559 + encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
  560 + acb->n, 0,
585 &s->aes_decrypt_key); 561 &s->aes_decrypt_key);
586 } 562 }
587 } 563 }
588 564
589 - acb1->nb_sectors -= acb1->n;  
590 - acb1->sector_num += acb1->n;  
591 - acb1->buf += acb1->n * 512; 565 + acb->nb_sectors -= acb->n;
  566 + acb->sector_num += acb->n;
  567 + acb->buf += acb->n * 512;
592 568
593 - if (acb1->nb_sectors == 0) { 569 + if (acb->nb_sectors == 0) {
594 /* request completed */ 570 /* request completed */
595 - acb->cb(acb->cb_opaque, 0); 571 + acb->common.cb(acb->common.opaque, 0);
  572 + qemu_aio_release(acb);
596 return; 573 return;
597 } 574 }
598 575
599 /* prepare next AIO request */ 576 /* prepare next AIO request */
600 - acb1->cluster_offset = get_cluster_offset(bs,  
601 - acb1->sector_num << 9,  
602 - 0, 0, 0, 0);  
603 - index_in_cluster = acb1->sector_num & (s->cluster_sectors - 1);  
604 - acb1->n = s->cluster_sectors - index_in_cluster;  
605 - if (acb1->n > acb1->nb_sectors)  
606 - acb1->n = acb1->nb_sectors;  
607 -  
608 - if (!acb1->cluster_offset) { 577 + acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9,
  578 + 0, 0, 0, 0);
  579 + index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
  580 + acb->n = s->cluster_sectors - index_in_cluster;
  581 + if (acb->n > acb->nb_sectors)
  582 + acb->n = acb->nb_sectors;
  583 +
  584 + if (!acb->cluster_offset) {
609 if (bs->backing_hd) { 585 if (bs->backing_hd) {
610 /* read from the base image */ 586 /* read from the base image */
611 - ret = bdrv_aio_read(acb1->backing_hd_aiocb, acb1->sector_num,  
612 - acb1->buf, acb1->n, qcow_aio_read_cb, acb);  
613 - if (ret < 0) 587 + acb->hd_aiocb = bdrv_aio_read(bs->backing_hd,
  588 + acb->sector_num, acb->buf, acb->n, qcow_aio_read_cb, acb);
  589 + if (acb->hd_aiocb == NULL)
614 goto fail; 590 goto fail;
615 } else { 591 } else {
616 /* Note: in this case, no need to wait */ 592 /* Note: in this case, no need to wait */
617 - memset(acb1->buf, 0, 512 * acb1->n); 593 + memset(acb->buf, 0, 512 * acb->n);
618 goto redo; 594 goto redo;
619 } 595 }
620 - } else if (acb1->cluster_offset & QCOW_OFLAG_COMPRESSED) { 596 + } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
621 /* add AIO support for compressed blocks ? */ 597 /* add AIO support for compressed blocks ? */
622 - if (decompress_cluster(s, acb1->cluster_offset) < 0) 598 + if (decompress_cluster(s, acb->cluster_offset) < 0)
623 goto fail; 599 goto fail;
624 - memcpy(acb1->buf,  
625 - s->cluster_cache + index_in_cluster * 512, 512 * acb1->n); 600 + memcpy(acb->buf,
  601 + s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
626 goto redo; 602 goto redo;
627 } else { 603 } else {
628 - if ((acb1->cluster_offset & 511) != 0) { 604 + if ((acb->cluster_offset & 511) != 0) {
629 ret = -EIO; 605 ret = -EIO;
630 goto fail; 606 goto fail;
631 } 607 }
632 - ret = bdrv_aio_read(acb1->hd_aiocb,  
633 - (acb1->cluster_offset >> 9) + index_in_cluster,  
634 - acb1->buf, acb1->n, qcow_aio_read_cb, acb);  
635 - if (ret < 0) 608 + acb->hd_aiocb = bdrv_aio_read(s->hd,
  609 + (acb->cluster_offset >> 9) + index_in_cluster,
  610 + acb->buf, acb->n, qcow_aio_read_cb, acb);
  611 + if (acb->hd_aiocb == NULL)
636 goto fail; 612 goto fail;
637 } 613 }
638 } 614 }
639 615
640 -static int qcow_aio_read(BlockDriverAIOCB *acb, int64_t sector_num,  
641 - uint8_t *buf, int nb_sectors) 616 +static BlockDriverAIOCB *qcow_aio_read(BlockDriverState *bs,
  617 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  618 + BlockDriverCompletionFunc *cb, void *opaque)
642 { 619 {
643 - QCowAIOCB *acb1 = acb->opaque;  
644 -  
645 - acb1->sector_num = sector_num;  
646 - acb1->buf = buf;  
647 - acb1->nb_sectors = nb_sectors;  
648 - acb1->n = 0;  
649 - acb1->cluster_offset = 0; 620 + QCowAIOCB *acb;
  621 +
  622 + acb = qemu_aio_get(bs, cb, opaque);
  623 + if (!acb)
  624 + return NULL;
  625 + acb->hd_aiocb = NULL;
  626 + acb->sector_num = sector_num;
  627 + acb->buf = buf;
  628 + acb->nb_sectors = nb_sectors;
  629 + acb->n = 0;
  630 + acb->cluster_offset = 0;
650 631
651 qcow_aio_read_cb(acb, 0); 632 qcow_aio_read_cb(acb, 0);
652 - return 0; 633 + return &acb->common;
653 } 634 }
654 635
655 static void qcow_aio_write_cb(void *opaque, int ret) 636 static void qcow_aio_write_cb(void *opaque, int ret)
656 { 637 {
657 - BlockDriverAIOCB *acb = opaque;  
658 - BlockDriverState *bs = acb->bs; 638 + QCowAIOCB *acb = opaque;
  639 + BlockDriverState *bs = acb->common.bs;
659 BDRVQcowState *s = bs->opaque; 640 BDRVQcowState *s = bs->opaque;
660 - QCowAIOCB *acb1 = acb->opaque;  
661 int index_in_cluster; 641 int index_in_cluster;
662 uint64_t cluster_offset; 642 uint64_t cluster_offset;
663 const uint8_t *src_buf; 643 const uint8_t *src_buf;
664 - 644 +
  645 + acb->hd_aiocb = NULL;
  646 +
665 if (ret < 0) { 647 if (ret < 0) {
666 fail: 648 fail:
667 - acb->cb(acb->cb_opaque, ret); 649 + acb->common.cb(acb->common.opaque, ret);
  650 + qemu_aio_release(acb);
668 return; 651 return;
669 } 652 }
670 653
671 - acb1->nb_sectors -= acb1->n;  
672 - acb1->sector_num += acb1->n;  
673 - acb1->buf += acb1->n * 512; 654 + acb->nb_sectors -= acb->n;
  655 + acb->sector_num += acb->n;
  656 + acb->buf += acb->n * 512;
674 657
675 - if (acb1->nb_sectors == 0) { 658 + if (acb->nb_sectors == 0) {
676 /* request completed */ 659 /* request completed */
677 - acb->cb(acb->cb_opaque, 0); 660 + acb->common.cb(acb->common.opaque, 0);
  661 + qemu_aio_release(acb);
678 return; 662 return;
679 } 663 }
680 664
681 - index_in_cluster = acb1->sector_num & (s->cluster_sectors - 1);  
682 - acb1->n = s->cluster_sectors - index_in_cluster;  
683 - if (acb1->n > acb1->nb_sectors)  
684 - acb1->n = acb1->nb_sectors;  
685 - cluster_offset = get_cluster_offset(bs, acb1->sector_num << 9, 1, 0, 665 + index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
  666 + acb->n = s->cluster_sectors - index_in_cluster;
  667 + if (acb->n > acb->nb_sectors)
  668 + acb->n = acb->nb_sectors;
  669 + cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0,
686 index_in_cluster, 670 index_in_cluster,
687 - index_in_cluster + acb1->n); 671 + index_in_cluster + acb->n);
688 if (!cluster_offset || (cluster_offset & 511) != 0) { 672 if (!cluster_offset || (cluster_offset & 511) != 0) {
689 ret = -EIO; 673 ret = -EIO;
690 goto fail; 674 goto fail;
691 } 675 }
692 if (s->crypt_method) { 676 if (s->crypt_method) {
693 - if (!acb1->cluster_data) {  
694 - acb1->cluster_data = qemu_mallocz(s->cluster_size);  
695 - if (!acb1->cluster_data) { 677 + if (!acb->cluster_data) {
  678 + acb->cluster_data = qemu_mallocz(s->cluster_size);
  679 + if (!acb->cluster_data) {
696 ret = -ENOMEM; 680 ret = -ENOMEM;
697 goto fail; 681 goto fail;
698 } 682 }
699 } 683 }
700 - encrypt_sectors(s, acb1->sector_num, acb1->cluster_data, acb1->buf,  
701 - acb1->n, 1, &s->aes_encrypt_key);  
702 - src_buf = acb1->cluster_data; 684 + encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
  685 + acb->n, 1, &s->aes_encrypt_key);
  686 + src_buf = acb->cluster_data;
703 } else { 687 } else {
704 - src_buf = acb1->buf; 688 + src_buf = acb->buf;
705 } 689 }
706 - ret = bdrv_aio_write(acb1->hd_aiocb,  
707 - (cluster_offset >> 9) + index_in_cluster,  
708 - src_buf, acb1->n,  
709 - qcow_aio_write_cb, acb);  
710 - if (ret < 0) 690 + acb->hd_aiocb = bdrv_aio_write(s->hd,
  691 + (cluster_offset >> 9) + index_in_cluster,
  692 + src_buf, acb->n,
  693 + qcow_aio_write_cb, acb);
  694 + if (acb->hd_aiocb == NULL)
711 goto fail; 695 goto fail;
712 } 696 }
713 697
714 -static int qcow_aio_write(BlockDriverAIOCB *acb, int64_t sector_num,  
715 - const uint8_t *buf, int nb_sectors) 698 +static BlockDriverAIOCB *qcow_aio_write(BlockDriverState *bs,
  699 + int64_t sector_num, const uint8_t *buf, int nb_sectors,
  700 + BlockDriverCompletionFunc *cb, void *opaque)
716 { 701 {
717 - QCowAIOCB *acb1 = acb->opaque;  
718 - BlockDriverState *bs = acb->bs;  
719 BDRVQcowState *s = bs->opaque; 702 BDRVQcowState *s = bs->opaque;
  703 + QCowAIOCB *acb;
720 704
721 s->cluster_cache_offset = -1; /* disable compressed cache */ 705 s->cluster_cache_offset = -1; /* disable compressed cache */
722 706
723 - acb1->sector_num = sector_num;  
724 - acb1->buf = (uint8_t *)buf;  
725 - acb1->nb_sectors = nb_sectors;  
726 - acb1->n = 0; 707 + acb = qemu_aio_get(bs, cb, opaque);
  708 + if (!acb)
  709 + return NULL;
  710 + acb->hd_aiocb = NULL;
  711 + acb->sector_num = sector_num;
  712 + acb->buf = (uint8_t *)buf;
  713 + acb->nb_sectors = nb_sectors;
  714 + acb->n = 0;
727 715
728 qcow_aio_write_cb(acb, 0); 716 qcow_aio_write_cb(acb, 0);
729 - return 0;  
730 -}  
731 -  
732 -static void qcow_aio_cancel(BlockDriverAIOCB *acb)  
733 -{  
734 - QCowAIOCB *acb1 = acb->opaque;  
735 - if (acb1->hd_aiocb)  
736 - bdrv_aio_cancel(acb1->hd_aiocb);  
737 - if (acb1->backing_hd_aiocb)  
738 - bdrv_aio_cancel(acb1->backing_hd_aiocb); 717 + return &acb->common;
739 } 718 }
740 719
741 -static void qcow_aio_delete(BlockDriverAIOCB *acb) 720 +static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
742 { 721 {
743 - QCowAIOCB *acb1 = acb->opaque;  
744 - if (acb1->hd_aiocb)  
745 - bdrv_aio_delete(acb1->hd_aiocb);  
746 - if (acb1->backing_hd_aiocb)  
747 - bdrv_aio_delete(acb1->backing_hd_aiocb);  
748 - qemu_free(acb1->cluster_data);  
749 - qemu_free(acb1); 722 + QCowAIOCB *acb = (QCowAIOCB *)blockacb;
  723 + if (acb->hd_aiocb)
  724 + bdrv_aio_cancel(acb->hd_aiocb);
  725 + qemu_aio_release(acb);
750 } 726 }
751 727
752 static void qcow_close(BlockDriverState *bs) 728 static void qcow_close(BlockDriverState *bs)
@@ -920,11 +896,10 @@ BlockDriver bdrv_qcow = {
920 qcow_set_key, 896 qcow_set_key,
921 qcow_make_empty, 897 qcow_make_empty,
922 898
923 - .bdrv_aio_new = qcow_aio_new,  
924 .bdrv_aio_read = qcow_aio_read, 899 .bdrv_aio_read = qcow_aio_read,
925 .bdrv_aio_write = qcow_aio_write, 900 .bdrv_aio_write = qcow_aio_write,
926 .bdrv_aio_cancel = qcow_aio_cancel, 901 .bdrv_aio_cancel = qcow_aio_cancel,
927 - .bdrv_aio_delete = qcow_aio_delete, 902 + .aiocb_size = sizeof(QCowAIOCB),
928 .bdrv_write_compressed = qcow_write_compressed, 903 .bdrv_write_compressed = qcow_write_compressed,
929 .bdrv_get_info = qcow_get_info, 904 .bdrv_get_info = qcow_get_info,
930 }; 905 };
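With the table above, bdrv_qcow no longer installs allocation hooks; it only advertises .aiocb_size = sizeof(QCowAIOCB) so the generic layer can allocate the right amount, and qcow_aio_cancel() recovers the driver state by casting the BlockDriverAIOCB pointer back to QCowAIOCB. That cast is valid because `common` is the first member, so both pointers refer to the same address. A stand-alone illustration with stub types (not QEMU code, only the layout argument):

    #include <assert.h>
    #include <stddef.h>

    typedef struct Common { void *bs; } Common;               /* stand-in for BlockDriverAIOCB */
    typedef struct DriverAIOCB { Common common; int n; } DriverAIOCB;

    int main(void)
    {
        DriverAIOCB req;
        Common *generic = &req.common;               /* the handle callers keep       */
        DriverAIOCB *back = (DriverAIOCB *)generic;  /* what the cancel callback does */
        assert(back == &req);
        assert(offsetof(DriverAIOCB, common) == 0);
        return 0;
    }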
block-qcow2.c
@@ -791,7 +791,8 @@ static int qcow_write(BlockDriverState *bs, int64_t sector_num,
791 return 0; 791 return 0;
792 } 792 }
793 793
794 -typedef struct { 794 +typedef struct QCowAIOCB {
  795 + BlockDriverAIOCB common;
795 int64_t sector_num; 796 int64_t sector_num;
796 uint8_t *buf; 797 uint8_t *buf;
797 int nb_sectors; 798 int nb_sectors;
@@ -799,229 +800,211 @@ typedef struct {
799 uint64_t cluster_offset; 800 uint64_t cluster_offset;
800 uint8_t *cluster_data; 801 uint8_t *cluster_data;
801 BlockDriverAIOCB *hd_aiocb; 802 BlockDriverAIOCB *hd_aiocb;
802 - BlockDriverAIOCB *backing_hd_aiocb;  
803 } QCowAIOCB; 803 } QCowAIOCB;
804 804
805 -static void qcow_aio_delete(BlockDriverAIOCB *acb);  
806 -  
807 -static int qcow_aio_new(BlockDriverAIOCB *acb)  
808 -{  
809 - BlockDriverState *bs = acb->bs;  
810 - BDRVQcowState *s = bs->opaque;  
811 - QCowAIOCB *acb1;  
812 - acb1 = qemu_mallocz(sizeof(QCowAIOCB));  
813 - if (!acb1)  
814 - return -1;  
815 - acb->opaque = acb1;  
816 - acb1->hd_aiocb = bdrv_aio_new(s->hd);  
817 - if (!acb1->hd_aiocb)  
818 - goto fail;  
819 - if (bs->backing_hd) {  
820 - acb1->backing_hd_aiocb = bdrv_aio_new(bs->backing_hd);  
821 - if (!acb1->backing_hd_aiocb)  
822 - goto fail;  
823 - }  
824 - return 0;  
825 - fail:  
826 - qcow_aio_delete(acb);  
827 - return -1;  
828 -}  
829 -  
830 static void qcow_aio_read_cb(void *opaque, int ret) 805 static void qcow_aio_read_cb(void *opaque, int ret)
831 { 806 {
832 - BlockDriverAIOCB *acb = opaque;  
833 - BlockDriverState *bs = acb->bs; 807 + QCowAIOCB *acb = opaque;
  808 + BlockDriverState *bs = acb->common.bs;
834 BDRVQcowState *s = bs->opaque; 809 BDRVQcowState *s = bs->opaque;
835 - QCowAIOCB *acb1 = acb->opaque;  
836 int index_in_cluster, n1; 810 int index_in_cluster, n1;
837 811
  812 + acb->hd_aiocb = NULL;
838 if (ret < 0) { 813 if (ret < 0) {
839 fail: 814 fail:
840 - acb->cb(acb->cb_opaque, ret); 815 + acb->common.cb(acb->common.opaque, ret);
  816 + qemu_aio_release(acb);
841 return; 817 return;
842 } 818 }
843 819
844 redo: 820 redo:
845 /* post process the read buffer */ 821 /* post process the read buffer */
846 - if (!acb1->cluster_offset) { 822 + if (!acb->cluster_offset) {
847 /* nothing to do */ 823 /* nothing to do */
848 - } else if (acb1->cluster_offset & QCOW_OFLAG_COMPRESSED) { 824 + } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
849 /* nothing to do */ 825 /* nothing to do */
850 } else { 826 } else {
851 if (s->crypt_method) { 827 if (s->crypt_method) {
852 - encrypt_sectors(s, acb1->sector_num, acb1->buf, acb1->buf,  
853 - acb1->n, 0, 828 + encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
  829 + acb->n, 0,
854 &s->aes_decrypt_key); 830 &s->aes_decrypt_key);
855 } 831 }
856 } 832 }
857 833
858 - acb1->nb_sectors -= acb1->n;  
859 - acb1->sector_num += acb1->n;  
860 - acb1->buf += acb1->n * 512; 834 + acb->nb_sectors -= acb->n;
  835 + acb->sector_num += acb->n;
  836 + acb->buf += acb->n * 512;
861 837
862 - if (acb1->nb_sectors == 0) { 838 + if (acb->nb_sectors == 0) {
863 /* request completed */ 839 /* request completed */
864 - acb->cb(acb->cb_opaque, 0); 840 + acb->common.cb(acb->common.opaque, 0);
  841 + qemu_aio_release(acb);
865 return; 842 return;
866 } 843 }
867 844
868 /* prepare next AIO request */ 845 /* prepare next AIO request */
869 - acb1->cluster_offset = get_cluster_offset(bs,  
870 - acb1->sector_num << 9,  
871 - 0, 0, 0, 0);  
872 - index_in_cluster = acb1->sector_num & (s->cluster_sectors - 1);  
873 - acb1->n = s->cluster_sectors - index_in_cluster;  
874 - if (acb1->n > acb1->nb_sectors)  
875 - acb1->n = acb1->nb_sectors;  
876 -  
877 - if (!acb1->cluster_offset) { 846 + acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9,
  847 + 0, 0, 0, 0);
  848 + index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
  849 + acb->n = s->cluster_sectors - index_in_cluster;
  850 + if (acb->n > acb->nb_sectors)
  851 + acb->n = acb->nb_sectors;
  852 +
  853 + if (!acb->cluster_offset) {
878 if (bs->backing_hd) { 854 if (bs->backing_hd) {
879 /* read from the base image */ 855 /* read from the base image */
880 - n1 = backing_read1(bs->backing_hd, acb1->sector_num,  
881 - acb1->buf, acb1->n); 856 + n1 = backing_read1(bs->backing_hd, acb->sector_num,
  857 + acb->buf, acb->n);
882 if (n1 > 0) { 858 if (n1 > 0) {
883 - ret = bdrv_aio_read(acb1->backing_hd_aiocb, acb1->sector_num,  
884 - acb1->buf, n1, qcow_aio_read_cb, acb);  
885 - if (ret < 0) 859 + acb->hd_aiocb = bdrv_aio_read(bs->backing_hd, acb->sector_num,
  860 + acb->buf, acb->n, qcow_aio_read_cb, acb);
  861 + if (acb->hd_aiocb == NULL)
886 goto fail; 862 goto fail;
887 } else { 863 } else {
888 goto redo; 864 goto redo;
889 } 865 }
890 } else { 866 } else {
891 /* Note: in this case, no need to wait */ 867 /* Note: in this case, no need to wait */
892 - memset(acb1->buf, 0, 512 * acb1->n); 868 + memset(acb->buf, 0, 512 * acb->n);
893 goto redo; 869 goto redo;
894 } 870 }
895 - } else if (acb1->cluster_offset & QCOW_OFLAG_COMPRESSED) { 871 + } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
896 /* add AIO support for compressed blocks ? */ 872 /* add AIO support for compressed blocks ? */
897 - if (decompress_cluster(s, acb1->cluster_offset) < 0) 873 + if (decompress_cluster(s, acb->cluster_offset) < 0)
898 goto fail; 874 goto fail;
899 - memcpy(acb1->buf,  
900 - s->cluster_cache + index_in_cluster * 512, 512 * acb1->n); 875 + memcpy(acb->buf,
  876 + s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
901 goto redo; 877 goto redo;
902 } else { 878 } else {
903 - if ((acb1->cluster_offset & 511) != 0) { 879 + if ((acb->cluster_offset & 511) != 0) {
904 ret = -EIO; 880 ret = -EIO;
905 goto fail; 881 goto fail;
906 } 882 }
907 - ret = bdrv_aio_read(acb1->hd_aiocb,  
908 - (acb1->cluster_offset >> 9) + index_in_cluster,  
909 - acb1->buf, acb1->n, qcow_aio_read_cb, acb);  
910 - if (ret < 0) 883 + acb->hd_aiocb = bdrv_aio_read(s->hd,
  884 + (acb->cluster_offset >> 9) + index_in_cluster,
  885 + acb->buf, acb->n, qcow_aio_read_cb, acb);
  886 + if (acb->hd_aiocb == NULL)
911 goto fail; 887 goto fail;
912 } 888 }
913 } 889 }
914 890
915 -static int qcow_aio_read(BlockDriverAIOCB *acb, int64_t sector_num,  
916 - uint8_t *buf, int nb_sectors) 891 +static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
  892 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  893 + BlockDriverCompletionFunc *cb, void *opaque)
917 { 894 {
918 - QCowAIOCB *acb1 = acb->opaque;  
919 -  
920 - acb1->sector_num = sector_num;  
921 - acb1->buf = buf;  
922 - acb1->nb_sectors = nb_sectors;  
923 - acb1->n = 0;  
924 - acb1->cluster_offset = 0; 895 + QCowAIOCB *acb;
  896 +
  897 + acb = qemu_aio_get(bs, cb, opaque);
  898 + if (!acb)
  899 + return NULL;
  900 + acb->hd_aiocb = NULL;
  901 + acb->sector_num = sector_num;
  902 + acb->buf = buf;
  903 + acb->nb_sectors = nb_sectors;
  904 + acb->n = 0;
  905 + acb->cluster_offset = 0;
  906 + return acb;
  907 +}
  908 +
  909 +static BlockDriverAIOCB *qcow_aio_read(BlockDriverState *bs,
  910 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  911 + BlockDriverCompletionFunc *cb, void *opaque)
  912 +{
  913 + QCowAIOCB *acb;
  914 +
  915 + acb = qcow_aio_setup(bs, sector_num, buf, nb_sectors, cb, opaque);
  916 + if (!acb)
  917 + return NULL;
925 918
926 qcow_aio_read_cb(acb, 0); 919 qcow_aio_read_cb(acb, 0);
927 - return 0; 920 + return &acb->common;
928 } 921 }
929 922
930 static void qcow_aio_write_cb(void *opaque, int ret) 923 static void qcow_aio_write_cb(void *opaque, int ret)
931 { 924 {
932 - BlockDriverAIOCB *acb = opaque;  
933 - BlockDriverState *bs = acb->bs; 925 + QCowAIOCB *acb = opaque;
  926 + BlockDriverState *bs = acb->common.bs;
934 BDRVQcowState *s = bs->opaque; 927 BDRVQcowState *s = bs->opaque;
935 - QCowAIOCB *acb1 = acb->opaque;  
936 int index_in_cluster; 928 int index_in_cluster;
937 uint64_t cluster_offset; 929 uint64_t cluster_offset;
938 const uint8_t *src_buf; 930 const uint8_t *src_buf;
939 - 931 +
  932 + acb->hd_aiocb = NULL;
  933 +
940 if (ret < 0) { 934 if (ret < 0) {
941 fail: 935 fail:
942 - acb->cb(acb->cb_opaque, ret); 936 + acb->common.cb(acb->common.opaque, ret);
  937 + qemu_aio_release(acb);
943 return; 938 return;
944 } 939 }
945 940
946 - acb1->nb_sectors -= acb1->n;  
947 - acb1->sector_num += acb1->n;  
948 - acb1->buf += acb1->n * 512; 941 + acb->nb_sectors -= acb->n;
  942 + acb->sector_num += acb->n;
  943 + acb->buf += acb->n * 512;
949 944
950 - if (acb1->nb_sectors == 0) { 945 + if (acb->nb_sectors == 0) {
951 /* request completed */ 946 /* request completed */
952 - acb->cb(acb->cb_opaque, 0); 947 + acb->common.cb(acb->common.opaque, 0);
  948 + qemu_aio_release(acb);
953 return; 949 return;
954 } 950 }
955 951
956 - index_in_cluster = acb1->sector_num & (s->cluster_sectors - 1);  
957 - acb1->n = s->cluster_sectors - index_in_cluster;  
958 - if (acb1->n > acb1->nb_sectors)  
959 - acb1->n = acb1->nb_sectors;  
960 - cluster_offset = get_cluster_offset(bs, acb1->sector_num << 9, 1, 0, 952 + index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
  953 + acb->n = s->cluster_sectors - index_in_cluster;
  954 + if (acb->n > acb->nb_sectors)
  955 + acb->n = acb->nb_sectors;
  956 + cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0,
961 index_in_cluster, 957 index_in_cluster,
962 - index_in_cluster + acb1->n); 958 + index_in_cluster + acb->n);
963 if (!cluster_offset || (cluster_offset & 511) != 0) { 959 if (!cluster_offset || (cluster_offset & 511) != 0) {
964 ret = -EIO; 960 ret = -EIO;
965 goto fail; 961 goto fail;
966 } 962 }
967 if (s->crypt_method) { 963 if (s->crypt_method) {
968 - if (!acb1->cluster_data) {  
969 - acb1->cluster_data = qemu_mallocz(s->cluster_size);  
970 - if (!acb1->cluster_data) { 964 + if (!acb->cluster_data) {
  965 + acb->cluster_data = qemu_mallocz(s->cluster_size);
  966 + if (!acb->cluster_data) {
971 ret = -ENOMEM; 967 ret = -ENOMEM;
972 goto fail; 968 goto fail;
973 } 969 }
974 } 970 }
975 - encrypt_sectors(s, acb1->sector_num, acb1->cluster_data, acb1->buf,  
976 - acb1->n, 1, &s->aes_encrypt_key);  
977 - src_buf = acb1->cluster_data; 971 + encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
  972 + acb->n, 1, &s->aes_encrypt_key);
  973 + src_buf = acb->cluster_data;
978 } else { 974 } else {
979 - src_buf = acb1->buf; 975 + src_buf = acb->buf;
980 } 976 }
981 - ret = bdrv_aio_write(acb1->hd_aiocb,  
982 - (cluster_offset >> 9) + index_in_cluster,  
983 - src_buf, acb1->n,  
984 - qcow_aio_write_cb, acb);  
985 - if (ret < 0) 977 + acb->hd_aiocb = bdrv_aio_write(s->hd,
  978 + (cluster_offset >> 9) + index_in_cluster,
  979 + src_buf, acb->n,
  980 + qcow_aio_write_cb, acb);
  981 + if (acb->hd_aiocb == NULL)
986 goto fail; 982 goto fail;
987 } 983 }
988 984
989 -static int qcow_aio_write(BlockDriverAIOCB *acb, int64_t sector_num,  
990 - const uint8_t *buf, int nb_sectors) 985 +static BlockDriverAIOCB *qcow_aio_write(BlockDriverState *bs,
  986 + int64_t sector_num, const uint8_t *buf, int nb_sectors,
  987 + BlockDriverCompletionFunc *cb, void *opaque)
991 { 988 {
992 - QCowAIOCB *acb1 = acb->opaque;  
993 - BlockDriverState *bs = acb->bs;  
994 BDRVQcowState *s = bs->opaque; 989 BDRVQcowState *s = bs->opaque;
  990 + QCowAIOCB *acb;
995 991
996 s->cluster_cache_offset = -1; /* disable compressed cache */ 992 s->cluster_cache_offset = -1; /* disable compressed cache */
997 993
998 - acb1->sector_num = sector_num;  
999 - acb1->buf = (uint8_t *)buf;  
1000 - acb1->nb_sectors = nb_sectors;  
1001 - acb1->n = 0; 994 + acb = qcow_aio_setup(bs, sector_num, (uint8_t*)buf, nb_sectors, cb, opaque);
  995 + if (!acb)
  996 + return NULL;
1002 997
1003 qcow_aio_write_cb(acb, 0); 998 qcow_aio_write_cb(acb, 0);
1004 - return 0;  
1005 -}  
1006 -  
1007 -static void qcow_aio_cancel(BlockDriverAIOCB *acb)  
1008 -{  
1009 - QCowAIOCB *acb1 = acb->opaque;  
1010 - if (acb1->hd_aiocb)  
1011 - bdrv_aio_cancel(acb1->hd_aiocb);  
1012 - if (acb1->backing_hd_aiocb)  
1013 - bdrv_aio_cancel(acb1->backing_hd_aiocb); 999 + return &acb->common;
1014 } 1000 }
1015 1001
1016 -static void qcow_aio_delete(BlockDriverAIOCB *acb) 1002 +static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
1017 { 1003 {
1018 - QCowAIOCB *acb1 = acb->opaque;  
1019 - if (acb1->hd_aiocb)  
1020 - bdrv_aio_delete(acb1->hd_aiocb);  
1021 - if (acb1->backing_hd_aiocb)  
1022 - bdrv_aio_delete(acb1->backing_hd_aiocb);  
1023 - qemu_free(acb1->cluster_data);  
1024 - qemu_free(acb1); 1004 + QCowAIOCB *acb = (QCowAIOCB *)blockacb;
  1005 + if (acb->hd_aiocb)
  1006 + bdrv_aio_cancel(acb->hd_aiocb);
  1007 + qemu_aio_release(acb);
1025 } 1008 }
1026 1009
1027 static void qcow_close(BlockDriverState *bs) 1010 static void qcow_close(BlockDriverState *bs)
@@ -2249,11 +2232,10 @@ BlockDriver bdrv_qcow2 = {
2249 qcow_set_key, 2232 qcow_set_key,
2250 qcow_make_empty, 2233 qcow_make_empty,
2251 2234
2252 - .bdrv_aio_new = qcow_aio_new,  
2253 .bdrv_aio_read = qcow_aio_read, 2235 .bdrv_aio_read = qcow_aio_read,
2254 .bdrv_aio_write = qcow_aio_write, 2236 .bdrv_aio_write = qcow_aio_write,
2255 .bdrv_aio_cancel = qcow_aio_cancel, 2237 .bdrv_aio_cancel = qcow_aio_cancel,
2256 - .bdrv_aio_delete = qcow_aio_delete, 2238 + .aiocb_size = sizeof(QCowAIOCB),
2257 .bdrv_write_compressed = qcow_write_compressed, 2239 .bdrv_write_compressed = qcow_write_compressed,
2258 2240
2259 .bdrv_snapshot_create = qcow_snapshot_create, 2241 .bdrv_snapshot_create = qcow_snapshot_create,
block-raw.c
@@ -200,13 +200,13 @@ static int raw_pwrite(BlockDriverState *bs, int64_t offset,
200 /* Unix AOP using POSIX AIO */ 200 /* Unix AOP using POSIX AIO */
201 201
202 typedef struct RawAIOCB { 202 typedef struct RawAIOCB {
  203 + BlockDriverAIOCB common;
203 struct aiocb aiocb; 204 struct aiocb aiocb;
204 - int busy; /* only used for debugging */  
205 - BlockDriverAIOCB *next; 205 + struct RawAIOCB *next;
206 } RawAIOCB; 206 } RawAIOCB;
207 207
208 static int aio_sig_num = SIGUSR2; 208 static int aio_sig_num = SIGUSR2;
209 -static BlockDriverAIOCB *first_aio; /* AIO issued */ 209 +static RawAIOCB *first_aio; /* AIO issued */
210 static int aio_initialized = 0; 210 static int aio_initialized = 0;
211 211
212 static void aio_signal_handler(int signum) 212 static void aio_signal_handler(int signum)
@@ -249,8 +249,7 @@ void qemu_aio_init(void)
249 249
250 void qemu_aio_poll(void) 250 void qemu_aio_poll(void)
251 { 251 {
252 - BlockDriverAIOCB *acb, **pacb;  
253 - RawAIOCB *acb1; 252 + RawAIOCB *acb, **pacb;
254 int ret; 253 int ret;
255 254
256 for(;;) { 255 for(;;) {
@@ -259,17 +258,16 @@ void qemu_aio_poll(void)
259 acb = *pacb; 258 acb = *pacb;
260 if (!acb) 259 if (!acb)
261 goto the_end; 260 goto the_end;
262 - acb1 = acb->opaque;  
263 - ret = aio_error(&acb1->aiocb); 261 + ret = aio_error(&acb->aiocb);
264 if (ret == ECANCELED) { 262 if (ret == ECANCELED) {
265 /* remove the request */ 263 /* remove the request */
266 - acb1->busy = 0;  
267 - *pacb = acb1->next; 264 + *pacb = acb->next;
  265 + qemu_aio_release(acb);
268 } else if (ret != EINPROGRESS) { 266 } else if (ret != EINPROGRESS) {
269 /* end of aio */ 267 /* end of aio */
270 if (ret == 0) { 268 if (ret == 0) {
271 - ret = aio_return(&acb1->aiocb);  
272 - if (ret == acb1->aiocb.aio_nbytes) 269 + ret = aio_return(&acb->aiocb);
  270 + if (ret == acb->aiocb.aio_nbytes)
273 ret = 0; 271 ret = 0;
274 else 272 else
275 ret = -1; 273 ret = -1;
@@ -277,13 +275,13 @@ void qemu_aio_poll(void)
277 ret = -ret; 275 ret = -ret;
278 } 276 }
279 /* remove the request */ 277 /* remove the request */
280 - acb1->busy = 0;  
281 - *pacb = acb1->next; 278 + *pacb = acb->next;
282 /* call the callback */ 279 /* call the callback */
283 - acb->cb(acb->cb_opaque, ret); 280 + acb->common.cb(acb->common.opaque, ret);
  281 + qemu_aio_release(acb);
284 break; 282 break;
285 } else { 283 } else {
286 - pacb = &acb1->next; 284 + pacb = &acb->next;
287 } 285 }
288 } 286 }
289 } 287 }
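The POSIX path above keeps every in-flight request on the singly linked first_aio list: raw_aio_setup() (further down) pushes new RawAIOCBs onto it, and qemu_aio_poll()/raw_aio_cancel() unlink finished or cancelled ones. The unlink uses a pointer-to-pointer walk so that removing the head and removing an interior node are the same statement (*pacb = acb->next). The idiom, shown on a hypothetical node type so it reads in isolation (not QEMU code):

    struct node { struct node *next; };

    /* Remove 'victim' from the list rooted at *head, if present. */
    static void unlink_node(struct node **head, struct node *victim)
    {
        struct node **p;

        for (p = head; *p != NULL; p = &(*p)->next) {
            if (*p == victim) {
                *p = victim->next;   /* same code path for head and middle */
                break;
            }
        }
    }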
@@ -324,70 +322,70 @@ void qemu_aio_wait_end(void)
324 sigprocmask(SIG_SETMASK, &wait_oset, NULL); 322 sigprocmask(SIG_SETMASK, &wait_oset, NULL);
325 } 323 }
326 324
327 -static int raw_aio_new(BlockDriverAIOCB *acb) 325 +static RawAIOCB *raw_aio_setup(BlockDriverState *bs,
  326 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  327 + BlockDriverCompletionFunc *cb, void *opaque)
328 { 328 {
329 - RawAIOCB *acb1;  
330 - BDRVRawState *s = acb->bs->opaque;  
331 -  
332 - acb1 = qemu_mallocz(sizeof(RawAIOCB));  
333 - if (!acb1)  
334 - return -1;  
335 - acb->opaque = acb1;  
336 - acb1->aiocb.aio_fildes = s->fd;  
337 - acb1->aiocb.aio_sigevent.sigev_signo = aio_sig_num;  
338 - acb1->aiocb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;  
339 - return 0; 329 + BDRVRawState *s = bs->opaque;
  330 + RawAIOCB *acb;
  331 +
  332 + acb = qemu_aio_get(bs, cb, opaque);
  333 + if (!acb)
  334 + return NULL;
  335 + acb->aiocb.aio_fildes = s->fd;
  336 + acb->aiocb.aio_sigevent.sigev_signo = aio_sig_num;
  337 + acb->aiocb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
  338 + acb->aiocb.aio_buf = buf;
  339 + acb->aiocb.aio_nbytes = nb_sectors * 512;
  340 + acb->aiocb.aio_offset = sector_num * 512;
  341 + acb->next = first_aio;
  342 + first_aio = acb;
  343 + return acb;
340 } 344 }
341 345
342 -static int raw_aio_read(BlockDriverAIOCB *acb, int64_t sector_num,  
343 - uint8_t *buf, int nb_sectors) 346 +static BlockDriverAIOCB *raw_aio_read(BlockDriverState *bs,
  347 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  348 + BlockDriverCompletionFunc *cb, void *opaque)
344 { 349 {
345 - RawAIOCB *acb1 = acb->opaque; 350 + RawAIOCB *acb;
346 351
347 - assert(acb1->busy == 0);  
348 - acb1->busy = 1;  
349 - acb1->aiocb.aio_buf = buf;  
350 - acb1->aiocb.aio_nbytes = nb_sectors * 512;  
351 - acb1->aiocb.aio_offset = sector_num * 512;  
352 - acb1->next = first_aio;  
353 - first_aio = acb;  
354 - if (aio_read(&acb1->aiocb) < 0) {  
355 - acb1->busy = 0;  
356 - return -errno; 352 + acb = raw_aio_setup(bs, sector_num, buf, nb_sectors, cb, opaque);
  353 + if (!acb)
  354 + return NULL;
  355 + if (aio_read(&acb->aiocb) < 0) {
  356 + qemu_aio_release(acb);
  357 + return NULL;
357 } 358 }
358 - return 0; 359 + return &acb->common;
359 } 360 }
360 361
361 -static int raw_aio_write(BlockDriverAIOCB *acb, int64_t sector_num,  
362 - const uint8_t *buf, int nb_sectors) 362 +static BlockDriverAIOCB *raw_aio_write(BlockDriverState *bs,
  363 + int64_t sector_num, const uint8_t *buf, int nb_sectors,
  364 + BlockDriverCompletionFunc *cb, void *opaque)
363 { 365 {
364 - RawAIOCB *acb1 = acb->opaque; 366 + RawAIOCB *acb;
365 367
366 - assert(acb1->busy == 0);  
367 - acb1->busy = 1;  
368 - acb1->aiocb.aio_buf = (uint8_t *)buf;  
369 - acb1->aiocb.aio_nbytes = nb_sectors * 512;  
370 - acb1->aiocb.aio_offset = sector_num * 512;  
371 - acb1->next = first_aio;  
372 - first_aio = acb;  
373 - if (aio_write(&acb1->aiocb) < 0) {  
374 - acb1->busy = 0;  
375 - return -errno; 368 + acb = raw_aio_setup(bs, sector_num, (uint8_t*)buf, nb_sectors, cb, opaque);
  369 + if (!acb)
  370 + return NULL;
  371 + if (aio_write(&acb->aiocb) < 0) {
  372 + qemu_aio_release(acb);
  373 + return NULL;
376 } 374 }
377 - return 0; 375 + return &acb->common;
378 } 376 }
379 377
380 -static void raw_aio_cancel(BlockDriverAIOCB *acb) 378 +static void raw_aio_cancel(BlockDriverAIOCB *blockacb)
381 { 379 {
382 - RawAIOCB *acb1 = acb->opaque;  
383 int ret; 380 int ret;
384 - BlockDriverAIOCB **pacb; 381 + RawAIOCB *acb = (RawAIOCB *)blockacb;
  382 + RawAIOCB **pacb;
385 383
386 - ret = aio_cancel(acb1->aiocb.aio_fildes, &acb1->aiocb); 384 + ret = aio_cancel(acb->aiocb.aio_fildes, &acb->aiocb);
387 if (ret == AIO_NOTCANCELED) { 385 if (ret == AIO_NOTCANCELED) {
388 /* fail safe: if the aio could not be canceled, we wait for 386 /* fail safe: if the aio could not be canceled, we wait for
389 it */ 387 it */
390 - while (aio_error(&acb1->aiocb) == EINPROGRESS); 388 + while (aio_error(&acb->aiocb) == EINPROGRESS);
391 } 389 }
392 390
393 /* remove the callback from the queue */ 391 /* remove the callback from the queue */
@@ -396,22 +394,14 @@ static void raw_aio_cancel(BlockDriverAIOCB *acb)
396 if (*pacb == NULL) { 394 if (*pacb == NULL) {
397 break; 395 break;
398 } else if (*pacb == acb) { 396 } else if (*pacb == acb) {
399 - acb1->busy = 0;  
400 - *pacb = acb1->next; 397 + *pacb = acb->next;
  398 + qemu_aio_release(acb);
401 break; 399 break;
402 } 400 }
403 - acb1 = (*pacb)->opaque;  
404 - pacb = &acb1->next; 401 + pacb = &acb->next;
405 } 402 }
406 } 403 }
407 404
408 -static void raw_aio_delete(BlockDriverAIOCB *acb)  
409 -{  
410 - RawAIOCB *acb1 = acb->opaque;  
411 - raw_aio_cancel(acb);  
412 - qemu_free(acb1);  
413 -}  
414 -  
415 static void raw_close(BlockDriverState *bs) 405 static void raw_close(BlockDriverState *bs)
416 { 406 {
417 BDRVRawState *s = bs->opaque; 407 BDRVRawState *s = bs->opaque;
@@ -508,11 +498,10 @@ BlockDriver bdrv_raw = {
508 raw_create, 498 raw_create,
509 raw_flush, 499 raw_flush,
510 500
511 - .bdrv_aio_new = raw_aio_new,  
512 .bdrv_aio_read = raw_aio_read, 501 .bdrv_aio_read = raw_aio_read,
513 .bdrv_aio_write = raw_aio_write, 502 .bdrv_aio_write = raw_aio_write,
514 .bdrv_aio_cancel = raw_aio_cancel, 503 .bdrv_aio_cancel = raw_aio_cancel,
515 - .bdrv_aio_delete = raw_aio_delete, 504 + .aiocb_size = sizeof(RawAIOCB),
516 .protocol_name = "file", 505 .protocol_name = "file",
517 .bdrv_pread = raw_pread, 506 .bdrv_pread = raw_pread,
518 .bdrv_pwrite = raw_pwrite, 507 .bdrv_pwrite = raw_pwrite,
@@ -530,6 +519,7 @@ typedef struct BDRVRawState {
530 } BDRVRawState; 519 } BDRVRawState;
531 520
532 typedef struct RawAIOCB { 521 typedef struct RawAIOCB {
  522 + BlockDriverAIOCB common;
533 HANDLE hEvent; 523 HANDLE hEvent;
534 OVERLAPPED ov; 524 OVERLAPPED ov;
535 int count; 525 int count;
@@ -574,6 +564,7 @@ static int raw_open(BlockDriverState *bs, const char *filename, int flags)
574 { 564 {
575 BDRVRawState *s = bs->opaque; 565 BDRVRawState *s = bs->opaque;
576 int access_flags, create_flags; 566 int access_flags, create_flags;
  567 + DWORD overlapped;
577 568
578 if ((flags & BDRV_O_ACCESS) == O_RDWR) { 569 if ((flags & BDRV_O_ACCESS) == O_RDWR) {
579 access_flags = GENERIC_READ | GENERIC_WRITE; 570 access_flags = GENERIC_READ | GENERIC_WRITE;
@@ -585,9 +576,14 @@ static int raw_open(BlockDriverState *bs, const char *filename, int flags)
585 } else { 576 } else {
586 create_flags = OPEN_EXISTING; 577 create_flags = OPEN_EXISTING;
587 } 578 }
  579 +#ifdef QEMU_TOOL
  580 + overlapped = 0;
  581 +#else
  582 + overlapped = FILE_FLAG_OVERLAPPED;
  583 +#endif
588 s->hfile = CreateFile(filename, access_flags, 584 s->hfile = CreateFile(filename, access_flags,
589 FILE_SHARE_READ, NULL, 585 FILE_SHARE_READ, NULL,
590 - create_flags, FILE_FLAG_OVERLAPPED, 0); 586 + create_flags, overlapped, 0);
591 if (s->hfile == INVALID_HANDLE_VALUE) 587 if (s->hfile == INVALID_HANDLE_VALUE)
592 return -1; 588 return -1;
593 return 0; 589 return 0;
@@ -637,104 +633,107 @@ static int raw_pwrite(BlockDriverState *bs, int64_t offset,
637 return ret_count; 633 return ret_count;
638 } 634 }
639 635
640 -static int raw_aio_new(BlockDriverAIOCB *acb)  
641 -{  
642 - RawAIOCB *acb1;  
643 -  
644 - acb1 = qemu_mallocz(sizeof(RawAIOCB));  
645 - if (!acb1)  
646 - return -ENOMEM;  
647 - acb->opaque = acb1;  
648 - acb1->hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);  
649 - if (!acb1->hEvent)  
650 - return -ENOMEM;  
651 - return 0;  
652 -}  
653 #ifndef QEMU_TOOL 636 #ifndef QEMU_TOOL
654 static void raw_aio_cb(void *opaque) 637 static void raw_aio_cb(void *opaque)
655 { 638 {
656 - BlockDriverAIOCB *acb = opaque;  
657 - BlockDriverState *bs = acb->bs; 639 + RawAIOCB *acb = opaque;
  640 + BlockDriverState *bs = acb->common.bs;
658 BDRVRawState *s = bs->opaque; 641 BDRVRawState *s = bs->opaque;
659 - RawAIOCB *acb1 = acb->opaque;  
660 DWORD ret_count; 642 DWORD ret_count;
661 int ret; 643 int ret;
662 644
663 - ret = GetOverlappedResult(s->hfile, &acb1->ov, &ret_count, TRUE);  
664 - if (!ret || ret_count != acb1->count) {  
665 - acb->cb(acb->cb_opaque, -EIO); 645 + ret = GetOverlappedResult(s->hfile, &acb->ov, &ret_count, TRUE);
  646 + if (!ret || ret_count != acb->count) {
  647 + acb->common.cb(acb->common.opaque, -EIO);
666 } else { 648 } else {
667 - acb->cb(acb->cb_opaque, 0); 649 + acb->common.cb(acb->common.opaque, 0);
668 } 650 }
669 } 651 }
670 #endif 652 #endif
671 -static int raw_aio_read(BlockDriverAIOCB *acb, int64_t sector_num,  
672 - uint8_t *buf, int nb_sectors) 653 +
  654 +static RawAIOCB *raw_aio_setup(BlockDriverState *bs,
  655 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  656 + BlockDriverCompletionFunc *cb, void *opaque)
673 { 657 {
674 - BlockDriverState *bs = acb->bs;  
675 - BDRVRawState *s = bs->opaque;  
676 - RawAIOCB *acb1 = acb->opaque;  
677 - int ret; 658 + RawAIOCB *acb;
678 int64_t offset; 659 int64_t offset;
679 660
680 - memset(&acb1->ov, 0, sizeof(acb1->ov)); 661 + acb = qemu_aio_get(bs, cb, opaque);
  662 + if (acb->hEvent) {
  663 + acb->hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
  664 + if (!acb->hEvent) {
  665 + qemu_aio_release(acb);
  666 + return NULL;
  667 + }
  668 + }
  669 + memset(&acb->ov, 0, sizeof(acb->ov));
681 offset = sector_num * 512; 670 offset = sector_num * 512;
682 - acb1->ov.Offset = offset;  
683 - acb1->ov.OffsetHigh = offset >> 32;  
684 - acb1->ov.hEvent = acb1->hEvent;  
685 - acb1->count = nb_sectors * 512; 671 + acb->ov.Offset = offset;
  672 + acb->ov.OffsetHigh = offset >> 32;
  673 + acb->ov.hEvent = acb->hEvent;
  674 + acb->count = nb_sectors * 512;
686 #ifndef QEMU_TOOL 675 #ifndef QEMU_TOOL
687 - qemu_add_wait_object(acb1->ov.hEvent, raw_aio_cb, acb); 676 + qemu_add_wait_object(acb->ov.hEvent, raw_aio_cb, acb);
688 #endif 677 #endif
689 - ret = ReadFile(s->hfile, buf, acb1->count, NULL, &acb1->ov);  
690 - if (!ret)  
691 - return -EIO;  
692 - return 0; 678 + return acb;
693 } 679 }
694 680
695 -static int raw_aio_write(BlockDriverAIOCB *acb, int64_t sector_num,  
696 - uint8_t *buf, int nb_sectors) 681 +static BlockDriverAIOCB *raw_aio_read(BlockDriverState *bs,
  682 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  683 + BlockDriverCompletionFunc *cb, void *opaque)
697 { 684 {
698 - BlockDriverState *bs = acb->bs;  
699 BDRVRawState *s = bs->opaque; 685 BDRVRawState *s = bs->opaque;
700 - RawAIOCB *acb1 = acb->opaque; 686 + RawAIOCB *acb;
701 int ret; 687 int ret;
702 - int64_t offset;  
703 688
704 - memset(&acb1->ov, 0, sizeof(acb1->ov));  
705 - offset = sector_num * 512;  
706 - acb1->ov.Offset = offset;  
707 - acb1->ov.OffsetHigh = offset >> 32;  
708 - acb1->ov.hEvent = acb1->hEvent;  
709 - acb1->count = nb_sectors * 512;  
710 -#ifndef QEMU_TOOL  
711 - qemu_add_wait_object(acb1->ov.hEvent, raw_aio_cb, acb); 689 + acb = raw_aio_setup(bs, sector_num, buf, nb_sectors, cb, opaque);
  690 + if (!acb)
  691 + return NULL;
  692 + ret = ReadFile(s->hfile, buf, acb->count, NULL, &acb->ov);
  693 + if (!ret) {
  694 + qemu_aio_release(acb);
  695 + return NULL;
  696 + }
  697 +#ifdef QEMU_TOOL
  698 + qemu_aio_release(acb);
712 #endif 699 #endif
713 - ret = ReadFile(s->hfile, buf, acb1->count, NULL, &acb1->ov);  
714 - if (!ret)  
715 - return -EIO;  
716 - return 0; 700 + return (BlockDriverAIOCB *)acb;
717 } 701 }
718 702
719 -static void raw_aio_cancel(BlockDriverAIOCB *acb) 703 +static BlockDriverAIOCB *raw_aio_write(BlockDriverState *bs,
  704 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  705 + BlockDriverCompletionFunc *cb, void *opaque)
720 { 706 {
721 - BlockDriverState *bs = acb->bs;  
722 BDRVRawState *s = bs->opaque; 707 BDRVRawState *s = bs->opaque;
723 -#ifndef QEMU_TOOL  
724 - RawAIOCB *acb1 = acb->opaque; 708 + RawAIOCB *acb;
  709 + int ret;
725 710
726 - qemu_del_wait_object(acb1->ov.hEvent, raw_aio_cb, acb); 711 + acb = raw_aio_setup(bs, sector_num, buf, nb_sectors, cb, opaque);
  712 + if (!acb)
  713 + return NULL;
  714 + ret = WriteFile(s->hfile, buf, acb->count, NULL, &acb->ov);
  715 + if (!ret) {
  716 + qemu_aio_release(acb);
  717 + return NULL;
  718 + }
  719 +#ifdef QEMU_TOOL
  720 + qemu_aio_release(acb);
727 #endif 721 #endif
728 - /* XXX: if more than one async I/O it is not correct */  
729 - CancelIo(s->hfile); 722 + return (BlockDriverAIOCB *)acb;
730 } 723 }
731 724
732 -static void raw_aio_delete(BlockDriverAIOCB *acb) 725 +static void raw_aio_cancel(BlockDriverAIOCB *blockacb)
733 { 726 {
734 - RawAIOCB *acb1 = acb->opaque;  
735 - raw_aio_cancel(acb);  
736 - CloseHandle(acb1->hEvent);  
737 - qemu_free(acb1); 727 +#ifndef QEMU_TOOL
  728 + RawAIOCB *acb = (RawAIOCB *)blockacb;
  729 + BlockDriverState *bs = acb->common.bs;
  730 + BDRVRawState *s = bs->opaque;
  731 +
  732 + qemu_del_wait_object(acb->ov.hEvent, raw_aio_cb, acb);
  733 + /* XXX: if more than one async I/O it is not correct */
  734 + CancelIo(s->hfile);
  735 + qemu_aio_release(acb);
  736 +#endif
738 } 737 }
739 738
740 static void raw_flush(BlockDriverState *bs) 739 static void raw_flush(BlockDriverState *bs)
@@ -823,11 +822,10 @@ BlockDriver bdrv_raw = {
823 raw_flush, 822 raw_flush,
824 823
825 #if 0 824 #if 0
826 - .bdrv_aio_new = raw_aio_new,  
827 .bdrv_aio_read = raw_aio_read, 825 .bdrv_aio_read = raw_aio_read,
828 .bdrv_aio_write = raw_aio_write, 826 .bdrv_aio_write = raw_aio_write,
829 .bdrv_aio_cancel = raw_aio_cancel, 827 .bdrv_aio_cancel = raw_aio_cancel,
830 - .bdrv_aio_delete = raw_aio_delete, 828 + .aiocb_size = sizeof(RawAIOCB);
831 #endif 829 #endif
832 .protocol_name = "file", 830 .protocol_name = "file",
833 .bdrv_pread = raw_pread, 831 .bdrv_pread = raw_pread,
block.c
@@ -35,13 +35,13 @@
35 #define SECTOR_BITS 9 35 #define SECTOR_BITS 9
36 #define SECTOR_SIZE (1 << SECTOR_BITS) 36 #define SECTOR_SIZE (1 << SECTOR_BITS)
37 37
38 -static int bdrv_aio_new_em(BlockDriverAIOCB *acb);  
39 -static int bdrv_aio_read_em(BlockDriverAIOCB *acb, int64_t sector_num,  
40 - uint8_t *buf, int nb_sectors);  
41 -static int bdrv_aio_write_em(BlockDriverAIOCB *acb, int64_t sector_num,  
42 - const uint8_t *buf, int nb_sectors); 38 +static BlockDriverAIOCB *bdrv_aio_read_em(BlockDriverState *bs,
  39 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  40 + BlockDriverCompletionFunc *cb, void *opaque);
  41 +static BlockDriverAIOCB *bdrv_aio_write_em(BlockDriverState *bs,
  42 + int64_t sector_num, const uint8_t *buf, int nb_sectors,
  43 + BlockDriverCompletionFunc *cb, void *opaque);
43 static void bdrv_aio_cancel_em(BlockDriverAIOCB *acb); 44 static void bdrv_aio_cancel_em(BlockDriverAIOCB *acb);
44 -static void bdrv_aio_delete_em(BlockDriverAIOCB *acb);  
45 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num, 45 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
46 uint8_t *buf, int nb_sectors); 46 uint8_t *buf, int nb_sectors);
47 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num, 47 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
@@ -106,13 +106,11 @@ void path_combine(char *dest, int dest_size,
106 106
107 void bdrv_register(BlockDriver *bdrv) 107 void bdrv_register(BlockDriver *bdrv)
108 { 108 {
109 - if (!bdrv->bdrv_aio_new) { 109 + if (!bdrv->bdrv_aio_read) {
110 /* add AIO emulation layer */ 110 /* add AIO emulation layer */
111 - bdrv->bdrv_aio_new = bdrv_aio_new_em;  
112 bdrv->bdrv_aio_read = bdrv_aio_read_em; 111 bdrv->bdrv_aio_read = bdrv_aio_read_em;
113 bdrv->bdrv_aio_write = bdrv_aio_write_em; 112 bdrv->bdrv_aio_write = bdrv_aio_write_em;
114 bdrv->bdrv_aio_cancel = bdrv_aio_cancel_em; 113 bdrv->bdrv_aio_cancel = bdrv_aio_cancel_em;
115 - bdrv->bdrv_aio_delete = bdrv_aio_delete_em;  
116 } else if (!bdrv->bdrv_read && !bdrv->bdrv_pread) { 114 } else if (!bdrv->bdrv_read && !bdrv->bdrv_pread) {
117 /* add synchronous IO emulation layer */ 115 /* add synchronous IO emulation layer */
118 bdrv->bdrv_read = bdrv_read_em; 116 bdrv->bdrv_read = bdrv_read_em;
@@ -964,7 +962,9 @@ char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
964 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK"); 962 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
965 } else { 963 } else {
966 ti = sn->date_sec; 964 ti = sn->date_sec;
  965 +#ifndef _WIN32
967 localtime_r(&ti, &tm); 966 localtime_r(&ti, &tm);
  967 +#endif
968 strftime(date_buf, sizeof(date_buf), 968 strftime(date_buf, sizeof(date_buf),
969 "%Y-%m-%d %H:%M:%S", &tm); 969 "%Y-%m-%d %H:%M:%S", &tm);
970 secs = sn->vm_clock_nsec / 1000000000; 970 secs = sn->vm_clock_nsec / 1000000000;
@@ -988,31 +988,14 @@ char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
988 /**************************************************************/ 988 /**************************************************************/
989 /* async I/Os */ 989 /* async I/Os */
990 990
991 -BlockDriverAIOCB *bdrv_aio_new(BlockDriverState *bs) 991 +BlockDriverAIOCB *bdrv_aio_read(BlockDriverState *bs, int64_t sector_num,
  992 + uint8_t *buf, int nb_sectors,
  993 + BlockDriverCompletionFunc *cb, void *opaque)
992 { 994 {
993 BlockDriver *drv = bs->drv; 995 BlockDriver *drv = bs->drv;
994 - BlockDriverAIOCB *acb;  
995 - acb = qemu_mallocz(sizeof(BlockDriverAIOCB));  
996 - if (!acb)  
997 - return NULL;  
998 -  
999 - acb->bs = bs;  
1000 - if (drv->bdrv_aio_new(acb) < 0) {  
1001 - qemu_free(acb);  
1002 - return NULL;  
1003 - }  
1004 - return acb;  
1005 -}  
1006 -  
1007 -int bdrv_aio_read(BlockDriverAIOCB *acb, int64_t sector_num,  
1008 - uint8_t *buf, int nb_sectors,  
1009 - BlockDriverCompletionFunc *cb, void *opaque)  
1010 -{  
1011 - BlockDriverState *bs = acb->bs;  
1012 - BlockDriver *drv = bs->drv;  
1013 996
1014 if (!bs->inserted) 997 if (!bs->inserted)
1015 - return -1; 998 + return NULL;
1016 999
1017 /* XXX: we assume that nb_sectors == 0 is suppored by the async read */ 1000 /* XXX: we assume that nb_sectors == 0 is suppored by the async read */
1018 if (sector_num == 0 && bs->boot_sector_enabled && nb_sectors > 0) { 1001 if (sector_num == 0 && bs->boot_sector_enabled && nb_sectors > 0) {
@@ -1022,141 +1005,114 @@ int bdrv_aio_read(BlockDriverAIOCB *acb, int64_t sector_num,
1022 buf += 512; 1005 buf += 512;
1023 } 1006 }
1024 1007
1025 - acb->cb = cb;  
1026 - acb->cb_opaque = opaque;  
1027 - return drv->bdrv_aio_read(acb, sector_num, buf, nb_sectors); 1008 + return drv->bdrv_aio_read(bs, sector_num, buf, nb_sectors, cb, opaque);
1028 } 1009 }
1029 1010
1030 -int bdrv_aio_write(BlockDriverAIOCB *acb, int64_t sector_num,  
1031 - const uint8_t *buf, int nb_sectors,  
1032 - BlockDriverCompletionFunc *cb, void *opaque) 1011 +BlockDriverAIOCB *bdrv_aio_write(BlockDriverState *bs, int64_t sector_num,
  1012 + const uint8_t *buf, int nb_sectors,
  1013 + BlockDriverCompletionFunc *cb, void *opaque)
1033 { 1014 {
1034 - BlockDriverState *bs = acb->bs;  
1035 BlockDriver *drv = bs->drv; 1015 BlockDriver *drv = bs->drv;
1036 1016
1037 if (!bs->inserted) 1017 if (!bs->inserted)
1038 - return -1; 1018 + return NULL;
1039 if (bs->read_only) 1019 if (bs->read_only)
1040 - return -1; 1020 + return NULL;
1041 if (sector_num == 0 && bs->boot_sector_enabled && nb_sectors > 0) { 1021 if (sector_num == 0 && bs->boot_sector_enabled && nb_sectors > 0) {
1042 memcpy(bs->boot_sector_data, buf, 512); 1022 memcpy(bs->boot_sector_data, buf, 512);
1043 } 1023 }
1044 1024
1045 - acb->cb = cb;  
1046 - acb->cb_opaque = opaque;  
1047 - return drv->bdrv_aio_write(acb, sector_num, buf, nb_sectors); 1025 + return drv->bdrv_aio_write(bs, sector_num, buf, nb_sectors, cb, opaque);
1048 } 1026 }
1049 1027
1050 void bdrv_aio_cancel(BlockDriverAIOCB *acb) 1028 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
1051 - {  
1052 - BlockDriverState *bs = acb->bs;  
1053 - BlockDriver *drv = bs->drv;  
1054 -  
1055 - drv->bdrv_aio_cancel(acb);  
1056 - }  
1057 -  
1058 -void bdrv_aio_delete(BlockDriverAIOCB *acb)  
1059 { 1029 {
1060 - BlockDriverState *bs = acb->bs;  
1061 - BlockDriver *drv = bs->drv; 1030 + BlockDriver *drv = acb->bs->drv;
1062 1031
1063 - drv->bdrv_aio_delete(acb);  
1064 - qemu_free(acb); 1032 + drv->bdrv_aio_cancel(acb);
1065 } 1033 }
1066 1034
  1035 +
1067 /**************************************************************/ 1036 /**************************************************************/
1068 /* async block device emulation */ 1037 /* async block device emulation */
1069 1038
1070 #ifdef QEMU_TOOL 1039 #ifdef QEMU_TOOL
1071 -static int bdrv_aio_new_em(BlockDriverAIOCB *acb)  
1072 -{  
1073 - return 0;  
1074 -}  
1075 -  
1076 -static int bdrv_aio_read_em(BlockDriverAIOCB *acb, int64_t sector_num,  
1077 - uint8_t *buf, int nb_sectors) 1040 +static BlockDriverAIOCB *bdrv_aio_read_em(BlockDriverState *bs,
  1041 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  1042 + BlockDriverCompletionFunc *cb, void *opaque)
1078 { 1043 {
1079 int ret; 1044 int ret;
1080 - ret = bdrv_read(acb->bs, sector_num, buf, nb_sectors);  
1081 - acb->cb(acb->cb_opaque, ret);  
1082 - return 0; 1045 + ret = bdrv_read(bs, sector_num, buf, nb_sectors);
  1046 + cb(opaque, ret);
  1047 + return NULL;
1083 } 1048 }
1084 1049
1085 -static int bdrv_aio_write_em(BlockDriverAIOCB *acb, int64_t sector_num,  
1086 - const uint8_t *buf, int nb_sectors) 1050 +static BlockDriverAIOCB *bdrv_aio_write_em(BlockDriverState *bs,
  1051 + int64_t sector_num, const uint8_t *buf, int nb_sectors,
  1052 + BlockDriverCompletionFunc *cb, void *opaque)
1087 { 1053 {
1088 int ret; 1054 int ret;
1089 - ret = bdrv_write(acb->bs, sector_num, buf, nb_sectors);  
1090 - acb->cb(acb->cb_opaque, ret);  
1091 - return 0; 1055 + ret = bdrv_write(bs, sector_num, buf, nb_sectors);
  1056 + cb(opaque, ret);
  1057 + return NULL;
1092 } 1058 }
1093 1059
1094 static void bdrv_aio_cancel_em(BlockDriverAIOCB *acb) 1060 static void bdrv_aio_cancel_em(BlockDriverAIOCB *acb)
1095 { 1061 {
1096 } 1062 }
1097 -  
1098 -static void bdrv_aio_delete_em(BlockDriverAIOCB *acb)  
1099 -{  
1100 -}  
1101 #else 1063 #else
1102 typedef struct BlockDriverAIOCBSync { 1064 typedef struct BlockDriverAIOCBSync {
  1065 + BlockDriverAIOCB common;
1103 QEMUBH *bh; 1066 QEMUBH *bh;
1104 int ret; 1067 int ret;
1105 } BlockDriverAIOCBSync; 1068 } BlockDriverAIOCBSync;
1106 1069
1107 -static void bdrv_aio_bh_cb(void *opaque)  
1108 -{  
1109 - BlockDriverAIOCB *acb = opaque;  
1110 - BlockDriverAIOCBSync *acb1 = acb->opaque;  
1111 - acb->cb(acb->cb_opaque, acb1->ret);  
1112 -} 1070 +static BlockDriverAIOCBSync *free_acb = NULL;
1113 1071
1114 -static int bdrv_aio_new_em(BlockDriverAIOCB *acb) 1072 +static void bdrv_aio_bh_cb(void *opaque)
1115 { 1073 {
1116 - BlockDriverAIOCBSync *acb1;  
1117 -  
1118 - acb1 = qemu_mallocz(sizeof(BlockDriverAIOCBSync));  
1119 - if (!acb1)  
1120 - return -1;  
1121 - acb->opaque = acb1;  
1122 - acb1->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);  
1123 - return 0; 1074 + BlockDriverAIOCBSync *acb = opaque;
  1075 + acb->common.cb(acb->common.opaque, acb->ret);
  1076 + qemu_aio_release(acb);
1124 } 1077 }
1125 1078
1126 -static int bdrv_aio_read_em(BlockDriverAIOCB *acb, int64_t sector_num,  
1127 - uint8_t *buf, int nb_sectors) 1079 +static BlockDriverAIOCB *bdrv_aio_read_em(BlockDriverState *bs,
  1080 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  1081 + BlockDriverCompletionFunc *cb, void *opaque)
1128 { 1082 {
1129 - BlockDriverAIOCBSync *acb1 = acb->opaque; 1083 + BlockDriverAIOCBSync *acb;
1130 int ret; 1084 int ret;
1131 -  
1132 - ret = bdrv_read(acb->bs, sector_num, buf, nb_sectors);  
1133 - acb1->ret = ret;  
1134 - qemu_bh_schedule(acb1->bh);  
1135 - return 0; 1085 +
  1086 + acb = qemu_aio_get(bs, cb, opaque);
  1087 + if (!acb->bh)
  1088 + acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
  1089 + ret = bdrv_read(bs, sector_num, buf, nb_sectors);
  1090 + acb->ret = ret;
  1091 + qemu_bh_schedule(acb->bh);
  1092 + return &acb->common;
1136 } 1093 }
1137 1094
1138 -static int bdrv_aio_write_em(BlockDriverAIOCB *acb, int64_t sector_num,  
1139 - const uint8_t *buf, int nb_sectors) 1095 +static BlockDriverAIOCB *bdrv_aio_write_em(BlockDriverState *bs,
  1096 + int64_t sector_num, const uint8_t *buf, int nb_sectors,
  1097 + BlockDriverCompletionFunc *cb, void *opaque)
1140 { 1098 {
1141 - BlockDriverAIOCBSync *acb1 = acb->opaque; 1099 + BlockDriverAIOCBSync *acb;
1142 int ret; 1100 int ret;
1143 -  
1144 - ret = bdrv_write(acb->bs, sector_num, buf, nb_sectors);  
1145 - acb1->ret = ret;  
1146 - qemu_bh_schedule(acb1->bh);  
1147 - return 0;  
1148 -}  
1149 1101
1150 -static void bdrv_aio_cancel_em(BlockDriverAIOCB *acb)  
1151 -{  
1152 - BlockDriverAIOCBSync *acb1 = acb->opaque;  
1153 - qemu_bh_cancel(acb1->bh); 1102 + acb = qemu_aio_get(bs, cb, opaque);
  1103 + if (!acb->bh)
  1104 + acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
  1105 + ret = bdrv_write(bs, sector_num, buf, nb_sectors);
  1106 + acb->ret = ret;
  1107 + qemu_bh_schedule(acb->bh);
  1108 + return &acb->common;
1154 } 1109 }
1155 1110
1156 -static void bdrv_aio_delete_em(BlockDriverAIOCB *acb) 1111 +static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
1157 { 1112 {
1158 - BlockDriverAIOCBSync *acb1 = acb->opaque;  
1159 - qemu_bh_delete(acb1->bh); 1113 + BlockDriverAIOCBSync *acb = (BlockDriverAIOCBSync *)blockacb;
  1114 + qemu_bh_cancel(acb->bh);
  1115 + qemu_aio_release(acb);
1160 } 1116 }
1161 #endif /* !QEMU_TOOL */ 1117 #endif /* !QEMU_TOOL */
1162 1118
@@ -1173,20 +1129,16 @@ static void bdrv_rw_em_cb(void *opaque, int ret)
1173 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num, 1129 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
1174 uint8_t *buf, int nb_sectors) 1130 uint8_t *buf, int nb_sectors)
1175 { 1131 {
1176 - int async_ret, ret; 1132 + int async_ret;
  1133 + BlockDriverAIOCB *acb;
1177 1134
1178 - if (!bs->sync_aiocb) {  
1179 - bs->sync_aiocb = bdrv_aio_new(bs);  
1180 - if (!bs->sync_aiocb)  
1181 - return -1;  
1182 - }  
1183 async_ret = NOT_DONE; 1135 async_ret = NOT_DONE;
1184 qemu_aio_wait_start(); 1136 qemu_aio_wait_start();
1185 - ret = bdrv_aio_read(bs->sync_aiocb, sector_num, buf, nb_sectors, 1137 + acb = bdrv_aio_read(bs, sector_num, buf, nb_sectors,
1186 bdrv_rw_em_cb, &async_ret); 1138 bdrv_rw_em_cb, &async_ret);
1187 - if (ret < 0) { 1139 + if (acb == NULL) {
1188 qemu_aio_wait_end(); 1140 qemu_aio_wait_end();
1189 - return ret; 1141 + return -1;
1190 } 1142 }
1191 while (async_ret == NOT_DONE) { 1143 while (async_ret == NOT_DONE) {
1192 qemu_aio_wait(); 1144 qemu_aio_wait();
@@ -1198,20 +1150,16 @@ static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num, @@ -1198,20 +1150,16 @@ static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
1198 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num, 1150 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
1199 const uint8_t *buf, int nb_sectors) 1151 const uint8_t *buf, int nb_sectors)
1200 { 1152 {
1201 - int async_ret, ret; 1153 + int async_ret;
  1154 + BlockDriverAIOCB *acb;
1202 1155
1203 - if (!bs->sync_aiocb) {  
1204 - bs->sync_aiocb = bdrv_aio_new(bs);  
1205 - if (!bs->sync_aiocb)  
1206 - return -1;  
1207 - }  
1208 async_ret = NOT_DONE; 1156 async_ret = NOT_DONE;
1209 qemu_aio_wait_start(); 1157 qemu_aio_wait_start();
1210 - ret = bdrv_aio_write(bs->sync_aiocb, sector_num, buf, nb_sectors, 1158 + acb = bdrv_aio_write(bs, sector_num, buf, nb_sectors,
1211 bdrv_rw_em_cb, &async_ret); 1159 bdrv_rw_em_cb, &async_ret);
1212 - if (ret < 0) { 1160 + if (acb == NULL) {
1213 qemu_aio_wait_end(); 1161 qemu_aio_wait_end();
1214 - return ret; 1162 + return -1;
1215 } 1163 }
1216 while (async_ret == NOT_DONE) { 1164 while (async_ret == NOT_DONE) {
1217 qemu_aio_wait(); 1165 qemu_aio_wait();
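From the caller's point of view, the hunks above show the new shape of the emulated synchronous path: submit with bdrv_aio_read()/bdrv_aio_write(), treat a NULL return as failure, then wait until the completion callback has run. Below is a self-contained sketch of that submit-then-wait pattern; fake_aio_read() completing from inside the submit call and the NOT_DONE sentinel value are simplifications for illustration, not the real qemu_aio_wait() loop.

    #include <stdint.h>
    #include <stdio.h>

    #define NOT_DONE 0x7fffffff         /* sentinel local to this sketch */

    typedef void CompletionFunc(void *opaque, int ret);
    typedef struct FakeAIOCB { int dummy; } FakeAIOCB;

    static FakeAIOCB the_aiocb;

    /* Stand-in for bdrv_aio_read(): the request "completes" from inside
     * the submit call, so the wait loop below never actually spins. */
    static FakeAIOCB *fake_aio_read(int64_t sector_num, uint8_t *buf,
                                    int nb_sectors, CompletionFunc *cb,
                                    void *opaque)
    {
        (void)sector_num; (void)buf; (void)nb_sectors;
        cb(opaque, 0);                  /* report success */
        return &the_aiocb;
    }

    static void rw_cb(void *opaque, int ret)
    {
        *(int *)opaque = ret;           /* store the result for the waiter */
    }

    static int read_em(int64_t sector_num, uint8_t *buf, int nb_sectors)
    {
        int async_ret = NOT_DONE;
        FakeAIOCB *acb = fake_aio_read(sector_num, buf, nb_sectors,
                                       rw_cb, &async_ret);

        if (acb == NULL)
            return -1;                  /* submission itself failed */
        while (async_ret == NOT_DONE) {
            /* the real emulation calls qemu_aio_wait() here */
        }
        return async_ret;
    }

    int main(void)
    {
        uint8_t buf[512];

        printf("emulated read returned %d\n", read_em(0, buf, 1));
        return 0;
    }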
@@ -1235,3 +1183,32 @@ void bdrv_init(void) @@ -1235,3 +1183,32 @@ void bdrv_init(void)
1235 bdrv_register(&bdrv_vvfat); 1183 bdrv_register(&bdrv_vvfat);
1236 bdrv_register(&bdrv_qcow2); 1184 bdrv_register(&bdrv_qcow2);
1237 } 1185 }
  1186 +
  1187 +void *qemu_aio_get(BlockDriverState *bs, BlockDriverCompletionFunc *cb,
  1188 + void *opaque)
  1189 +{
  1190 + BlockDriver *drv;
  1191 + BlockDriverAIOCB *acb;
  1192 +
  1193 + drv = bs->drv;
  1194 + if (drv->free_aiocb) {
  1195 + acb = drv->free_aiocb;
  1196 + drv->free_aiocb = acb->next;
  1197 + } else {
  1198 + acb = qemu_mallocz(drv->aiocb_size);
  1199 + if (!acb)
  1200 + return NULL;
  1201 + }
  1202 + acb->bs = bs;
  1203 + acb->cb = cb;
  1204 + acb->opaque = opaque;
  1205 + return acb;
  1206 +}
  1207 +
  1208 +void qemu_aio_release(void *p)
  1209 +{
  1210 + BlockDriverAIOCB *acb = p;
  1211 + BlockDriver *drv = acb->bs->drv;
  1212 + acb->next = drv->free_aiocb;
  1213 + drv->free_aiocb = acb;
  1214 +}
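qemu_aio_get() above pops a control block off the driver's free list, falling back to a zeroed allocation of drv->aiocb_size, and qemu_aio_release() pushes it back for reuse. The standalone sketch below models the same free-list recycling with trimmed-down structs; passing the Driver pointer into aio_release() is a simplification of the real code, which reaches the driver through acb->bs->drv.

    /* Simplified model of the per-driver AIOCB free list: blocks are
     * allocated once at the driver-specific size, then recycled through
     * a singly linked list threaded on the common 'next' field. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct AIOCB AIOCB;
    struct AIOCB {
        void (*cb)(void *opaque, int ret);
        void *opaque;
        AIOCB *next;                    /* links free blocks together */
    };

    typedef struct Driver {
        size_t aiocb_size;              /* sizeof the driver's extended AIOCB */
        AIOCB *free_aiocb;              /* head of the free list */
    } Driver;

    static void *aio_get(Driver *drv, void (*cb)(void *, int), void *opaque)
    {
        AIOCB *acb;

        if (drv->free_aiocb) {          /* reuse a released block */
            acb = drv->free_aiocb;
            drv->free_aiocb = acb->next;
        } else {                        /* or allocate a fresh, zeroed one */
            acb = calloc(1, drv->aiocb_size);
            if (!acb)
                return NULL;
        }
        acb->cb = cb;
        acb->opaque = opaque;
        return acb;
    }

    static void aio_release(Driver *drv, void *p)
    {
        AIOCB *acb = p;

        acb->next = drv->free_aiocb;    /* push back onto the free list */
        drv->free_aiocb = acb;
    }

    static void noop_cb(void *opaque, int ret) { (void)opaque; (void)ret; }

    int main(void)
    {
        Driver drv = { .aiocb_size = sizeof(AIOCB), .free_aiocb = NULL };
        void *a = aio_get(&drv, noop_cb, NULL);

        aio_release(&drv, a);
        void *b = aio_get(&drv, noop_cb, NULL);   /* same block comes back */
        printf("recycled: %s\n", a == b ? "yes" : "no");
        free(b);
        return 0;
    }

Because released blocks are never freed back to the allocator, a driver pays the malloc cost only for its peak number of in-flight requests.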
block_int.h
@@ -42,13 +42,14 @@ struct BlockDriver { @@ -42,13 +42,14 @@ struct BlockDriver {
42 int (*bdrv_set_key)(BlockDriverState *bs, const char *key); 42 int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
43 int (*bdrv_make_empty)(BlockDriverState *bs); 43 int (*bdrv_make_empty)(BlockDriverState *bs);
44 /* aio */ 44 /* aio */
45 - int (*bdrv_aio_new)(BlockDriverAIOCB *acb);  
46 - int (*bdrv_aio_read)(BlockDriverAIOCB *acb, int64_t sector_num,  
47 - uint8_t *buf, int nb_sectors);  
48 - int (*bdrv_aio_write)(BlockDriverAIOCB *acb, int64_t sector_num,  
49 - const uint8_t *buf, int nb_sectors); 45 + BlockDriverAIOCB *(*bdrv_aio_read)(BlockDriverState *bs,
  46 + int64_t sector_num, uint8_t *buf, int nb_sectors,
  47 + BlockDriverCompletionFunc *cb, void *opaque);
  48 + BlockDriverAIOCB *(*bdrv_aio_write)(BlockDriverState *bs,
  49 + int64_t sector_num, const uint8_t *buf, int nb_sectors,
  50 + BlockDriverCompletionFunc *cb, void *opaque);
50 void (*bdrv_aio_cancel)(BlockDriverAIOCB *acb); 51 void (*bdrv_aio_cancel)(BlockDriverAIOCB *acb);
51 - void (*bdrv_aio_delete)(BlockDriverAIOCB *acb); 52 + int aiocb_size;
52 53
53 const char *protocol_name; 54 const char *protocol_name;
54 int (*bdrv_pread)(BlockDriverState *bs, int64_t offset, 55 int (*bdrv_pread)(BlockDriverState *bs, int64_t offset,
@@ -69,6 +70,7 @@ struct BlockDriver { @@ -69,6 +70,7 @@ struct BlockDriver {
69 QEMUSnapshotInfo **psn_info); 70 QEMUSnapshotInfo **psn_info);
70 int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi); 71 int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
71 72
  73 + BlockDriverAIOCB *free_aiocb;
72 struct BlockDriver *next; 74 struct BlockDriver *next;
73 }; 75 };
74 76
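With bdrv_aio_new/bdrv_aio_delete gone from the table, a driver now advertises the size of its extended control block through aiocb_size and returns a pointer to the embedded common block from its aio_read/aio_write hooks; cancel recovers the driver view by casting, which is only valid because the common block is the first member (see bdrv_aio_cancel_em above). The sketch below illustrates that wiring with local stand-in types rather than the real block_int.h definitions.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef void CompletionFunc(void *opaque, int ret);

    typedef struct CommonAIOCB {
        CompletionFunc *cb;
        void *opaque;
        struct CommonAIOCB *next;
    } CommonAIOCB;

    typedef struct FakeDriverAIOCB {
        CommonAIOCB common;             /* must stay the first member */
        int64_t sector_num;
        int nb_sectors;
    } FakeDriverAIOCB;

    typedef struct FakeDriver {
        CommonAIOCB *(*aio_read)(int64_t sector_num, uint8_t *buf,
                                 int nb_sectors, CompletionFunc *cb,
                                 void *opaque);
        void (*aio_cancel)(CommonAIOCB *acb);
        size_t aiocb_size;              /* lets a generic allocator size it */
    } FakeDriver;

    static CommonAIOCB *fake_aio_read(int64_t sector_num, uint8_t *buf,
                                      int nb_sectors, CompletionFunc *cb,
                                      void *opaque)
    {
        /* A real driver would call qemu_aio_get() here; calloc stands in. */
        FakeDriverAIOCB *acb = calloc(1, sizeof(*acb));

        (void)buf;
        if (!acb)
            return NULL;
        acb->common.cb = cb;
        acb->common.opaque = opaque;
        acb->sector_num = sector_num;
        acb->nb_sectors = nb_sectors;
        return &acb->common;            /* callers only see the common head */
    }

    static void fake_aio_cancel(CommonAIOCB *blockacb)
    {
        /* Recovering the driver view from the common pointer works only
         * because the common block sits first in the struct. */
        FakeDriverAIOCB *acb = (FakeDriverAIOCB *)blockacb;

        printf("cancelling request at sector %lld\n",
               (long long)acb->sector_num);
        free(acb);
    }

    static const FakeDriver fake_driver = {
        .aio_read   = fake_aio_read,
        .aio_cancel = fake_aio_cancel,
        .aiocb_size = sizeof(FakeDriverAIOCB),
    };

    static void never_called(void *opaque, int ret) { (void)opaque; (void)ret; }

    int main(void)
    {
        uint8_t buf[512];
        CommonAIOCB *acb = fake_driver.aio_read(16, buf, 1, never_called, NULL);

        if (acb)
            fake_driver.aio_cancel(acb);
        return 0;
    }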
@@ -96,9 +98,9 @@ struct BlockDriverState { @@ -96,9 +98,9 @@ struct BlockDriverState {
96 int is_temporary; 98 int is_temporary;
97 99
98 BlockDriverState *backing_hd; 100 BlockDriverState *backing_hd;
99 - /* sync read/write emulation */ 101 + /* async read/write emulation */
100 102
101 - BlockDriverAIOCB *sync_aiocb; 103 + void *sync_aiocb;
102 104
103 /* NOTE: the following infos are only hints for real hardware 105 /* NOTE: the following infos are only hints for real hardware
104 drivers. They are not used by the block driver */ 106 drivers. They are not used by the block driver */
@@ -111,11 +113,14 @@ struct BlockDriverState { @@ -111,11 +113,14 @@ struct BlockDriverState {
111 struct BlockDriverAIOCB { 113 struct BlockDriverAIOCB {
112 BlockDriverState *bs; 114 BlockDriverState *bs;
113 BlockDriverCompletionFunc *cb; 115 BlockDriverCompletionFunc *cb;
114 - void *cb_opaque;  
115 -  
116 - void *opaque; /* driver opaque */ 116 + void *opaque;
  117 + BlockDriverAIOCB *next;
117 }; 118 };
118 119
119 void get_tmp_filename(char *filename, int size); 120 void get_tmp_filename(char *filename, int size);
120 121
  122 +void *qemu_aio_get(BlockDriverState *bs, BlockDriverCompletionFunc *cb,
  123 + void *opaque);
  124 +void qemu_aio_release(void *p);
  125 +
121 #endif /* BLOCK_INT_H */ 126 #endif /* BLOCK_INT_H */
@@ -569,15 +569,13 @@ void bdrv_set_boot_sector(BlockDriverState *bs, const uint8_t *data, int size); @@ -569,15 +569,13 @@ void bdrv_set_boot_sector(BlockDriverState *bs, const uint8_t *data, int size);
569 typedef struct BlockDriverAIOCB BlockDriverAIOCB; 569 typedef struct BlockDriverAIOCB BlockDriverAIOCB;
570 typedef void BlockDriverCompletionFunc(void *opaque, int ret); 570 typedef void BlockDriverCompletionFunc(void *opaque, int ret);
571 571
572 -BlockDriverAIOCB *bdrv_aio_new(BlockDriverState *bs);  
573 -int bdrv_aio_read(BlockDriverAIOCB *acb, int64_t sector_num,  
574 - uint8_t *buf, int nb_sectors,  
575 - BlockDriverCompletionFunc *cb, void *opaque);  
576 -int bdrv_aio_write(BlockDriverAIOCB *acb, int64_t sector_num,  
577 - const uint8_t *buf, int nb_sectors,  
578 - BlockDriverCompletionFunc *cb, void *opaque); 572 +BlockDriverAIOCB *bdrv_aio_read(BlockDriverState *bs, int64_t sector_num,
  573 + uint8_t *buf, int nb_sectors,
  574 + BlockDriverCompletionFunc *cb, void *opaque);
  575 +BlockDriverAIOCB *bdrv_aio_write(BlockDriverState *bs, int64_t sector_num,
  576 + const uint8_t *buf, int nb_sectors,
  577 + BlockDriverCompletionFunc *cb, void *opaque);
579 void bdrv_aio_cancel(BlockDriverAIOCB *acb); 578 void bdrv_aio_cancel(BlockDriverAIOCB *acb);
580 -void bdrv_aio_delete(BlockDriverAIOCB *acb);  
581 579
582 void qemu_aio_init(void); 580 void qemu_aio_init(void);
583 void qemu_aio_poll(void); 581 void qemu_aio_poll(void);