Commit 37b7842c2fb405c270efdce714425c17af3c78cd

Authored by aliguori
1 parent 3fb94d56

Move block dma helpers aiocb to store dma state (Avi Kivity)

Use the dedicated dma aiocb to store intermediate state for dma block
transactions.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6874 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 20 additions and 17 deletions
dma-helpers.c
@@ -39,6 +39,7 @@ void qemu_sglist_destroy(QEMUSGList *qsg)
 }
 
 typedef struct {
+    BlockDriverAIOCB common;
     BlockDriverState *bs;
     BlockDriverAIOCB *acb;
     QEMUSGList *sg;
@@ -48,13 +49,13 @@ typedef struct {
     target_phys_addr_t sg_cur_byte;
     QEMUIOVector iov;
     QEMUBH *bh;
-} DMABlockState;
+} DMAAIOCB;
 
 static void dma_bdrv_cb(void *opaque, int ret);
 
 static void reschedule_dma(void *opaque)
 {
-    DMABlockState *dbs = (DMABlockState *)opaque;
+    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
 
     qemu_bh_delete(dbs->bh);
     dbs->bh = NULL;
@@ -63,7 +64,7 @@ static void reschedule_dma(void *opaque)
 
 static void continue_after_map_failure(void *opaque)
 {
-    DMABlockState *dbs = (DMABlockState *)opaque;
+    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
 
     dbs->bh = qemu_bh_new(reschedule_dma, dbs);
     qemu_bh_schedule(dbs->bh);
@@ -71,11 +72,12 @@ static void continue_after_map_failure(void *opaque)
 
 static void dma_bdrv_cb(void *opaque, int ret)
 {
-    DMABlockState *dbs = (DMABlockState *)opaque;
+    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
     target_phys_addr_t cur_addr, cur_len;
     void *mem;
     int i;
 
+    dbs->acb = NULL;
     dbs->sector_num += dbs->iov.size / 512;
     for (i = 0; i < dbs->iov.niov; ++i) {
         cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
@@ -85,10 +87,9 @@ static void dma_bdrv_cb(void *opaque, int ret)
     qemu_iovec_reset(&dbs->iov);
 
     if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
-        dbs->acb->cb(dbs->acb->opaque, ret);
+        dbs->common.cb(dbs->common.opaque, ret);
         qemu_iovec_destroy(&dbs->iov);
-        qemu_aio_release(dbs->acb);
-        qemu_free(dbs);
+        qemu_aio_release(dbs);
         return;
     }
 
@@ -112,11 +113,11 @@ static void dma_bdrv_cb(void *opaque, int ret)
     }
 
     if (dbs->is_write) {
-        bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
-                        dbs->iov.size / 512, dma_bdrv_cb, dbs);
+        dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
+                                   dbs->iov.size / 512, dma_bdrv_cb, dbs);
     } else {
-        bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
-                       dbs->iov.size / 512, dma_bdrv_cb, dbs);
+        dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
+                                  dbs->iov.size / 512, dma_bdrv_cb, dbs);
     }
 }
 
@@ -125,10 +126,10 @@ static BlockDriverAIOCB *dma_bdrv_io(
     BlockDriverCompletionFunc *cb, void *opaque,
     int is_write)
 {
-    DMABlockState *dbs = qemu_malloc(sizeof(*dbs));
+    DMAAIOCB *dbs = qemu_aio_get_pool(&dma_aio_pool, bs, cb, opaque);
 
+    dbs->acb = NULL;
     dbs->bs = bs;
-    dbs->acb = qemu_aio_get_pool(&dma_aio_pool, bs, cb, opaque);
     dbs->sg = sg;
     dbs->sector_num = sector_num;
     dbs->sg_cur_index = 0;
@@ -137,7 +138,7 @@ static BlockDriverAIOCB *dma_bdrv_io(
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
     dma_bdrv_cb(dbs, 0);
-    return dbs->acb;
+    return &dbs->common;
 }
 
 
@@ -157,12 +158,14 @@ BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
 
 static void dma_aio_cancel(BlockDriverAIOCB *acb)
 {
-    DMABlockState *dbs = (DMABlockState *)acb->opaque;
+    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
 
-    bdrv_aio_cancel(dbs->acb);
+    if (dbs->acb) {
+        bdrv_aio_cancel(dbs->acb);
+    }
 }
 
 void dma_helper_init(void)
 {
-    aio_pool_init(&dma_aio_pool, sizeof(BlockDriverAIOCB), dma_aio_cancel);
+    aio_pool_init(&dma_aio_pool, sizeof(DMAAIOCB), dma_aio_cancel);
 }