Commit 59a703ebaa9cb5aeda986dc8f627b8d23e3297cd

Authored by aliguori
1 parent be959463

Introduce block dma helpers (Avi Kivity)

These helpers perform read/write requests on entire scatter/gather lists,
relieving the device emulation code from mapping and unmapping physical
memory, and from looping when map resources are exhausted.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6524 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 2 changed files with 126 additions and 1 deletion
dma-helpers.c
... ... @@ -8,7 +8,7 @@
8 8 */
9 9  
10 10 #include "dma.h"
11   -
  11 +#include "block_int.h"
12 12  
13 13 void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
14 14 {
... ... @@ -36,3 +36,120 @@ void qemu_sglist_destroy(QEMUSGList *qsg)
36 36 qemu_free(qsg->sg);
37 37 }
38 38  
/* Per-request state for a scatter/gather block transfer: tracks how far
 * through the sg list we are as the request is split into successive
 * bdrv_aio_readv/writev sub-requests. */
typedef struct {
    BlockDriverState *bs;           /* block device the transfer targets */
    BlockDriverAIOCB *acb;          /* caller-visible AIOCB; its cb fires on final completion */
    QEMUSGList *sg;                 /* scatter/gather list being transferred */
    uint64_t sector_num;            /* next sector to read/write */
    int is_write;                   /* non-zero for writes, zero for reads */
    int sg_cur_index;               /* index of the sg entry currently being consumed */
    target_phys_addr_t sg_cur_byte; /* byte offset already consumed within that entry */
    QEMUIOVector iov;               /* iovec built for the in-flight sub-request */
    QEMUBH *bh;                     /* bottom half used to retry after a map failure */
} DMABlockState;

/* Forward declaration: the completion callback below re-enters itself. */
static void dma_bdrv_cb(void *opaque, int ret);
  52 +
  53 +static void reschedule_dma(void *opaque)
  54 +{
  55 + DMABlockState *dbs = (DMABlockState *)opaque;
  56 +
  57 + qemu_bh_delete(dbs->bh);
  58 + dbs->bh = NULL;
  59 + dma_bdrv_cb(opaque, 0);
  60 +}
  61 +
  62 +static void continue_after_map_failure(void *opaque)
  63 +{
  64 + DMABlockState *dbs = (DMABlockState *)opaque;
  65 +
  66 + dbs->bh = qemu_bh_new(reschedule_dma, dbs);
  67 + qemu_bh_schedule(dbs->bh);
  68 +}
  69 +
/*
 * Completion callback driving the transfer state machine.  Invoked once
 * with ret == 0 from dma_bdrv_io() to start the request, then again each
 * time a bdrv_aio_readv/writev sub-request completes.  It unmaps the
 * previous sub-request's memory, and either finishes the whole request
 * (sg list exhausted, or error) or maps as many further sg entries as
 * possible and issues the next sub-request for them.
 */
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMABlockState *dbs = (DMABlockState *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;
    int i;

    /* Advance past the sectors just transferred (iov.size is 0 on the
     * first call) and release the guest-memory mappings.  The third
     * argument mirrors the map direction: !is_write, since a disk read
     * writes into guest memory. */
    dbs->sector_num += dbs->iov.size / 512;
    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);

    /* Whole sg list done, or the last sub-request failed: report the
     * final status to the caller and tear down all transfer state. */
    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->acb->cb(dbs->acb->opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs->acb);
        qemu_free(dbs);
        return;
    }

    /* Build an iovec covering as much of the remaining sg list as will
     * map.  cpu_physical_memory_map() may shrink cur_len or return NULL
     * when mapping resources are exhausted, so partial consumption of an
     * sg entry is tracked via sg_cur_byte. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    /* Nothing mapped at all: register to be called back when resources
     * free up, then retry via reschedule_dma(). */
    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    /* Issue the next sub-request; completion re-enters this function. */
    if (dbs->is_write) {
        bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                        dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                       dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
}
  120 +
  121 +static BlockDriverAIOCB *dma_bdrv_io(
  122 + BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
  123 + BlockDriverCompletionFunc *cb, void *opaque,
  124 + int is_write)
  125 +{
  126 + DMABlockState *dbs = qemu_malloc(sizeof(*dbs));
  127 +
  128 + dbs->bs = bs;
  129 + dbs->acb = qemu_aio_get(bs, cb, opaque);
  130 + dbs->sg = sg;
  131 + dbs->sector_num = sector_num;
  132 + dbs->sg_cur_index = 0;
  133 + dbs->sg_cur_byte = 0;
  134 + dbs->is_write = is_write;
  135 + dbs->bh = NULL;
  136 + qemu_iovec_init(&dbs->iov, sg->nsg);
  137 + dma_bdrv_cb(dbs, 0);
  138 + return dbs->acb;
  139 +}
  140 +
  141 +
  142 +BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
  143 + QEMUSGList *sg, uint64_t sector,
  144 + void (*cb)(void *opaque, int ret), void *opaque)
  145 +{
  146 + return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
  147 +}
  148 +
  149 +BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
  150 + QEMUSGList *sg, uint64_t sector,
  151 + void (*cb)(void *opaque, int ret), void *opaque)
  152 +{
  153 + return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
  154 +}
  155 +
... ...
... ... @@ -12,6 +12,7 @@
12 12  
13 13 #include <stdio.h>
14 14 #include "cpu.h"
  15 +#include "block.h"
15 16  
16 17 typedef struct {
17 18 target_phys_addr_t base;
... ... @@ -30,4 +31,11 @@ void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
30 31 target_phys_addr_t len);
31 32 void qemu_sglist_destroy(QEMUSGList *qsg);
32 33  
/* Asynchronously transfer an entire scatter/gather list from/to a block
 * device, starting at the given sector.  The implementation maps and
 * unmaps guest memory itself and retries when map resources are
 * exhausted; cb is invoked exactly once with the final status. */
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);

33 41 #endif
... ...