Commit 1fb8648d4a4e67df16fe0392590cb5ede3296387
1 parent 59a703eb
Convert IDE to use new dma helpers (Avi Kivity)
Use the new dma block helpers to perform dma disk I/O.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6525 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 10 additions and 66 deletions.
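For context, here is a minimal sketch of the scatter/gather DMA helper API this commit switches to, reconstructed only from the calls that appear in the diff below (qemu_sglist_init/add/destroy and dma_bdrv_read/write from dma.h). The ExampleDev structure, example_dma_cb and example_start_read names are hypothetical illustrations, not part of hw/ide.c, and the includes are approximate for the QEMU tree of that era.

/* Sketch only: assumes the dma.h API exactly as used in the diff below. */
#include "block.h"   /* BlockDriverState, BlockDriverAIOCB */
#include "dma.h"     /* QEMUSGList, qemu_sglist_*, dma_bdrv_read/write */

typedef struct ExampleDev {          /* hypothetical device state */
    BlockDriverState *bs;
    QEMUSGList sg;
    BlockDriverAIOCB *aiocb;
} ExampleDev;

static void example_dma_cb(void *opaque, int ret)
{
    ExampleDev *dev = opaque;

    /* Transfer finished (ret < 0 on error): release the sg list. */
    qemu_sglist_destroy(&dev->sg);
}

/* Build an sg list from a guest-physical region and start a DMA read. */
static void example_start_read(ExampleDev *dev, int64_t sector_num,
                               target_phys_addr_t addr, int len)
{
    qemu_sglist_init(&dev->sg, 1 /* hint: expected number of entries */);
    qemu_sglist_add(&dev->sg, addr, len);

    /* dma_bdrv_read() performs the vectored block I/O over the sg list
       and invokes example_dma_cb() when the whole transfer completes. */
    dev->aiocb = dma_bdrv_read(dev->bs, &dev->sg, sector_num,
                               example_dma_cb, dev);
}

The device only describes guest-physical regions; mapping them into host memory is left to the helper layer, which is exactly what lets the IDE code below drop its open-coded cpu_physical_memory_map() handling.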
hw/ide.c
@@ -33,6 +33,7 @@
 #include "ppc_mac.h"
 #include "mac_dbdma.h"
 #include "sh.h"
+#include "dma.h"
 
 /* debug IDE devices */
 //#define DEBUG_IDE
@@ -423,7 +424,7 @@ typedef struct IDEState {
     int atapi_dma; /* true if dma is requested for the packet cmd */
     /* ATA DMA state */
     int io_buffer_size;
-    QEMUIOVector iovec;
+    QEMUSGList sg;
     /* PIO transfer handling */
    int req_nb_sectors; /* number of sectors per interrupt */
     EndTransferFunc *end_transfer_func;
@@ -876,10 +877,8 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
         uint32_t size;
     } prd;
     int l, len;
-    void *mem;
-    target_phys_addr_t l1;
 
-    qemu_iovec_init(&s->iovec, s->nsector / (TARGET_PAGE_SIZE/512) + 1);
+    qemu_sglist_init(&s->sg, s->nsector / (TARGET_PAGE_SIZE/512) + 1);
     s->io_buffer_size = 0;
     for(;;) {
         if (bm->cur_prd_len == 0) {
@@ -900,15 +899,10 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
         }
         l = bm->cur_prd_len;
         if (l > 0) {
-            l1 = l;
-            mem = cpu_physical_memory_map(bm->cur_prd_addr, &l1, is_write);
-            if (!mem) {
-                break;
-            }
-            qemu_iovec_add(&s->iovec, mem, l1);
-            bm->cur_prd_addr += l1;
-            bm->cur_prd_len -= l1;
-            s->io_buffer_size += l1;
+            qemu_sglist_add(&s->sg, bm->cur_prd_addr, l);
+            bm->cur_prd_addr += l;
+            bm->cur_prd_len -= l;
+            s->io_buffer_size += l;
         }
     }
     return 1;
@@ -916,14 +910,7 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
 
 static void dma_buf_commit(IDEState *s, int is_write)
 {
-    int i;
-
-    for (i = 0; i < s->iovec.niov; ++i) {
-        cpu_physical_memory_unmap(s->iovec.iov[i].iov_base,
-                                  s->iovec.iov[i].iov_len, is_write,
-                                  s->iovec.iov[i].iov_len);
-    }
-    qemu_iovec_destroy(&s->iovec);
+    qemu_sglist_destroy(&s->sg);
 }
 
 static void ide_dma_error(IDEState *s)
@@ -1006,39 +993,6 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
     return 1;
 }
 
-typedef struct {
-    BMDMAState *bm;
-    void (*cb)(void *opaque, int ret);
-    QEMUBH *bh;
-} MapFailureContinuation;
-
-static void reschedule_dma(void *opaque)
-{
-    MapFailureContinuation *cont = opaque;
-
-    cont->cb(cont->bm, 0);
-    qemu_bh_delete(cont->bh);
-    qemu_free(cont);
-}
-
-static void continue_after_map_failure(void *opaque)
-{
-    MapFailureContinuation *cont = opaque;
-
-    cont->bh = qemu_bh_new(reschedule_dma, opaque);
-    qemu_bh_schedule(cont->bh);
-}
-
-static void wait_for_bounce_buffer(BMDMAState *bmdma,
-                                   void (*cb)(void *opaque, int ret))
-{
-    MapFailureContinuation *cont = qemu_malloc(sizeof(*cont));
-
-    cont->bm = bmdma;
-    cont->cb = cb;
-    cpu_register_map_client(cont, continue_after_map_failure);
-}
-
 static void ide_read_dma_cb(void *opaque, int ret)
 {
     BMDMAState *bm = opaque;
@@ -1080,15 +1034,10 @@ static void ide_read_dma_cb(void *opaque, int ret)
     s->io_buffer_size = n * 512;
     if (dma_buf_prepare(bm, 1) == 0)
         goto eot;
-    if (!s->iovec.niov) {
-        wait_for_bounce_buffer(bm, ide_read_dma_cb);
-        return;
-    }
 #ifdef DEBUG_AIO
     printf("aio_read: sector_num=%" PRId64 " n=%d\n", sector_num, n);
 #endif
-    bm->aiocb = bdrv_aio_readv(s->bs, sector_num, &s->iovec, n,
-                               ide_read_dma_cb, bm);
+    bm->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num, ide_read_dma_cb, bm);
     ide_dma_submit_check(s, ide_read_dma_cb, bm);
 }
 
@@ -1209,15 +1158,10 @@ static void ide_write_dma_cb(void *opaque, int ret)
     /* launch next transfer */
     if (dma_buf_prepare(bm, 0) == 0)
         goto eot;
-    if (!s->iovec.niov) {
-        wait_for_bounce_buffer(bm, ide_write_dma_cb);
-        return;
-    }
 #ifdef DEBUG_AIO
     printf("aio_write: sector_num=%" PRId64 " n=%d\n", sector_num, n);
 #endif
-    bm->aiocb = bdrv_aio_writev(s->bs, sector_num, &s->iovec, n,
-                                ide_write_dma_cb, bm);
+    bm->aiocb = dma_bdrv_write(s->bs, &s->sg, sector_num, ide_write_dma_cb, bm);
     ide_dma_submit_check(s, ide_write_dma_cb, bm);
 }
 
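Note on the removed code: the MapFailureContinuation / wait_for_bounce_buffer machinery and the "if (!s->iovec.niov)" retry paths deleted above existed to cope with cpu_physical_memory_map() temporarily running out of bounce buffers. With this commit that concern leaves the IDE code entirely; presumably the generic dma_bdrv_read()/dma_bdrv_write() helpers map guest memory (and deal with any map failures) internally, so individual devices no longer need to carry their own continuation logic.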