Commit 862c928064cf0f079d81b24db932a093e49d101d

Authored by aurel32
1 parent 3c4cf535

DB-DMA IDE asynchronous I/O

Signed-off-by: Laurent Vivier <Laurent@vivier.eu>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6681 c046a42c-6fe2-441c-8c8c-71466251a162
hw/ide.c
@@ -3429,110 +3429,143 @@ void pci_piix4_ide_init(PCIBus *bus, BlockDriverState **hd_table, int devfn,
3429 3429
3430 typedef struct MACIOIDEState { 3430 typedef struct MACIOIDEState {
3431 IDEState ide_if[2]; 3431 IDEState ide_if[2];
3432 - int stream_index; 3432 + BlockDriverAIOCB *aiocb;
3433 } MACIOIDEState; 3433 } MACIOIDEState;
3434 3434
3435 -static void pmac_atapi_read(DBDMA_io *io) 3435 +static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
3436 { 3436 {
  3437 + DBDMA_io *io = opaque;
3437 MACIOIDEState *m = io->opaque; 3438 MACIOIDEState *m = io->opaque;
3438 IDEState *s = m->ide_if->cur_drive; 3439 IDEState *s = m->ide_if->cur_drive;
3439 - int ret, len;  
3440 -  
3441 - while (io->len > 0 &&  
3442 - s->packet_transfer_size > 0) {  
3443 3440
3444 - len = s->cd_sector_size;  
3445 - ret = cd_read_sector(s->bs, s->lba, s->io_buffer, len);  
3446 - if (ret < 0) {  
3447 - io->dma_end(io);  
3448 - ide_transfer_stop(s);  
3449 - ide_atapi_io_error(s, ret);  
3450 - return;  
3451 - } 3441 + if (ret < 0) {
  3442 + m->aiocb = NULL;
  3443 + qemu_sglist_destroy(&s->sg);
  3444 + ide_atapi_io_error(s, ret);
  3445 + io->dma_end(opaque);
  3446 + return;
  3447 + }
3452 3448
3453 - if (len > io->len)  
3454 - len = io->len; 3449 + if (s->io_buffer_size > 0) {
  3450 + m->aiocb = NULL;
  3451 + qemu_sglist_destroy(&s->sg);
3455 3452
3456 - cpu_physical_memory_write(io->addr,  
3457 - s->io_buffer + m->stream_index, len); 3453 + s->packet_transfer_size -= s->io_buffer_size;
3458 3454
3459 - /* db-dma can ask for 512 bytes whereas block size is 2048... */ 3455 + s->io_buffer_index += s->io_buffer_size;
  3456 + s->lba += s->io_buffer_index >> 11;
  3457 + s->io_buffer_index &= 0x7ff;
  3458 + }
3460 3459
3461 - m->stream_index += len;  
3462 - s->lba += m->stream_index / s->cd_sector_size;  
3463 - m->stream_index %= s->cd_sector_size; 3460 + if (s->packet_transfer_size <= 0)
  3461 + ide_atapi_cmd_ok(s);
3464 3462
3465 - io->len -= len;  
3466 - io->addr += len;  
3467 - s->packet_transfer_size -= len; 3463 + if (io->len == 0) {
  3464 + io->dma_end(opaque);
  3465 + return;
3468 } 3466 }
3469 3467
3470 - if (io->len <= 0)  
3471 - io->dma_end(io); 3468 + /* launch next transfer */
3472 3469
3473 - if (s->packet_transfer_size <= 0) {  
3474 - s->status = READY_STAT | SEEK_STAT;  
3475 - s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO  
3476 - | ATAPI_INT_REASON_CD;  
3477 - ide_set_irq(s); 3470 + s->io_buffer_size = io->len;
  3471 +
  3472 + qemu_sglist_init(&s->sg, io->len / TARGET_PAGE_SIZE + 1);
  3473 + qemu_sglist_add(&s->sg, io->addr, io->len);
  3474 + io->addr += io->len;
  3475 + io->len = 0;
  3476 +
  3477 + m->aiocb = dma_bdrv_read(s->bs, &s->sg,
  3478 + (int64_t)(s->lba << 2) + (s->io_buffer_index >> 9),
  3479 + pmac_ide_atapi_transfer_cb, io);
  3480 + if (!m->aiocb) {
  3481 + qemu_sglist_destroy(&s->sg);
  3482 + /* Note: media not present is the most likely case */
  3483 + ide_atapi_cmd_error(s, SENSE_NOT_READY,
  3484 + ASC_MEDIUM_NOT_PRESENT);
  3485 + io->dma_end(opaque);
  3486 + return;
3478 } 3487 }
3479 } 3488 }
3480 3489
3481 -static void pmac_ide_transfer(DBDMA_io *io) 3490 +static void pmac_ide_transfer_cb(void *opaque, int ret)
3482 { 3491 {
  3492 + DBDMA_io *io = opaque;
3483 MACIOIDEState *m = io->opaque; 3493 MACIOIDEState *m = io->opaque;
3484 IDEState *s = m->ide_if->cur_drive; 3494 IDEState *s = m->ide_if->cur_drive;
  3495 + int n;
3485 int64_t sector_num; 3496 int64_t sector_num;
3486 - int ret, n;  
3487 - int len;  
3488 3497
3489 - if (s->is_cdrom) {  
3490 - pmac_atapi_read(io); 3498 + if (ret < 0) {
  3499 + m->aiocb = NULL;
  3500 + qemu_sglist_destroy(&s->sg);
  3501 + ide_dma_error(s);
  3502 + io->dma_end(io);
3491 return; 3503 return;
3492 } 3504 }
3493 3505
3494 - while (io->len > 0 && s->nsector > 0) { 3506 + sector_num = ide_get_sector(s);
  3507 + if (s->io_buffer_size > 0) {
  3508 + m->aiocb = NULL;
  3509 + qemu_sglist_destroy(&s->sg);
  3510 + n = (s->io_buffer_size + 0x1ff) >> 9;
  3511 + sector_num += n;
  3512 + ide_set_sector(s, sector_num);
  3513 + s->nsector -= n;
  3514 + }
  3515 +
  3516 + /* end of transfer ? */
  3517 + if (s->nsector == 0) {
  3518 + s->status = READY_STAT | SEEK_STAT;
  3519 + ide_set_irq(s);
  3520 + }
3495 3521
3496 - sector_num = ide_get_sector(s); 3522 + /* end of DMA ? */
3497 3523
3498 - n = s->nsector;  
3499 - if (n > IDE_DMA_BUF_SECTORS)  
3500 - n = IDE_DMA_BUF_SECTORS; 3524 + if (io->len == 0) {
  3525 + io->dma_end(io);
  3526 + return;
  3527 + }
3501 3528
3502 - len = n << 9;  
3503 - if (len > io->len)  
3504 - len = io->len;  
3505 - n = (len + 511) >> 9; 3529 + /* launch next transfer */
3506 3530
3507 - if (s->is_read) {  
3508 - ret = bdrv_read(s->bs, sector_num, s->io_buffer, n);  
3509 - cpu_physical_memory_write(io->addr, s->io_buffer, len);  
3510 - } else {  
3511 - cpu_physical_memory_read(io->addr, s->io_buffer, len);  
3512 - ret = bdrv_write(s->bs, sector_num, s->io_buffer, n);  
3513 - } 3531 + s->io_buffer_index = 0;
  3532 + s->io_buffer_size = io->len;
3514 3533
3515 - if (ret != 0) {  
3516 - io->dma_end(io);  
3517 - ide_rw_error(s);  
3518 - return;  
3519 - } 3534 + qemu_sglist_init(&s->sg, io->len / TARGET_PAGE_SIZE + 1);
  3535 + qemu_sglist_add(&s->sg, io->addr, io->len);
  3536 + io->addr += io->len;
  3537 + io->len = 0;
3520 3538
3521 - io->len -= len;  
3522 - io->addr += len;  
3523 - ide_set_sector(s, sector_num + n);  
3524 - s->nsector -= n;  
3525 - } 3539 + if (s->is_read)
  3540 + m->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num,
  3541 + pmac_ide_transfer_cb, io);
  3542 + else
  3543 + m->aiocb = dma_bdrv_write(s->bs, &s->sg, sector_num,
  3544 + pmac_ide_transfer_cb, io);
  3545 + if (!m->aiocb)
  3546 + pmac_ide_transfer_cb(io, -1);
  3547 +}
3526 3548
3527 - if (io->len <= 0)  
3528 - io->dma_end(io); 3549 +static void pmac_ide_transfer(DBDMA_io *io)
  3550 +{
  3551 + MACIOIDEState *m = io->opaque;
  3552 + IDEState *s = m->ide_if->cur_drive;
3529 3553
3530 - if (s->nsector <= 0) {  
3531 - s->status = READY_STAT | SEEK_STAT;  
3532 - ide_set_irq(s); 3554 + s->io_buffer_size = 0;
  3555 + if (s->is_cdrom) {
  3556 + pmac_ide_atapi_transfer_cb(io, 0);
  3557 + return;
3533 } 3558 }
3534 3559
3535 - return; 3560 + pmac_ide_transfer_cb(io, 0);
  3561 +}
  3562 +
  3563 +static void pmac_ide_flush(DBDMA_io *io)
  3564 +{
  3565 + MACIOIDEState *m = io->opaque;
  3566 +
  3567 + if (m->aiocb)
  3568 + qemu_aio_flush();
3536 } 3569 }
3537 3570
3538 /* PowerMac IDE memory IO */ 3571 /* PowerMac IDE memory IO */
@@ -3712,7 +3745,7 @@ int pmac_ide_init (BlockDriverState **hd_table, qemu_irq irq,
3712 ide_init2(d->ide_if, hd_table[0], hd_table[1], irq); 3745 ide_init2(d->ide_if, hd_table[0], hd_table[1], irq);
3713 3746
3714 if (dbdma) 3747 if (dbdma)
3715 - DBDMA_register_channel(dbdma, channel, dma_irq, pmac_ide_transfer, d); 3748 + DBDMA_register_channel(dbdma, channel, dma_irq, pmac_ide_transfer, pmac_ide_flush, d);
3716 3749
3717 pmac_ide_memory = cpu_register_io_memory(0, pmac_ide_read, 3750 pmac_ide_memory = cpu_register_io_memory(0, pmac_ide_read,
3718 pmac_ide_write, d); 3751 pmac_ide_write, d);
hw/mac_dbdma.c
@@ -160,6 +160,7 @@ typedef struct DBDMA_channel {
160 qemu_irq irq; 160 qemu_irq irq;
161 DBDMA_io io; 161 DBDMA_io io;
162 DBDMA_rw rw; 162 DBDMA_rw rw;
  163 + DBDMA_flush flush;
163 dbdma_cmd current; 164 dbdma_cmd current;
164 int processing; 165 int processing;
165 } DBDMA_channel; 166 } DBDMA_channel;
@@ -367,7 +368,8 @@ static void dbdma_end(DBDMA_io *io)
367 current->xfer_status = cpu_to_le16(be32_to_cpu(ch->regs[DBDMA_STATUS])); 368 current->xfer_status = cpu_to_le16(be32_to_cpu(ch->regs[DBDMA_STATUS]));
368 current->res_count = cpu_to_le16(be32_to_cpu(io->len)); 369 current->res_count = cpu_to_le16(be32_to_cpu(io->len));
369 dbdma_cmdptr_save(ch); 370 dbdma_cmdptr_save(ch);
370 - ch->regs[DBDMA_STATUS] &= cpu_to_be32(~FLUSH); 371 + if (io->is_last)
  372 + ch->regs[DBDMA_STATUS] &= cpu_to_be32(~FLUSH);
371 373
372 conditional_interrupt(ch); 374 conditional_interrupt(ch);
373 conditional_branch(ch); 375 conditional_branch(ch);
@@ -632,7 +634,7 @@ static void DBDMA_run_bh(void *opaque)
632 } 634 }
633 635
634 void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq, 636 void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
635 - DBDMA_rw rw, 637 + DBDMA_rw rw, DBDMA_flush flush,
636 void *opaque) 638 void *opaque)
637 { 639 {
638 DBDMA_channel *ch = ( DBDMA_channel *)dbdma + nchan; 640 DBDMA_channel *ch = ( DBDMA_channel *)dbdma + nchan;
@@ -642,6 +644,7 @@ void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
642 ch->irq = irq; 644 ch->irq = irq;
643 ch->channel = nchan; 645 ch->channel = nchan;
644 ch->rw = rw; 646 ch->rw = rw;
  647 + ch->flush = flush;
645 ch->io.opaque = opaque; 648 ch->io.opaque = opaque;
646 ch->io.channel = ch; 649 ch->io.channel = ch;
647 } 650 }
@@ -687,6 +690,8 @@ dbdma_control_write(DBDMA_channel *ch)
687 690
688 if (status & ACTIVE) 691 if (status & ACTIVE)
689 qemu_bh_schedule(dbdma_bh); 692 qemu_bh_schedule(dbdma_bh);
  693 + if (status & FLUSH)
  694 + ch->flush(&ch->io);
690 } 695 }
691 696
692 static void dbdma_writel (void *opaque, 697 static void dbdma_writel (void *opaque,
hw/mac_dbdma.h
@@ -22,6 +22,7 @@
22 22
23 typedef struct DBDMA_io DBDMA_io; 23 typedef struct DBDMA_io DBDMA_io;
24 24
  25 +typedef void (*DBDMA_flush)(DBDMA_io *io);
25 typedef void (*DBDMA_rw)(DBDMA_io *io); 26 typedef void (*DBDMA_rw)(DBDMA_io *io);
26 typedef void (*DBDMA_end)(DBDMA_io *io); 27 typedef void (*DBDMA_end)(DBDMA_io *io);
27 struct DBDMA_io { 28 struct DBDMA_io {
@@ -36,7 +37,7 @@ struct DBDMA_io {
36 37
37 38
38 void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq, 39 void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
39 - DBDMA_rw rw, 40 + DBDMA_rw rw, DBDMA_flush flush,
40 void *opaque); 41 void *opaque);
41 void DBDMA_schedule(void); 42 void DBDMA_schedule(void);
42 void* DBDMA_init (int *dbdma_mem_index); 43 void* DBDMA_init (int *dbdma_mem_index);