/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
# include "hw.h"
# include "isa.h"
26
27
/* #define DEBUG_DMA */
28
29
# define dolog (...) fprintf ( stderr , "dma: " __VA_ARGS__ )
30
31
32
33
34
35
36
37
38
39
40
41
42
# ifdef DEBUG_DMA
# define linfo (...) fprintf ( stderr , "dma: " __VA_ARGS__ )
# define ldebug (...) fprintf ( stderr , "dma: " __VA_ARGS__ )
# else
# define linfo (...)
# define ldebug (...)
# endif
struct dma_regs {
int now [ 2 ];
uint16_t base [ 2 ];
uint8_t mode ;
uint8_t page ;
43
uint8_t pageh ;
44
45
uint8_t dack ;
uint8_t eop ;
46
47
DMA_transfer_handler transfer_handler ;
void * opaque ;
48
49
50
51
52
53
54
55
56
57
};
# define ADDR 0
# define COUNT 1
static struct dma_cont {
uint8_t status ;
uint8_t command ;
uint8_t mask ;
uint8_t flip_flop ;
58
int dshift ;
59
60
61
62
struct dma_regs regs [ 4 ];
} dma_controllers [ 2 ];
enum {
63
64
65
66
67
68
69
70
71
72
73
CMD_MEMORY_TO_MEMORY = 0x01 ,
CMD_FIXED_ADDRESS = 0x02 ,
CMD_BLOCK_CONTROLLER = 0x04 ,
CMD_COMPRESSED_TIME = 0x08 ,
CMD_CYCLIC_PRIORITY = 0x10 ,
CMD_EXTENDED_WRITE = 0x20 ,
CMD_LOW_DREQ = 0x40 ,
CMD_LOW_DACK = 0x80 ,
CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
| CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
| CMD_LOW_DREQ | CMD_LOW_DACK
74
75
76
};
77
78
static void DMA_run ( void );
79
80
static int channels [ 8 ] = { - 1 , 2 , 3 , 1 , - 1 , - 1 , - 1 , 0 };
81
static void write_page ( void * opaque , uint32_t nport , uint32_t data )
82
{
83
struct dma_cont * d = opaque ;
84
85
int ichan ;
86
ichan = channels [ nport & 7 ];
87
if ( - 1 == ichan ) {
88
dolog ( "invalid channel %#x %#x \n " , nport , data );
89
90
return ;
}
91
92
93
d -> regs [ ichan ]. page = data ;
}
94
static void write_pageh ( void * opaque , uint32_t nport , uint32_t data )
95
96
97
{
struct dma_cont * d = opaque ;
int ichan ;
98
99
ichan = channels [ nport & 7 ];
100
if ( - 1 == ichan ) {
101
dolog ( "invalid channel %#x %#x \n " , nport , data );
102
103
104
105
return ;
}
d -> regs [ ichan ]. pageh = data ;
}
106
107
108
109
110
111
112
static uint32_t read_page ( void * opaque , uint32_t nport )
{
struct dma_cont * d = opaque ;
int ichan ;
ichan = channels [ nport & 7 ];
113
if ( - 1 == ichan ) {
114
dolog ( "invalid channel read %#x \n " , nport );
115
116
117
return 0 ;
}
return d -> regs [ ichan ]. page ;
118
119
}
120
121
122
123
124
125
126
static uint32_t read_pageh ( void * opaque , uint32_t nport )
{
struct dma_cont * d = opaque ;
int ichan ;
ichan = channels [ nport & 7 ];
if ( - 1 == ichan ) {
127
dolog ( "invalid channel read %#x \n " , nport );
128
129
130
131
132
return 0 ;
}
return d -> regs [ ichan ]. pageh ;
}
133
static inline void init_chan ( struct dma_cont * d , int ichan )
134
135
136
{
struct dma_regs * r ;
137
r = d -> regs + ichan ;
138
r -> now [ ADDR ] = r -> base [ ADDR ] << d -> dshift ;
139
140
141
r -> now [ COUNT ] = 0 ;
}
142
static inline int getff ( struct dma_cont * d )
143
144
145
{
int ff ;
146
147
ff = d -> flip_flop ;
d -> flip_flop = ! ff ;
148
149
150
return ff ;
}
151
static uint32_t read_chan ( void * opaque , uint32_t nport )
152
{
153
struct dma_cont * d = opaque ;
154
int ichan , nreg , iport , ff , val , dir ;
155
156
struct dma_regs * r ;
157
158
159
160
iport = ( nport >> d -> dshift ) & 0x0f ;
ichan = iport >> 1 ;
nreg = iport & 1 ;
r = d -> regs + ichan ;
161
162
dir = (( r -> mode >> 5 ) & 1 ) ? - 1 : 1 ;
163
ff = getff ( d );
164
if ( nreg )
165
val = ( r -> base [ COUNT ] << d -> dshift ) - r -> now [ COUNT ];
166
else
167
val = r -> now [ ADDR ] + r -> now [ COUNT ] * dir ;
168
169
ldebug ( "read_chan %#x -> %d \n " , iport , val );
170
return ( val >> ( d -> dshift + ( ff << 3 ))) & 0xff ;
171
172
}
173
static void write_chan ( void * opaque , uint32_t nport , uint32_t data )
174
{
175
176
struct dma_cont * d = opaque ;
int iport , ichan , nreg ;
177
178
struct dma_regs * r ;
179
180
181
182
183
iport = ( nport >> d -> dshift ) & 0x0f ;
ichan = iport >> 1 ;
nreg = iport & 1 ;
r = d -> regs + ichan ;
if ( getff ( d )) {
184
r -> base [ nreg ] = ( r -> base [ nreg ] & 0xff ) | (( data << 8 ) & 0xff00 );
185
init_chan ( d , ichan );
186
187
} else {
r -> base [ nreg ] = ( r -> base [ nreg ] & 0xff00 ) | ( data & 0xff );
188
189
190
}
}
191
static void write_cont ( void * opaque , uint32_t nport , uint32_t data )
192
{
193
struct dma_cont * d = opaque ;
194
int iport , ichan = 0 ;
195
196
iport = ( nport >> d -> dshift ) & 0x0f ;
197
switch ( iport ) {
198
case 0x08 : /* command */
199
if (( data != 0 ) && ( data & CMD_NOT_SUPPORTED )) {
200
dolog ( "command %#x not supported \n " , data );
201
return ;
202
203
204
205
}
d -> command = data ;
break ;
206
case 0x09 :
207
208
209
210
211
212
213
214
ichan = data & 3 ;
if ( data & 4 ) {
d -> status |= 1 << ( ichan + 4 );
}
else {
d -> status &= ~ ( 1 << ( ichan + 4 ));
}
d -> status &= ~ ( 1 << ichan );
215
DMA_run ();
216
217
break ;
218
case 0x0a : /* single mask */
219
220
221
222
if ( data & 4 )
d -> mask |= 1 << ( data & 3 );
else
d -> mask &= ~ ( 1 << ( data & 3 ));
223
DMA_run ();
224
225
break ;
226
case 0x0b : /* mode */
227
{
228
229
ichan = data & 3 ;
# ifdef DEBUG_DMA
230
231
{
int op , ai , dir , opmode ;
232
233
234
235
op = ( data >> 2 ) & 3 ;
ai = ( data >> 4 ) & 1 ;
dir = ( data >> 5 ) & 1 ;
opmode = ( data >> 6 ) & 3 ;
236
237
238
linfo ( "ichan %d, op %d, ai %d, dir %d, opmode %d \n " ,
ichan , op , ai , dir , opmode );
239
}
240
241
242
243
244
# endif
d -> regs [ ichan ]. mode = data ;
break ;
}
245
case 0x0c : /* clear flip flop */
246
247
248
d -> flip_flop = 0 ;
break ;
249
case 0x0d : /* reset */
250
251
252
253
254
255
d -> flip_flop = 0 ;
d -> mask = ~ 0 ;
d -> status = 0 ;
d -> command = 0 ;
break ;
256
case 0x0e : /* clear mask for all channels */
257
d -> mask = 0 ;
258
DMA_run ();
259
260
break ;
261
case 0x0f : /* write mask for all channels */
262
d -> mask = data ;
263
DMA_run ();
264
265
266
break ;
default :
267
dolog ( "unknown iport %#x \n " , iport );
268
break ;
269
270
}
271
# ifdef DEBUG_DMA
272
if ( 0xc != iport ) {
273
linfo ( "write_cont: nport %#06x, ichan % 2d, val %#06x \n " ,
274
nport , ichan , data );
275
276
277
278
}
# endif
}
279
280
281
282
static uint32_t read_cont ( void * opaque , uint32_t nport )
{
struct dma_cont * d = opaque ;
int iport , val ;
283
284
285
iport = ( nport >> d -> dshift ) & 0x0f ;
switch ( iport ) {
286
case 0x08 : /* status */
287
288
289
val = d -> status ;
d -> status &= 0xf0 ;
break ;
290
case 0x0f : /* mask */
291
292
293
294
295
296
val = d -> mask ;
break ;
default :
val = 0 ;
break ;
}
297
298
ldebug ( "read_cont: nport %#06x, iport %#04x val %#x \n " , nport , iport , val );
299
300
301
return val ;
}
302
303
304
305
306
307
308
309
310
311
312
313
314
int DMA_get_channel_mode ( int nchan )
{
return dma_controllers [ nchan > 3 ]. regs [ nchan & 3 ]. mode ;
}
void DMA_hold_DREQ ( int nchan )
{
int ncont , ichan ;
ncont = nchan > 3 ;
ichan = nchan & 3 ;
linfo ( "held cont=%d chan=%d \n " , ncont , ichan );
dma_controllers [ ncont ]. status |= 1 << ( ichan + 4 );
315
DMA_run ();
316
317
318
319
320
321
322
323
324
325
}
void DMA_release_DREQ ( int nchan )
{
int ncont , ichan ;
ncont = nchan > 3 ;
ichan = nchan & 3 ;
linfo ( "released cont=%d chan=%d \n " , ncont , ichan );
dma_controllers [ ncont ]. status &= ~ ( 1 << ( ichan + 4 ));
326
DMA_run ();
327
328
329
330
331
}
static void channel_run ( int ncont , int ichan )
{
int n ;
332
333
334
struct dma_regs * r = & dma_controllers [ ncont ]. regs [ ichan ];
# ifdef DEBUG_DMA
int dir , opmode ;
335
336
337
dir = ( r -> mode >> 5 ) & 1 ;
opmode = ( r -> mode >> 6 ) & 3 ;
338
339
340
341
342
343
344
345
if ( dir ) {
dolog ( "DMA in address decrement mode \n " );
}
if ( opmode != 1 ) {
dolog ( "DMA not in single mode select %#x \n " , opmode );
}
# endif
346
347
348
349
350
351
r = dma_controllers [ ncont ]. regs + ichan ;
n = r -> transfer_handler ( r -> opaque , ichan + ( ncont << 2 ),
r -> now [ COUNT ], ( r -> base [ COUNT ] + 1 ) << ncont );
r -> now [ COUNT ] = n ;
ldebug ( "dma_pos %d size %d \n " , n , ( r -> base [ COUNT ] + 1 ) << ncont );
352
353
}
354
355
356
static QEMUBH * dma_bh ;
static void DMA_run ( void )
357
358
359
{
struct dma_cont * d ;
int icont , ichan ;
360
int rearm = 0 ;
361
362
363
364
365
366
367
368
369
d = dma_controllers ;
for ( icont = 0 ; icont < 2 ; icont ++ , d ++ ) {
for ( ichan = 0 ; ichan < 4 ; ichan ++ ) {
int mask ;
mask = 1 << ichan ;
370
if (( 0 == ( d -> mask & mask )) && ( 0 != ( d -> status & ( mask << 4 )))) {
371
channel_run ( icont , ichan );
372
373
rearm = 1 ;
}
374
375
}
}
376
377
378
379
380
381
382
383
if ( rearm )
qemu_bh_schedule_idle ( dma_bh );
}
static void DMA_run_bh ( void * unused )
{
DMA_run ();
384
385
386
}
void DMA_register_channel ( int nchan ,
387
DMA_transfer_handler transfer_handler ,
388
void * opaque )
389
390
391
392
393
394
395
396
{
struct dma_regs * r ;
int ichan , ncont ;
ncont = nchan > 3 ;
ichan = nchan & 3 ;
r = dma_controllers [ ncont ]. regs + ichan ;
397
398
399
400
r -> transfer_handler = transfer_handler ;
r -> opaque = opaque ;
}
401
402
403
int DMA_read_memory ( int nchan , void * buf , int pos , int len )
{
struct dma_regs * r = & dma_controllers [ nchan > 3 ]. regs [ nchan & 3 ];
404
target_phys_addr_t addr = (( r -> pageh & 0x7f ) << 24 ) | ( r -> page << 16 ) | r -> now [ ADDR ];
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
if ( r -> mode & 0x20 ) {
int i ;
uint8_t * p = buf ;
cpu_physical_memory_read ( addr - pos - len , buf , len );
/* What about 16bit transfers? */
for ( i = 0 ; i < len >> 1 ; i ++ ) {
uint8_t b = p [ len - i - 1 ];
p [ i ] = b ;
}
}
else
cpu_physical_memory_read ( addr + pos , buf , len );
return len ;
}
int DMA_write_memory ( int nchan , void * buf , int pos , int len )
{
struct dma_regs * r = & dma_controllers [ nchan > 3 ]. regs [ nchan & 3 ];
426
target_phys_addr_t addr = (( r -> pageh & 0x7f ) << 24 ) | ( r -> page << 16 ) | r -> now [ ADDR ];
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
if ( r -> mode & 0x20 ) {
int i ;
uint8_t * p = buf ;
cpu_physical_memory_write ( addr - pos - len , buf , len );
/* What about 16bit transfers? */
for ( i = 0 ; i < len ; i ++ ) {
uint8_t b = p [ len - i - 1 ];
p [ i ] = b ;
}
}
else
cpu_physical_memory_write ( addr + pos , buf , len );
return len ;
}
445
446
447
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule ( int nchan )
{
448
449
CPUState * env = cpu_single_env ;
if ( env )
450
cpu_exit ( env );
451
452
}
453
454
455
456
457
458
static void dma_reset ( void * opaque )
{
struct dma_cont * d = opaque ;
write_cont ( d , ( 0x0d << d -> dshift ), 0 );
}
459
460
461
462
463
464
465
static int dma_phony_handler ( void * opaque , int nchan , int dma_pos , int dma_len )
{
dolog ( "unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d \n " ,
nchan , dma_pos , dma_len );
return dma_pos ;
}
466
/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
467
static void dma_init2 ( struct dma_cont * d , int base , int dshift ,
468
int page_base , int pageh_base )
469
{
470
static const int page_port_list [] = { 0x1 , 0x2 , 0x3 , 0x7 };
471
472
int i ;
473
d -> dshift = dshift ;
474
for ( i = 0 ; i < 8 ; i ++ ) {
475
476
register_ioport_write ( base + ( i << dshift ), 1 , 1 , write_chan , d );
register_ioport_read ( base + ( i << dshift ), 1 , 1 , read_chan , d );
477
}
malc
authored
16 years ago
478
for ( i = 0 ; i < ARRAY_SIZE ( page_port_list ); i ++ ) {
479
register_ioport_write ( page_base + page_port_list [ i ], 1 , 1 ,
480
write_page , d );
481
register_ioport_read ( page_base + page_port_list [ i ], 1 , 1 ,
482
read_page , d );
483
if ( pageh_base >= 0 ) {
484
register_ioport_write ( pageh_base + page_port_list [ i ], 1 , 1 ,
485
write_pageh , d );
486
register_ioport_read ( pageh_base + page_port_list [ i ], 1 , 1 ,
487
488
read_pageh , d );
}
489
490
}
for ( i = 0 ; i < 8 ; i ++ ) {
491
register_ioport_write ( base + (( i + 8 ) << dshift ), 1 , 1 ,
492
write_cont , d );
493
register_ioport_read ( base + (( i + 8 ) << dshift ), 1 , 1 ,
494
read_cont , d );
495
}
496
497
qemu_register_reset ( dma_reset , d );
dma_reset ( d );
malc
authored
16 years ago
498
for ( i = 0 ; i < ARRAY_SIZE ( d -> regs ); ++ i ) {
499
500
d -> regs [ i ]. transfer_handler = dma_phony_handler ;
}
501
}
502
503
504
505
506
507
508
509
510
511
static void dma_save ( QEMUFile * f , void * opaque )
{
struct dma_cont * d = opaque ;
int i ;
/* qemu_put_8s (f, &d->status); */
qemu_put_8s ( f , & d -> command );
qemu_put_8s ( f , & d -> mask );
qemu_put_8s ( f , & d -> flip_flop );
ths
authored
17 years ago
512
qemu_put_be32 ( f , d -> dshift );
513
514
515
for ( i = 0 ; i < 4 ; ++ i ) {
struct dma_regs * r = & d -> regs [ i ];
ths
authored
17 years ago
516
517
qemu_put_be32 ( f , r -> now [ 0 ]);
qemu_put_be32 ( f , r -> now [ 1 ]);
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
qemu_put_be16s ( f , & r -> base [ 0 ]);
qemu_put_be16s ( f , & r -> base [ 1 ]);
qemu_put_8s ( f , & r -> mode );
qemu_put_8s ( f , & r -> page );
qemu_put_8s ( f , & r -> pageh );
qemu_put_8s ( f , & r -> dack );
qemu_put_8s ( f , & r -> eop );
}
}
static int dma_load ( QEMUFile * f , void * opaque , int version_id )
{
struct dma_cont * d = opaque ;
int i ;
if ( version_id != 1 )
return - EINVAL ;
/* qemu_get_8s (f, &d->status); */
qemu_get_8s ( f , & d -> command );
qemu_get_8s ( f , & d -> mask );
qemu_get_8s ( f , & d -> flip_flop );
ths
authored
17 years ago
540
d -> dshift = qemu_get_be32 ( f );
541
542
543
for ( i = 0 ; i < 4 ; ++ i ) {
struct dma_regs * r = & d -> regs [ i ];
ths
authored
17 years ago
544
545
r -> now [ 0 ] = qemu_get_be32 ( f );
r -> now [ 1 ] = qemu_get_be32 ( f );
546
547
548
549
550
551
552
553
qemu_get_be16s ( f , & r -> base [ 0 ]);
qemu_get_be16s ( f , & r -> base [ 1 ]);
qemu_get_8s ( f , & r -> mode );
qemu_get_8s ( f , & r -> page );
qemu_get_8s ( f , & r -> pageh );
qemu_get_8s ( f , & r -> dack );
qemu_get_8s ( f , & r -> eop );
}
554
555
556
DMA_run ();
557
558
559
return 0 ;
}
560
void DMA_init ( int high_page_enable )
561
{
562
dma_init2 ( & dma_controllers [ 0 ], 0x00 , 0 , 0x80 ,
563
564
565
high_page_enable ? 0x480 : - 1 );
dma_init2 ( & dma_controllers [ 1 ], 0xc0 , 1 , 0x88 ,
high_page_enable ? 0x488 : - 1 );
566
567
register_savevm ( "dma" , 0 , 1 , dma_save , dma_load , & dma_controllers [ 0 ]);
register_savevm ( "dma" , 1 , 1 , dma_save , dma_load , & dma_controllers [ 1 ]);
568
569
dma_bh = qemu_bh_new ( DMA_run_bh , NULL );
570
}