/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "hw.h"
#include "isa.h"

/* #define DEBUG_DMA */

#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define lwarn(...)
#define linfo(...)
#define ldebug(...)
#endif

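/* Per-channel state: current transfer position, programmed base
   address/count, mode byte, page registers and the registered
   transfer handler. */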
struct dma_regs {
    int now[2];
    uint16_t base[2];
    uint8_t mode;
    uint8_t page;
    uint8_t pageh;
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};

#define ADDR 0
#define COUNT 1

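/* One instance per i8257 controller: DMA1 (8-bit, channels 0-3)
   and DMA2 (16-bit, channels 4-7); dshift is 0 or 1 accordingly. */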
static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    int dshift;
    struct dma_regs regs[4];
} dma_controllers[2];

enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,

    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK
};

static void DMA_run (void);

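/* Map the low three bits of a page register port to its channel
   (-1 means the port has no channel assigned). */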
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};

static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}

static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}

static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}

static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}

static inline void init_chan (struct dma_cont *d, int ichan)
{
    struct dma_regs *r;

    r = d->regs + ichan;
    r->now[ADDR] = r->base[ADDR] << d->dshift;
    r->now[COUNT] = 0;
}

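/* Read and toggle the byte-pointer flip-flop used for two-step
   accesses to the 16-bit address/count registers. */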
static inline int getff (struct dma_cont *d)
{
    int ff;

    ff = d->flip_flop;
    d->flip_flop = !ff;
    return ff;
}

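/* Read one byte of a channel's current address or remaining count,
   alternating between low and high byte via the flip-flop. */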
static uint32_t read_chan (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}

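/* Write one byte of a channel's base address or count register; the
   high byte completes the write and re-initializes the channel. */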
static void write_chan (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan, nreg;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;
    if (getff (d)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}

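/* Writes to the controller ports (offsets 0x08-0x0f, scaled by dshift):
   command, request, mask, mode, flip-flop clear, reset. */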
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09:
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        DMA_run ();
        break;

    case 0x0a:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        DMA_run ();
        break;

    case 0x0b:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;

                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d:                  /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e:                  /* clear mask for all channels */
        d->mask = 0;
        DMA_run ();
        break;

    case 0x0f:                  /* write mask for all channels */
        d->mask = data;
        DMA_run ();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}

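/* Controller register reads; reading the status register clears
   the terminal-count bits in the low nibble. */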
static uint32_t read_cont (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int iport, val;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* status */
        val = d->status;
        d->status &= 0xf0;
        break;
    case 0x0f:                  /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}

int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}

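/* Devices assert/deassert a channel's DREQ; pending requests live
   in the high nibble of the controller status register. */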
void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
    DMA_run ();
}

void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
    DMA_run ();
}

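/* Perform the pending transfer on one channel by calling its
   registered handler; the handler returns the new position. */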
static void channel_run (int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    r = dma_controllers[ncont].regs + ichan;
    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
    r->now[COUNT] = n;
    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
}

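/* Scan both controllers for channels that are unmasked and have a
   pending request, run them, and reschedule the bottom half while
   work remains. */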
static QEMUBH *dma_bh;

static void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;
    int rearm = 0;

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                channel_run (icont, ichan);
                rearm = 1;
            }
        }
    }

    if (rearm)
        qemu_bh_schedule_idle (dma_bh);
}

static void DMA_run_bh (void *unused)
{
    DMA_run ();
}

void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;
    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}

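/* Copy between guest memory and a device buffer.  The physical address
   combines the high page, page and current address registers;
   decrement-mode transfers go backwards and the buffer is byte-reversed. */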
int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}

int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_write (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}

/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule (int nchan)
{
    CPUState *env = cpu_single_env;

    if (env)
        cpu_exit (env);
}

static void dma_reset (void *opaque)
{
    struct dma_cont *d = opaque;
    write_cont (d, (0x0d << d->dshift), 0);
}

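/* Default handler installed on channels no device has registered. */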
static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
{
    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
           nchan, dma_pos, dma_len);
    return dma_pos;
}

/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
static void dma_init2 (struct dma_cont *d, int base, int dshift,
                       int page_base, int pageh_base)
{
    static const int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
    }
    for (i = 0; i < ARRAY_SIZE (page_port_list); i++) {
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
        if (pageh_base >= 0) {
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
        }
    }
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
    }
    qemu_register_reset (dma_reset, d);
    dma_reset (d);
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}

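/* Savevm/loadvm handlers for one controller and its four channels. */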
static void dma_save (QEMUFile *f, void *opaque)
{
    struct dma_cont *d = opaque;
    int i;

    /* qemu_put_8s (f, &d->status); */
    qemu_put_8s (f, &d->command);
    qemu_put_8s (f, &d->mask);
    qemu_put_8s (f, &d->flip_flop);
    qemu_put_be32 (f, d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];

        qemu_put_be32 (f, r->now[0]);
        qemu_put_be32 (f, r->now[1]);
        qemu_put_be16s (f, &r->base[0]);
        qemu_put_be16s (f, &r->base[1]);
        qemu_put_8s (f, &r->mode);
        qemu_put_8s (f, &r->page);
        qemu_put_8s (f, &r->pageh);
        qemu_put_8s (f, &r->dack);
        qemu_put_8s (f, &r->eop);
    }
}

static int dma_load (QEMUFile *f, void *opaque, int version_id)
{
    struct dma_cont *d = opaque;
    int i;

    if (version_id != 1)
        return -EINVAL;

    /* qemu_get_8s (f, &d->status); */
    qemu_get_8s (f, &d->command);
    qemu_get_8s (f, &d->mask);
    qemu_get_8s (f, &d->flip_flop);
    d->dshift = qemu_get_be32 (f);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];

        r->now[0] = qemu_get_be32 (f);
        r->now[1] = qemu_get_be32 (f);
        qemu_get_be16s (f, &r->base[0]);
        qemu_get_be16s (f, &r->base[1]);
        qemu_get_8s (f, &r->mode);
        qemu_get_8s (f, &r->page);
        qemu_get_8s (f, &r->pageh);
        qemu_get_8s (f, &r->dack);
        qemu_get_8s (f, &r->eop);
    }

    DMA_run ();

    return 0;
}

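/* Install both ISA DMA controllers: DMA1 at 0x00 with page registers at
   0x80, DMA2 at 0xc0 with page registers at 0x88; high page registers
   at 0x480/0x488 when enabled. */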
void DMA_init (int high_page_enable)
{
    dma_init2 (&dma_controllers[0], 0x00, 0, 0x80,
               high_page_enable ? 0x480 : -1);
    dma_init2 (&dma_controllers[1], 0xc0, 1, 0x88,
               high_page_enable ? 0x488 : -1);
    register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
    register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);

    dma_bh = qemu_bh_new (DMA_run_bh, NULL);
}