/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
# include "hw.h"
# include "isa.h"
26
27
/* #define DEBUG_DMA */

/* Unconditional error logging; always compiled in. */
#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)

/* Warning/info/debug logging, active only with DEBUG_DMA defined. */
#ifdef DEBUG_DMA
#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define lwarn(...)
#define linfo(...)
#define ldebug(...)
#endif

/* Number of elements in a statically sized array. */
#define LENOFA(a) ((int) (sizeof (a) / sizeof (a[0])))
/* State of one i8237 DMA channel. */
struct dma_regs {
    int now[2];                 /* current address / current transfer count */
    uint16_t base[2];           /* programmed base address / base count */
    uint8_t mode;               /* mode register (op, autoinit, dir, opmode) */
    uint8_t page;               /* page register: address bits 16-23 */
    uint8_t pageh;              /* high page register: address bits 24-30 */
    uint8_t dack;               /* DACK line state */
    uint8_t eop;                /* EOP (end of process) line state */
    DMA_transfer_handler transfer_handler;  /* device callback moving data */
    void *opaque;               /* opaque argument passed to transfer_handler */
};
/* Indices into dma_regs.now[] / dma_regs.base[]. */
#define ADDR 0
#define COUNT 1

/* One i8237 controller.  dma_controllers[0] serves the 8-bit channels
   0-3, dma_controllers[1] the 16-bit channels 4-7. */
static struct dma_cont {
    uint8_t status;             /* TC bits 0-3, request bits 4-7 */
    uint8_t command;            /* command register */
    uint8_t mask;               /* per-channel mask bits */
    uint8_t flip_flop;          /* byte pointer flip-flop for 16-bit accesses */
    int dshift;                 /* 0: 8 bit DMA, 1: 16 bit DMA */
    struct dma_regs regs[4];
} dma_controllers[2];
/* Command register bits.  Everything except controller disable
   (CMD_BLOCK_CONTROLLER) is unimplemented and rejected on write. */
enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK
};
/* Map the low three bits of a page-register port to a channel number;
   -1 marks ports that do not belong to any channel. */
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
static void write_page ( void * opaque , uint32_t nport , uint32_t data )
84
{
85
struct dma_cont * d = opaque ;
86
87
int ichan ;
88
ichan = channels [ nport & 7 ];
89
if ( - 1 == ichan ) {
90
dolog ( "invalid channel %#x %#x \n " , nport , data );
91
92
return ;
}
93
94
95
d -> regs [ ichan ]. page = data ;
}
96
static void write_pageh ( void * opaque , uint32_t nport , uint32_t data )
97
98
99
{
struct dma_cont * d = opaque ;
int ichan ;
100
101
ichan = channels [ nport & 7 ];
102
if ( - 1 == ichan ) {
103
dolog ( "invalid channel %#x %#x \n " , nport , data );
104
105
106
107
return ;
}
d -> regs [ ichan ]. pageh = data ;
}
108
109
110
111
112
113
114
static uint32_t read_page ( void * opaque , uint32_t nport )
{
struct dma_cont * d = opaque ;
int ichan ;
ichan = channels [ nport & 7 ];
115
if ( - 1 == ichan ) {
116
dolog ( "invalid channel read %#x \n " , nport );
117
118
119
return 0 ;
}
return d -> regs [ ichan ]. page ;
120
121
}
122
123
124
125
126
127
128
/* I/O read handler for the EISA high page registers. */
static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan = channels[nport & 7];

    if (ichan == -1) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}
static inline void init_chan ( struct dma_cont * d , int ichan )
136
137
138
{
struct dma_regs * r ;
139
r = d -> regs + ichan ;
140
r -> now [ ADDR ] = r -> base [ ADDR ] << d -> dshift ;
141
142
143
r -> now [ COUNT ] = 0 ;
}
144
static inline int getff ( struct dma_cont * d )
145
146
147
{
int ff ;
148
149
ff = d -> flip_flop ;
d -> flip_flop = ! ff ;
150
151
152
return ff ;
}
153
static uint32_t read_chan ( void * opaque , uint32_t nport )
154
{
155
struct dma_cont * d = opaque ;
156
int ichan , nreg , iport , ff , val , dir ;
157
158
struct dma_regs * r ;
159
160
161
162
iport = ( nport >> d -> dshift ) & 0x0f ;
ichan = iport >> 1 ;
nreg = iport & 1 ;
r = d -> regs + ichan ;
163
164
dir = (( r -> mode >> 5 ) & 1 ) ? - 1 : 1 ;
165
ff = getff ( d );
166
if ( nreg )
167
val = ( r -> base [ COUNT ] << d -> dshift ) - r -> now [ COUNT ];
168
else
169
val = r -> now [ ADDR ] + r -> now [ COUNT ] * dir ;
170
171
ldebug ( "read_chan %#x -> %d \n " , iport , val );
172
return ( val >> ( d -> dshift + ( ff << 3 ))) & 0xff ;
173
174
}
175
static void write_chan ( void * opaque , uint32_t nport , uint32_t data )
176
{
177
178
struct dma_cont * d = opaque ;
int iport , ichan , nreg ;
179
180
struct dma_regs * r ;
181
182
183
184
185
iport = ( nport >> d -> dshift ) & 0x0f ;
ichan = iport >> 1 ;
nreg = iport & 1 ;
r = d -> regs + ichan ;
if ( getff ( d )) {
186
r -> base [ nreg ] = ( r -> base [ nreg ] & 0xff ) | (( data << 8 ) & 0xff00 );
187
init_chan ( d , ichan );
188
189
} else {
r -> base [ nreg ] = ( r -> base [ nreg ] & 0xff00 ) | ( data & 0xff );
190
191
192
}
}
193
/* I/O write handler for the controller registers (command, request,
   masks, mode, flip-flop clear, reset).  iport is the register index
   after stripping the controller's address shift. */
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* command */
        /* Reject any command that enables an unimplemented feature. */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09:                  /* request */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);      /* set software request */
        }
        else {
            d->status &= ~(1 << (ichan + 4));   /* clear software request */
        }
        d->status &= ~(1 << ichan);             /* clear the channel's TC bit */
        break;

    case 0x0a:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        break;

    case 0x0b:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d:                  /* reset */
        d->flip_flop = 0;
        d->mask = ~0;           /* reset masks every channel */
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e:                  /* clear mask for all channels */
        d->mask = 0;
        break;

    case 0x0f:                  /* write mask for all channels */
        d->mask = data;
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}
277
278
279
280
static uint32_t read_cont ( void * opaque , uint32_t nport )
{
struct dma_cont * d = opaque ;
int iport , val ;
281
282
283
iport = ( nport >> d -> dshift ) & 0x0f ;
switch ( iport ) {
284
case 0x08 : /* status */
285
286
287
val = d -> status ;
d -> status &= 0xf0 ;
break ;
288
case 0x0f : /* mask */
289
290
291
292
293
294
val = d -> mask ;
break ;
default :
val = 0 ;
break ;
}
295
296
ldebug ( "read_cont: nport %#06x, iport %#04x val %#x \n " , nport , iport , val );
297
298
299
return val ;
}
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
int DMA_get_channel_mode ( int nchan )
{
return dma_controllers [ nchan > 3 ]. regs [ nchan & 3 ]. mode ;
}
/* Assert the software DMA request for channel nchan (0-7). */
void DMA_hold_DREQ (int nchan)
{
    int ncont = nchan > 3;
    int ichan = nchan & 3;

    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
}
/* Deassert the software DMA request for channel nchan (0-7). */
void DMA_release_DREQ (int nchan)
{
    int ncont = nchan > 3;
    int ichan = nchan & 3;

    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
}
static void channel_run ( int ncont , int ichan )
{
int n ;
328
329
330
struct dma_regs * r = & dma_controllers [ ncont ]. regs [ ichan ];
# ifdef DEBUG_DMA
int dir , opmode ;
331
332
333
dir = ( r -> mode >> 5 ) & 1 ;
opmode = ( r -> mode >> 6 ) & 3 ;
334
335
336
337
338
339
340
341
if ( dir ) {
dolog ( "DMA in address decrement mode \n " );
}
if ( opmode != 1 ) {
dolog ( "DMA not in single mode select %#x \n " , opmode );
}
# endif
342
343
344
345
346
347
r = dma_controllers [ ncont ]. regs + ichan ;
n = r -> transfer_handler ( r -> opaque , ichan + ( ncont << 2 ),
r -> now [ COUNT ], ( r -> base [ COUNT ] + 1 ) << ncont );
r -> now [ COUNT ] = n ;
ldebug ( "dma_pos %d size %d \n " , n , ( r -> base [ COUNT ] + 1 ) << ncont );
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
}
/* Poll both controllers and service every channel that is unmasked and
   has a pending request bit set. */
void DMA_run (void)
{
    int icont, ichan;

    for (icont = 0; icont < 2; icont++) {
        struct dma_cont *d = &dma_controllers[icont];

        for (ichan = 0; ichan < 4; ichan++) {
            int mask = 1 << ichan;

            if (!(d->mask & mask) && (d->status & (mask << 4))) {
                channel_run (icont, ichan);
            }
        }
    }
}
void DMA_register_channel ( int nchan ,
370
DMA_transfer_handler transfer_handler ,
371
void * opaque )
372
373
374
375
376
377
378
379
{
struct dma_regs * r ;
int ichan , ncont ;
ncont = nchan > 3 ;
ichan = nchan & 3 ;
r = dma_controllers [ ncont ]. regs + ichan ;
380
381
382
383
r -> transfer_handler = transfer_handler ;
r -> opaque = opaque ;
}
384
385
386
int DMA_read_memory ( int nchan , void * buf , int pos , int len )
{
struct dma_regs * r = & dma_controllers [ nchan > 3 ]. regs [ nchan & 3 ];
387
target_phys_addr_t addr = (( r -> pageh & 0x7f ) << 24 ) | ( r -> page << 16 ) | r -> now [ ADDR ];
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
if ( r -> mode & 0x20 ) {
int i ;
uint8_t * p = buf ;
cpu_physical_memory_read ( addr - pos - len , buf , len );
/* What about 16bit transfers? */
for ( i = 0 ; i < len >> 1 ; i ++ ) {
uint8_t b = p [ len - i - 1 ];
p [ i ] = b ;
}
}
else
cpu_physical_memory_read ( addr + pos , buf , len );
return len ;
}
int DMA_write_memory ( int nchan , void * buf , int pos , int len )
{
struct dma_regs * r = & dma_controllers [ nchan > 3 ]. regs [ nchan & 3 ];
409
target_phys_addr_t addr = (( r -> pageh & 0x7f ) << 24 ) | ( r -> page << 16 ) | r -> now [ ADDR ];
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
if ( r -> mode & 0x20 ) {
int i ;
uint8_t * p = buf ;
cpu_physical_memory_write ( addr - pos - len , buf , len );
/* What about 16bit transfers? */
for ( i = 0 ; i < len ; i ++ ) {
uint8_t b = p [ len - i - 1 ];
p [ i ] = b ;
}
}
else
cpu_physical_memory_write ( addr + pos , buf , len );
return len ;
}
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule (int nchan)
{
    /* Kick the CPU out of its execution loop so the DMA poll in the main
       loop runs soon.  nchan is currently unused. */
    CPUState *env = cpu_single_env;
    if (env)
        cpu_interrupt (env, CPU_INTERRUPT_EXIT);
}
/* System reset handler: emulate a write to the controller's reset port
   (register 0x0d), which clears flip-flop, status, command and masks. */
static void dma_reset (void *opaque)
{
    struct dma_cont *d = opaque;

    write_cont (d, 0x0d << d->dshift, 0);
}
/* Default transfer handler installed on channels no device has claimed;
   logs the stray transfer and reports no progress. */
static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
{
    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
           nchan, dma_pos, dma_len);
    return dma_pos;
}
/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
static void dma_init2 (struct dma_cont *d, int base, int dshift,
                       int page_base, int pageh_base)
{
    /* Low address bits of the page-register ports for channels 2, 3, 1
       and 0 respectively (inverse of the channels[] mapping). */
    const static int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;

    /* Per-channel address/count registers. */
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
    }

    /* Page registers; high page registers only when pageh_base >= 0. */
    for (i = 0; i < LENOFA (page_port_list); i++) {
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
        if (pageh_base >= 0) {
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
        }
    }

    /* Controller registers (command, status, request, masks, ...). */
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
    }

    qemu_register_reset (dma_reset, d);
    dma_reset (d);

    /* Route transfers on unclaimed channels to a logging stub. */
    for (i = 0; i < LENOFA (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}
/* Serialize one controller for savevm.  Field order must stay in sync
   with dma_load. */
static void dma_save (QEMUFile *f, void *opaque)
{
    struct dma_cont *d = opaque;
    int i;

    /* qemu_put_8s (f, &d->status); */
    qemu_put_8s (f, &d->command);
    qemu_put_8s (f, &d->mask);
    qemu_put_8s (f, &d->flip_flop);
    qemu_put_be32 (f, d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_put_be32 (f, r->now[0]);
        qemu_put_be32 (f, r->now[1]);
        qemu_put_be16s (f, &r->base[0]);
        qemu_put_be16s (f, &r->base[1]);
        qemu_put_8s (f, &r->mode);
        qemu_put_8s (f, &r->page);
        qemu_put_8s (f, &r->pageh);
        qemu_put_8s (f, &r->dack);
        qemu_put_8s (f, &r->eop);
    }
}
/* Deserialize one controller for loadvm.  Field order must stay in sync
   with dma_save; only version 1 streams are accepted. */
static int dma_load (QEMUFile *f, void *opaque, int version_id)
{
    struct dma_cont *d = opaque;
    int i;

    if (version_id != 1)
        return -EINVAL;

    /* qemu_get_8s (f, &d->status); */
    qemu_get_8s (f, &d->command);
    qemu_get_8s (f, &d->mask);
    qemu_get_8s (f, &d->flip_flop);
    d->dshift = qemu_get_be32 (f);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        r->now[0] = qemu_get_be32 (f);
        r->now[1] = qemu_get_be32 (f);
        qemu_get_be16s (f, &r->base[0]);
        qemu_get_be16s (f, &r->base[1]);
        qemu_get_8s (f, &r->mode);
        qemu_get_8s (f, &r->page);
        qemu_get_8s (f, &r->pageh);
        qemu_get_8s (f, &r->dack);
        qemu_get_8s (f, &r->eop);
    }
    return 0;
}
/* Wire up both cascaded controllers: the 8-bit one (channels 0-3) at
   ports 0x00/0x80, the 16-bit one (channels 4-7) at 0xc0/0x88.  When
   high_page_enable is set, the EISA high page registers at 0x480/0x488
   are mapped as well.  Registers both controllers with savevm. */
void DMA_init (int high_page_enable)
{
    dma_init2 (&dma_controllers[0], 0x00, 0, 0x80,
               high_page_enable ? 0x480 : -1);
    dma_init2 (&dma_controllers[1], 0xc0, 1, 0x88,
               high_page_enable ? 0x488 : -1);
    register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
    register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);
}