/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
|
|
24
|
#include "vl.h"
|
|
25
|
|
|
26
|
/* #define DEBUG_DMA */

/* dolog always prints; lwarn/linfo/ldebug only when DEBUG_DMA is defined. */
#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define lwarn(...)
#define linfo(...)
#define ldebug(...)
#endif

/* Element count of a true array (not valid on pointers). */
#define LENOFA(a) ((int) (sizeof(a)/sizeof(a[0])))

/* Per-channel state of one i8237 DMA channel. */
struct dma_regs {
    int now[2];           /* current address [ADDR] / bytes done [COUNT] */
    uint16_t base[2];     /* programmed base address / base count */
    uint8_t mode;         /* mode register as written by the guest */
    uint8_t page;         /* page register: address bits 16-23 */
    uint8_t pageh;        /* high page register: address bits 24-31 */
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler; /* device callback for transfers */
    void *opaque;         /* context passed back to transfer_handler */
};

/* Indices into dma_regs.now[] / dma_regs.base[]. */
#define ADDR 0
#define COUNT 1

/* One of the two cascaded i8237 controllers (see dma_controllers[2]). */
static struct dma_cont {
    uint8_t status;       /* bits 0-3: terminal count, bits 4-7: DREQ */
    uint8_t command;
    uint8_t mask;         /* bit per channel; set = channel masked off */
    uint8_t flip_flop;    /* byte selector for 16-bit register accesses */
    int dshift;           /* 0: 8-bit controller, 1: 16-bit controller */
    struct dma_regs regs[4];
} dma_controllers[2];

/* Command-register bits; everything except controller disable is refused. */
enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME = 0x08,
    CMD_CYCLIC_PRIORITY = 0x10,
    CMD_EXTENDED_WRITE = 0x20,
    CMD_LOW_DREQ = 0x40,
    CMD_LOW_DACK = 0x80,
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK
};

/* Map the low 3 bits of a page-register port to a channel; -1 = invalid. */
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
|
|
82
|
static void write_page (void *opaque, uint32_t nport, uint32_t data)
|
|
83
|
{
|
|
84
|
struct dma_cont *d = opaque;
|
|
85
86
|
int ichan;
|
|
87
|
ichan = channels[nport & 7];
|
|
88
|
if (-1 == ichan) {
|
|
89
|
dolog ("invalid channel %#x %#x\n", nport, data);
|
|
90
91
|
return;
}
|
|
92
93
94
|
d->regs[ichan].page = data;
}
|
|
95
|
static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
|
|
96
97
98
|
{
struct dma_cont *d = opaque;
int ichan;
|
|
99
|
|
|
100
|
ichan = channels[nport & 7];
|
|
101
|
if (-1 == ichan) {
|
|
102
|
dolog ("invalid channel %#x %#x\n", nport, data);
|
|
103
104
105
106
|
return;
}
d->regs[ichan].pageh = data;
}
|
|
107
|
|
|
108
109
110
111
112
113
|
static uint32_t read_page (void *opaque, uint32_t nport)
{
struct dma_cont *d = opaque;
int ichan;
ichan = channels[nport & 7];
|
|
114
|
if (-1 == ichan) {
|
|
115
|
dolog ("invalid channel read %#x\n", nport);
|
|
116
117
118
|
return 0;
}
return d->regs[ichan].page;
|
|
119
120
|
}
|
|
121
122
123
124
125
126
127
|
/* Guest read of a high page register. */
static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *ctrl = opaque;
    int chan = channels[nport & 7];

    if (chan == -1) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return ctrl->regs[chan].pageh;
}
|
|
134
|
static inline void init_chan (struct dma_cont *d, int ichan)
|
|
135
136
137
|
{
struct dma_regs *r;
|
|
138
|
r = d->regs + ichan;
|
|
139
|
r->now[ADDR] = r->base[ADDR] << d->dshift;
|
|
140
141
142
|
r->now[COUNT] = 0;
}
|
|
143
|
static inline int getff (struct dma_cont *d)
|
|
144
145
146
|
{
int ff;
|
|
147
148
|
ff = d->flip_flop;
d->flip_flop = !ff;
|
|
149
150
151
|
return ff;
}
|
|
152
|
static uint32_t read_chan (void *opaque, uint32_t nport)
|
|
153
|
{
|
|
154
|
struct dma_cont *d = opaque;
|
|
155
|
int ichan, nreg, iport, ff, val, dir;
|
|
156
157
|
struct dma_regs *r;
|
|
158
159
160
161
|
iport = (nport >> d->dshift) & 0x0f;
ichan = iport >> 1;
nreg = iport & 1;
r = d->regs + ichan;
|
|
162
|
|
|
163
|
dir = ((r->mode >> 5) & 1) ? -1 : 1;
|
|
164
|
ff = getff (d);
|
|
165
|
if (nreg)
|
|
166
|
val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
|
|
167
|
else
|
|
168
|
val = r->now[ADDR] + r->now[COUNT] * dir;
|
|
169
|
|
|
170
|
ldebug ("read_chan %#x -> %d\n", iport, val);
|
|
171
|
return (val >> (d->dshift + (ff << 3))) & 0xff;
|
|
172
173
|
}
|
|
174
|
static void write_chan (void *opaque, uint32_t nport, uint32_t data)
|
|
175
|
{
|
|
176
177
|
struct dma_cont *d = opaque;
int iport, ichan, nreg;
|
|
178
179
|
struct dma_regs *r;
|
|
180
181
182
183
184
|
iport = (nport >> d->dshift) & 0x0f;
ichan = iport >> 1;
nreg = iport & 1;
r = d->regs + ichan;
if (getff (d)) {
|
|
185
|
r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
|
|
186
|
init_chan (d, ichan);
|
|
187
188
|
} else {
r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
|
|
189
190
191
|
}
}
|
|
192
|
/* Guest write to one of the controller command/status ports
   (iport 0x08-0x0f, relative to the controller base). */
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08: /* command */
        /* only the all-zero (enable) command is supported */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09: /* request register: set/clear a software DREQ */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        /* any request write clears the channel's terminal-count bit */
        d->status &= ~(1 << ichan);
        break;

    case 0x0a: /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        break;

    case 0x0b: /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c: /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d: /* reset (master clear) */
        d->flip_flop = 0;
        d->mask = ~0;       /* all channels masked after reset */
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e: /* clear mask for all channels */
        d->mask = 0;
        break;

    case 0x0f: /* write mask for all channels */
        d->mask = data;
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}
|
|
276
277
278
279
|
/* Guest read of a controller port; only status and mask are readable.
   Reading the status register clears the terminal-count bits. */
static uint32_t read_cont (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int reg = (nport >> d->dshift) & 0x0f;
    int val;

    switch (reg) {
    case 0x08: /* status */
        val = d->status;
        d->status &= 0xf0;
        break;
    case 0x0f: /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, reg, val);
    return val;
}
|
|
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
|
/* Return the mode register of DMA channel 0-7. */
int DMA_get_channel_mode (int nchan)
{
    int ncont = nchan > 3;

    return dma_controllers[ncont].regs[nchan & 3].mode;
}
/* Assert the DMA request line of channel 0-7. */
void DMA_hold_DREQ (int nchan)
{
    int ncont = nchan > 3;
    int ichan = nchan & 3;

    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
}
/* De-assert the DMA request line of channel 0-7. */
void DMA_release_DREQ (int nchan)
{
    int ncont = nchan > 3;
    int ichan = nchan & 3;

    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
}
static void channel_run (int ncont, int ichan)
{
int n;
|
|
327
328
329
|
struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
int dir, opmode;
|
|
330
|
|
|
331
332
|
dir = (r->mode >> 5) & 1;
opmode = (r->mode >> 6) & 3;
|
|
333
|
|
|
334
335
336
337
338
339
340
|
if (dir) {
dolog ("DMA in address decrement mode\n");
}
if (opmode != 1) {
dolog ("DMA not in single mode select %#x\n", opmode);
}
#endif
|
|
341
|
|
|
342
343
344
345
346
|
r = dma_controllers[ncont].regs + ichan;
n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
r->now[COUNT], (r->base[COUNT] + 1) << ncont);
r->now[COUNT] = n;
ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
|
|
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
|
}
/* Poll both controllers and run every channel that is unmasked and has
   a pending DREQ. */
void DMA_run (void)
{
    int icont, ichan;

    for (icont = 0; icont < 2; icont++) {
        struct dma_cont *d = &dma_controllers[icont];

        for (ichan = 0; ichan < 4; ichan++) {
            int bit = 1 << ichan;

            if (!(d->mask & bit) && (d->status & (bit << 4)))
                channel_run (icont, ichan);
        }
    }
}
void DMA_register_channel (int nchan,
|
|
369
|
DMA_transfer_handler transfer_handler,
|
|
370
|
void *opaque)
|
|
371
372
373
374
375
376
377
378
|
{
struct dma_regs *r;
int ichan, ncont;
ncont = nchan > 3;
ichan = nchan & 3;
r = dma_controllers[ncont].regs + ichan;
|
|
379
380
381
382
|
r->transfer_handler = transfer_handler;
r->opaque = opaque;
}
|
|
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
|
/* Copy `len' bytes of channel `nchan's current DMA block, starting at
 * offset `pos', into `buf'.  In address-decrement mode (mode bit 5) the
 * data lives at descending addresses and is byte-reversed so the caller
 * sees it in transfer order.  Returns the number of bytes copied. */
int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_ulong addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* Reverse the buffer in place.  The original loop only copied the
           upper half over the lower half (p[i] = p[len-i-1] without saving
           p[i]), corrupting the data; a proper swap is needed. */
        /* What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}
/* Copy `len' bytes from `buf' into channel `nchan's current DMA block at
 * offset `pos'.  In address-decrement mode the bytes are reversed before
 * the store so they land at descending addresses in transfer order; note
 * this mutates the caller's buffer (as the original code already did).
 * Returns the number of bytes written. */
int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_ulong addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        /* The original wrote first and then "reversed" the no-longer-used
           source buffer, and iterated over the full length, which undid
           its own swaps.  Reverse (swap, half-length) BEFORE writing. */
        /* What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
        cpu_physical_memory_write (addr - pos - len, buf, len);
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}
|
|
427
428
429
|
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
|
|
430
431
432
|
CPUState *env = cpu_single_env;
if (env)
cpu_interrupt(env, CPU_INTERRUPT_EXIT);
|
|
433
434
|
}
|
|
435
436
437
438
439
440
|
/* System-reset hook: emulate a guest write to the master-clear port. */
static void dma_reset(void *opaque)
{
    struct dma_cont *d = opaque;

    write_cont (d, 0x0d << d->dshift, 0);
}
|
|
441
|
/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
|
|
442
|
static void dma_init2(struct dma_cont *d, int base, int dshift,
|
|
443
|
int page_base, int pageh_base)
|
|
444
|
{
|
|
445
|
const static int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
|
|
446
447
|
int i;
|
|
448
|
d->dshift = dshift;
|
|
449
|
for (i = 0; i < 8; i++) {
|
|
450
451
|
register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
|
|
452
453
|
}
for (i = 0; i < LENOFA (page_port_list); i++) {
|
|
454
|
register_ioport_write (page_base + page_port_list[i], 1, 1,
|
|
455
|
write_page, d);
|
|
456
|
register_ioport_read (page_base + page_port_list[i], 1, 1,
|
|
457
|
read_page, d);
|
|
458
|
if (pageh_base >= 0) {
|
|
459
|
register_ioport_write (pageh_base + page_port_list[i], 1, 1,
|
|
460
|
write_pageh, d);
|
|
461
|
register_ioport_read (pageh_base + page_port_list[i], 1, 1,
|
|
462
463
|
read_pageh, d);
}
|
|
464
465
|
}
for (i = 0; i < 8; i++) {
|
|
466
|
register_ioport_write (base + ((i + 8) << dshift), 1, 1,
|
|
467
|
write_cont, d);
|
|
468
|
register_ioport_read (base + ((i + 8) << dshift), 1, 1,
|
|
469
|
read_cont, d);
|
|
470
|
}
|
|
471
472
|
qemu_register_reset(dma_reset, d);
dma_reset(d);
|
|
473
|
}
|
|
474
|
|
|
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
|
/* savevm callback: serialize one controller.  The status register is
   deliberately skipped — DREQ/TC state is transient. */
static void dma_save (QEMUFile *f, void *opaque)
{
    struct dma_cont *d = opaque;
    int i;

    /* qemu_put_8s (f, &d->status); */
    qemu_put_8s (f, &d->command);
    qemu_put_8s (f, &d->mask);
    qemu_put_8s (f, &d->flip_flop);
    qemu_put_be32s (f, &d->dshift);
    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_put_be32s (f, &r->now[0]);
        qemu_put_be32s (f, &r->now[1]);
        qemu_put_be16s (f, &r->base[0]);
        qemu_put_be16s (f, &r->base[1]);
        qemu_put_8s (f, &r->mode);
        qemu_put_8s (f, &r->page);
        qemu_put_8s (f, &r->pageh);
        qemu_put_8s (f, &r->dack);
        qemu_put_8s (f, &r->eop);
    }
}
/* loadvm callback: restore one controller; must mirror dma_save exactly.
   Only version 1 snapshots are accepted. */
static int dma_load (QEMUFile *f, void *opaque, int version_id)
{
    struct dma_cont *d = opaque;
    int i;

    if (version_id != 1)
        return -EINVAL;

    /* qemu_get_8s (f, &d->status); */
    qemu_get_8s (f, &d->command);
    qemu_get_8s (f, &d->mask);
    qemu_get_8s (f, &d->flip_flop);
    qemu_get_be32s (f, &d->dshift);
    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_get_be32s (f, &r->now[0]);
        qemu_get_be32s (f, &r->now[1]);
        qemu_get_be16s (f, &r->base[0]);
        qemu_get_be16s (f, &r->base[1]);
        qemu_get_8s (f, &r->mode);
        qemu_get_8s (f, &r->page);
        qemu_get_8s (f, &r->pageh);
        qemu_get_8s (f, &r->dack);
        qemu_get_8s (f, &r->eop);
    }
    return 0;
}
|
|
529
|
/* Initialize both cascaded controllers and register their savevm
   sections.  high_page_enable also maps the 0x48x high-page ports. */
void DMA_init (int high_page_enable)
{
    /* primary: 8-bit, ports 0x00-0x0f, page registers at 0x80 */
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1);
    /* secondary: 16-bit, ports 0xc0-0xdf, page registers at 0x88 */
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1);
    register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
    register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);
}
|