/*
 * Arm PrimeCell PL080/PL081 DMA controller
 *
 * Copyright (c) 2006 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licenced under the GPL.
 */

#include "sysbus.h"

#define PL080_MAX_CHANNELS 8
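
/* Register bits used by this model.  In the PL080 TRM: E enables the
   controller or a channel, M1/M2 select big-endian mode for the two AHB
   masters, H halts a channel, A marks it active, L locks transfers,
   ITC/IE are the terminal count and error interrupt masks, I enables the
   terminal count interrupt, DI/SI enable destination/source address
   increment and D/S select the destination/source AHB master.  */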
#define PL080_CONF_E    0x1
#define PL080_CONF_M1   0x2
#define PL080_CONF_M2   0x4

#define PL080_CCONF_H   0x40000
#define PL080_CCONF_A   0x20000
#define PL080_CCONF_L   0x10000
#define PL080_CCONF_ITC 0x08000
#define PL080_CCONF_IE  0x04000
#define PL080_CCONF_E   0x00001

#define PL080_CCTRL_I   0x80000000
#define PL080_CCTRL_DI  0x08000000
#define PL080_CCTRL_SI  0x04000000
#define PL080_CCTRL_D   0x02000000
#define PL080_CCTRL_S   0x01000000

typedef struct {
    uint32_t src;
    uint32_t dest;
    uint32_t lli;
    uint32_t ctrl;
    uint32_t conf;
} pl080_channel;

typedef struct {
    SysBusDevice busdev;
    uint8_t tc_int;
    uint8_t tc_mask;
    uint8_t err_int;
    uint8_t err_mask;
    uint32_t conf;
    uint32_t sync;
    uint32_t req_single;
    uint32_t req_burst;
    pl080_channel chan[PL080_MAX_CHANNELS];
    int nchannels;
    /* Flag to avoid recursive DMA invocations.  */
    int running;
    qemu_irq irq;
} pl080_state;
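
/* tc_int and err_int hold the raw terminal count and error interrupt
   status; tc_mask and err_mask cache the per-channel interrupt enables
   (the CCONF ITC and IE bits), so the IRQ line reflects only unmasked
   sources.  */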

static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

static const unsigned char pl081_id[] =
{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
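
/* Drive the combined interrupt line: it is asserted while any unmasked
   terminal count or error interrupt is pending.  */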
static void pl080_update(pl080_state *s)
{
    if ((s->tc_int & s->tc_mask)
            || (s->err_int & s->err_mask))
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}

static void pl080_run(pl080_state *s)
{
    int c;
    int flow;
    pl080_channel *ch;
    int swidth;
    int dwidth;
    int xsize;
    int n;
    int src_id;
    int dest_id;
    int size;
    uint8_t buff[4];
    uint32_t req;

    s->tc_mask = 0;
    for (c = 0; c < s->nchannels; c++) {
        if (s->chan[c].conf & PL080_CCONF_ITC)
            s->tc_mask |= 1 << c;
        if (s->chan[c].conf & PL080_CCONF_IE)
            s->err_mask |= 1 << c;
    }

    if ((s->conf & PL080_CONF_E) == 0)
        return;

    hw_error("DMA active\n");
    /* If we are already in the middle of a DMA operation then indicate that
       there may be new DMA requests and return immediately.  */
    if (s->running) {
        s->running++;
        return;
    }
    s->running = 1;
    while (s->running) {
        for (c = 0; c < s->nchannels; c++) {
            ch = &s->chan[c];
again:
            /* Test if this channel has any pending DMA requests.  */
            if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
                    != PL080_CCONF_E)
                continue;
            flow = (ch->conf >> 11) & 7;
            if (flow >= 4) {
                hw_error(
                    "pl080_run: Peripheral flow control not implemented\n");
            }
            src_id = (ch->conf >> 1) & 0x1f;
            dest_id = (ch->conf >> 6) & 0x1f;
            size = ch->ctrl & 0xfff;
            req = s->req_single | s->req_burst;
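            /* Transfers under DMA flow control are gated on the peripheral
               request lines: flow 1 needs a request from the destination
               peripheral, flow 2 from the source, flow 3 from both, while
               memory-to-memory (flow 0) always proceeds.  */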
            switch (flow) {
            case 0:
                break;
            case 1:
                if ((req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            case 2:
                if ((req & (1u << src_id)) == 0)
                    size = 0;
                break;
            case 3:
                if ((req & (1u << src_id)) == 0
                        || (req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            }
            if (!size)
                continue;

            /* Transfer one element.  */
            /* ??? Should transfer multiple elements for a burst request.  */
            /* ??? Unclear what the proper behavior is when source and
               destination widths are different.  */
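            /* SWidth and DWidth are encoded as log2 of the element size in
               bytes, so encodings 0, 1 and 2 select byte, halfword and word
               elements respectively.  */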
            swidth = 1 << ((ch->ctrl >> 18) & 7);
            dwidth = 1 << ((ch->ctrl >> 21) & 7);
            for (n = 0; n < dwidth; n += swidth) {
                cpu_physical_memory_read(ch->src, buff + n, swidth);
                if (ch->ctrl & PL080_CCTRL_SI)
                    ch->src += swidth;
            }
            xsize = (dwidth < swidth) ? swidth : dwidth;
            /* ??? This may pad the value incorrectly for dwidth < 32.  */
            for (n = 0; n < xsize; n += dwidth) {
                cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
                if (ch->ctrl & PL080_CCTRL_DI)
                    ch->dest += dwidth;
            }
            size--;
            ch->ctrl = (ch->ctrl & 0xfffff000) | size;
            if (size == 0) {
                /* Transfer complete.  */
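                /* A non-zero LLI register points at the next linked list
                   item: a four-word descriptor holding the next source
                   address, destination address, LLI pointer and control
                   word, in that order.  */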
                if (ch->lli) {
                    ch->src = ldl_phys(ch->lli);
                    ch->dest = ldl_phys(ch->lli + 4);
                    ch->ctrl = ldl_phys(ch->lli + 12);
                    ch->lli = ldl_phys(ch->lli + 8);
                } else {
                    ch->conf &= ~PL080_CCONF_E;
                }
                if (ch->ctrl & PL080_CCTRL_I) {
                    s->tc_int |= 1 << c;
                }
            }
            goto again;
        }
        if (--s->running)
            s->running = 1;
    }
}

static uint32_t pl080_read(void *opaque, target_phys_addr_t offset)
{
    pl080_state *s = (pl080_state *)opaque;
    uint32_t i;
    uint32_t mask;

    if (offset >= 0xfe0 && offset < 0x1000) {
        if (s->nchannels == 8) {
            return pl080_id[(offset - 0xfe0) >> 2];
        } else {
            return pl081_id[(offset - 0xfe0) >> 2];
        }
    }
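    /* Per-channel registers live at 0x100 + 0x20 * channel, with SrcAddr,
       DestAddr, LLI, Control and Configuration at word offsets 0 to 4.  */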
    if (offset >= 0x100 && offset < 0x200) {
        i = (offset & 0xe0) >> 5;
        if (i >= s->nchannels)
            goto bad_offset;
        switch ((offset >> 2) & 7) {
        case 0: /* SrcAddr */
            return s->chan[i].src;
        case 1: /* DestAddr */
            return s->chan[i].dest;
        case 2: /* LLI */
            return s->chan[i].lli;
        case 3: /* Control */
            return s->chan[i].ctrl;
        case 4: /* Configuration */
            return s->chan[i].conf;
        default:
            goto bad_offset;
        }
    }
    switch (offset >> 2) {
    case 0: /* IntStatus */
        return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
    case 1: /* IntTCStatus */
        return (s->tc_int & s->tc_mask);
    case 3: /* IntErrorStatus */
        return (s->err_int & s->err_mask);
    case 5: /* RawIntTCStatus */
        return s->tc_int;
    case 6: /* RawIntErrorStatus */
        return s->err_int;
    case 7: /* EnbldChns */
        mask = 0;
        for (i = 0; i < s->nchannels; i++) {
            if (s->chan[i].conf & PL080_CCONF_E)
                mask |= 1 << i;
        }
        return mask;
    case 8: /* SoftBReq */
    case 9: /* SoftSReq */
    case 10: /* SoftLBReq */
    case 11: /* SoftLSReq */
        /* ??? Implement these.  */
        return 0;
    case 12: /* Configuration */
        return s->conf;
    case 13: /* Sync */
        return s->sync;
    default:
    bad_offset:
        hw_error("pl080_read: Bad offset %x\n", (int)offset);
        return 0;
    }
}

static void pl080_write(void *opaque, target_phys_addr_t offset,
                        uint32_t value)
{
    pl080_state *s = (pl080_state *)opaque;
    int i;

    if (offset >= 0x100 && offset < 0x200) {
        i = (offset & 0xe0) >> 5;
        if (i >= s->nchannels)
            goto bad_offset;
        switch ((offset >> 2) & 7) {
        case 0: /* SrcAddr */
            s->chan[i].src = value;
            break;
        case 1: /* DestAddr */
            s->chan[i].dest = value;
            break;
        case 2: /* LLI */
            s->chan[i].lli = value;
            break;
        case 3: /* Control */
            s->chan[i].ctrl = value;
            break;
        case 4: /* Configuration */
            s->chan[i].conf = value;
            pl080_run(s);
            break;
        }
        /* Channel registers handled; don't fall into the global register
           switch below, which would treat this as a bad offset.  */
        pl080_update(s);
        return;
    }
    switch (offset >> 2) {
    case 2: /* IntTCClear */
        s->tc_int &= ~value;
        break;
    case 4: /* IntErrorClear */
        s->err_int &= ~value;
        break;
    case 8: /* SoftBReq */
    case 9: /* SoftSReq */
    case 10: /* SoftLBReq */
    case 11: /* SoftLSReq */
        /* ??? Implement these.  */
        hw_error("pl080_write: Soft DMA not implemented\n");
        break;
    case 12: /* Configuration */
        s->conf = value;
        if (s->conf & (PL080_CONF_M1 | PL080_CONF_M2)) {
            hw_error("pl080_write: Big-endian DMA not implemented\n");
        }
        pl080_run(s);
        break;
    case 13: /* Sync */
        s->sync = value;
        break;
    default:
    bad_offset:
        hw_error("pl080_write: Bad offset %x\n", (int)offset);
    }
    pl080_update(s);
}
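
/* The same handler covers byte, halfword and word accesses;
   cpu_register_io_memory() takes one entry per access size.  */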
static CPUReadMemoryFunc *pl080_readfn[] = {
    pl080_read,
    pl080_read,
    pl080_read
};

static CPUWriteMemoryFunc *pl080_writefn[] = {
    pl080_write,
    pl080_write,
    pl080_write
};

static void pl08x_init(SysBusDevice *dev, int nchannels)
{
    int iomemtype;
    pl080_state *s = FROM_SYSBUS(pl080_state, dev);

    iomemtype = cpu_register_io_memory(0, pl080_readfn,
                                       pl080_writefn, s);
    sysbus_init_mmio(dev, 0x1000, iomemtype);
    sysbus_init_irq(dev, &s->irq);
    s->nchannels = nchannels;
    /* ??? Save/restore.  */
}

static void pl080_init(SysBusDevice *dev)
{
    pl08x_init(dev, 8);
}

static void pl081_init(SysBusDevice *dev)
{
    pl08x_init(dev, 2);
}

/* The PL080 and PL081 are the same except for the number of channels
   they implement (8 and 2 respectively).  */
static void pl080_register_devices(void)
{
    sysbus_register_dev("pl080", sizeof(pl080_state), pl080_init);
    sysbus_register_dev("pl081", sizeof(pl080_state), pl081_init);
}

device_init(pl080_register_devices)
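
/* Usage sketch (illustrative only, not compiled): with the sysbus
   registration above, a board model of this era would normally create the
   controller through the generic sysbus helpers rather than calling
   pl08x_init() directly.  The base address and interrupt line below are
   hypothetical placeholders, not values taken from this file.  */
#if 0
static void example_board_dma_init(qemu_irq dma_irq)
{
    /* Create a "pl080" device, map its 4K register block at the given
       guest physical address and connect its interrupt output.  */
    sysbus_create_simple("pl080", 0x10130000, dma_irq);
}
#endif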