Commit 1ba13a5dfc555dae3173f0da0bf6ef6ab042215d
1 parent eb173de6
Add support for parts of the etraxfs dma controller.
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4428 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 2 changed files with 718 additions and 0 deletions
hw/etraxfs_dma.c
0 → 100644
+/*
+ * QEMU ETRAX DMA Controller.
+ *
+ * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <sys/time.h>
+#include "hw.h"
+
+#include "etraxfs_dma.h"
+
+#define D(x)
+
+#define RW_DATA 0x0
+#define RW_SAVED_DATA 0x58
+#define RW_SAVED_DATA_BUF 0x5c
+#define RW_GROUP 0x60
+#define RW_GROUP_DOWN 0x7c
+#define RW_CMD 0x80
+#define RW_CFG 0x84
+#define RW_STAT 0x88
+#define RW_INTR_MASK 0x8c
+#define RW_ACK_INTR 0x90
+#define R_INTR 0x94
+#define R_MASKED_INTR 0x98
+#define RW_STREAM_CMD 0x9c
+
+#define DMA_REG_MAX 0x100
+
+/* descriptors */
+
+// ------------------------------------------------------------ dma_descr_group
+typedef struct dma_descr_group {
+    struct dma_descr_group *next;
+    unsigned eol : 1;
+    unsigned tol : 1;
+    unsigned bol : 1;
+    unsigned : 1;
+    unsigned intr : 1;
+    unsigned : 2;
+    unsigned en : 1;
+    unsigned : 7;
+    unsigned dis : 1;
+    unsigned md : 16;
+    struct dma_descr_group *up;
+    union {
+        struct dma_descr_context *context;
+        struct dma_descr_group *group;
+    } down;
+} dma_descr_group;
+
+// ---------------------------------------------------------- dma_descr_context
+typedef struct dma_descr_context {
+    struct dma_descr_context *next;
+    unsigned eol : 1;
+    unsigned : 3;
+    unsigned intr : 1;
+    unsigned : 1;
+    unsigned store_mode : 1;
+    unsigned en : 1;
+    unsigned : 7;
+    unsigned dis : 1;
+    unsigned md0 : 16;
+    unsigned md1;
+    unsigned md2;
+    unsigned md3;
+    unsigned md4;
+    struct dma_descr_data *saved_data;
+    char *saved_data_buf;
+} dma_descr_context;
+
+// ------------------------------------------------------------- dma_descr_data
+typedef struct dma_descr_data {
+    struct dma_descr_data *next;
+    char *buf;
+    unsigned eol : 1;
+    unsigned : 2;
+    unsigned out_eop : 1;
+    unsigned intr : 1;
+    unsigned wait : 1;
+    unsigned : 2;
+    unsigned : 3;
+    unsigned in_eop : 1;
+    unsigned : 4;
+    unsigned md : 16;
+    char *after;
+} dma_descr_data;
+
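The data descriptor above is what a channel walks at run time: buf/after bound the payload, next links to the following descriptor, and eol marks the last one. As a minimal guest-side sketch (not part of this commit; the buffers and lengths are hypothetical), a two-descriptor output chain could be set up like this:

/* Editor's sketch: build a two-element dma_descr_data chain (assumes the
   descriptors and buffers are already allocated and zero-initialized). */
static void build_chain(dma_descr_data *d0, dma_descr_data *d1,
                        char *buf0, int len0, char *buf1, int len1)
{
    d0->buf = buf0;
    d0->after = buf0 + len0;    /* one past the last byte to transfer */
    d0->next = d1;

    d1->buf = buf1;
    d1->after = buf1 + len1;
    d1->eol = 1;                /* last descriptor in the list */
    d1->out_eop = 1;            /* end of packet on an output channel */
}
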
+/* Constants */
+enum {
+    regk_dma_ack_pkt = 0x00000100,
+    regk_dma_anytime = 0x00000001,
+    regk_dma_array = 0x00000008,
+    regk_dma_burst = 0x00000020,
+    regk_dma_client = 0x00000002,
+    regk_dma_copy_next = 0x00000010,
+    regk_dma_copy_up = 0x00000020,
+    regk_dma_data_at_eol = 0x00000001,
+    regk_dma_dis_c = 0x00000010,
+    regk_dma_dis_g = 0x00000020,
+    regk_dma_idle = 0x00000001,
+    regk_dma_intern = 0x00000004,
+    regk_dma_load_c = 0x00000200,
+    regk_dma_load_c_n = 0x00000280,
+    regk_dma_load_c_next = 0x00000240,
+    regk_dma_load_d = 0x00000140,
+    regk_dma_load_g = 0x00000300,
+    regk_dma_load_g_down = 0x000003c0,
+    regk_dma_load_g_next = 0x00000340,
+    regk_dma_load_g_up = 0x00000380,
+    regk_dma_next_en = 0x00000010,
+    regk_dma_next_pkt = 0x00000010,
+    regk_dma_no = 0x00000000,
+    regk_dma_only_at_wait = 0x00000000,
+    regk_dma_restore = 0x00000020,
+    regk_dma_rst = 0x00000001,
+    regk_dma_running = 0x00000004,
+    regk_dma_rw_cfg_default = 0x00000000,
+    regk_dma_rw_cmd_default = 0x00000000,
+    regk_dma_rw_intr_mask_default = 0x00000000,
+    regk_dma_rw_stat_default = 0x00000101,
+    regk_dma_rw_stream_cmd_default = 0x00000000,
+    regk_dma_save_down = 0x00000020,
+    regk_dma_save_up = 0x00000020,
+    regk_dma_set_reg = 0x00000050,
+    regk_dma_set_w_size1 = 0x00000190,
+    regk_dma_set_w_size2 = 0x000001a0,
+    regk_dma_set_w_size4 = 0x000001c0,
+    regk_dma_stopped = 0x00000002,
+    regk_dma_store_c = 0x00000002,
+    regk_dma_store_descr = 0x00000000,
+    regk_dma_store_g = 0x00000004,
+    regk_dma_store_md = 0x00000001,
+    regk_dma_sw = 0x00000008,
+    regk_dma_update_down = 0x00000020,
+    regk_dma_yes = 0x00000001
+};
+
+enum dma_ch_state
+{
+    RST = 0,
+    STOPPED = 2,
+    RUNNING = 4
+};
+
+struct fs_dma_channel
+{
+    int regmap;
+    qemu_irq *irq;
+    struct etraxfs_dma_client *client;
+
+
+    /* Internal status. */
+    int stream_cmd_src;
+    enum dma_ch_state state;
+
+    unsigned int input : 1;
+    unsigned int eol : 1;
+
+    struct dma_descr_group current_g;
+    struct dma_descr_context current_c;
+    struct dma_descr_data current_d;
+
+    /* Control registers. */
+    uint32_t regs[DMA_REG_MAX];
+};
+
+struct fs_dma_ctrl
+{
+    CPUState *env;
+    target_phys_addr_t base;
+
+    int nr_channels;
+    struct fs_dma_channel *channels;
+};
+
+static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
+{
+    return ctrl->channels[c].regs[reg];
+}
+
+static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
+{
+    return channel_reg(ctrl, c, RW_CFG) & 2;
+}
+
+static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
+{
+    return (channel_reg(ctrl, c, RW_CFG) & 1)
+           && ctrl->channels[c].client;
+}
+
+static inline int fs_channel(target_phys_addr_t base, target_phys_addr_t addr)
+{
+    /* Every channel has a 0x2000 ctrl register map. */
+    return (addr - base) >> 13;
+}
+
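To make the address decoding concrete, here is a small worked example (the base address 0x30000000 is hypothetical, not taken from this commit). An access in channel 1's 0x2000-byte window decodes to a channel index via fs_channel() and to a register offset via the masking done in the read/write handlers further down:

/* Editor's sketch of the decode done by fs_channel() and the MMIO handlers. */
target_phys_addr_t base = 0x30000000;            /* hypothetical mapping */
target_phys_addr_t addr = base + 1 * 0x2000 + RW_CFG;

int c = fs_channel(base, addr);                  /* (0x2084 >> 13) == 1 */
int reg = addr & 0x1fff;                         /* 0x2084 & 0x1fff == 0x84 == RW_CFG */
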
+static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
+{
+    target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP);
+
+    /* Load and decode. FIXME: handle endianness. */
+    cpu_physical_memory_read (addr,
+                              (void *) &ctrl->channels[c].current_g,
+                              sizeof ctrl->channels[c].current_g);
+}
+
+static void dump_c(int ch, struct dma_descr_context *c)
+{
+    printf("%s ch=%d\n", __func__, ch);
+    printf("next=%x\n", (uint32_t) c->next);
+    printf("saved_data=%x\n", (uint32_t) c->saved_data);
+    printf("saved_data_buf=%x\n", (uint32_t) c->saved_data_buf);
+    printf("eol=%x\n", (uint32_t) c->eol);
+}
+
+static void dump_d(int ch, struct dma_descr_data *d)
+{
+    printf("%s ch=%d\n", __func__, ch);
+    printf("next=%x\n", (uint32_t) d->next);
+    printf("buf=%x\n", (uint32_t) d->buf);
+    printf("after=%x\n", (uint32_t) d->after);
+    printf("intr=%x\n", (uint32_t) d->intr);
+    printf("out_eop=%x\n", (uint32_t) d->out_eop);
+    printf("in_eop=%x\n", (uint32_t) d->in_eop);
+    printf("eol=%x\n", (uint32_t) d->eol);
+}
+
+static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
+{
+    target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
+
+    /* Load and decode. FIXME: handle endianness. */
+    cpu_physical_memory_read (addr,
+                              (void *) &ctrl->channels[c].current_c,
+                              sizeof ctrl->channels[c].current_c);
+
+    D(dump_c(c, &ctrl->channels[c].current_c));
+    /* I guess this should update the current pos. */
+    ctrl->channels[c].regs[RW_SAVED_DATA] =
+        (uint32_t)ctrl->channels[c].current_c.saved_data;
+    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+        (uint32_t)ctrl->channels[c].current_c.saved_data_buf;
+}
+
+static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
+{
+    target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA);
+
+    /* Load and decode. FIXME: handle endianness. */
+    D(printf("%s addr=%x\n", __func__, addr));
+    cpu_physical_memory_read (addr,
+                              (void *) &ctrl->channels[c].current_d,
+                              sizeof ctrl->channels[c].current_d);
+
+    D(dump_d(c, &ctrl->channels[c].current_d));
+    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+        (uint32_t)ctrl->channels[c].current_d.buf;
+}
+
+static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
+{
+    target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA);
+
+    /* Encode and store. FIXME: handle endianness. */
+    D(printf("%s addr=%x\n", __func__, addr));
+    cpu_physical_memory_write (addr,
+                               (void *) &ctrl->channels[c].current_d,
+                               sizeof ctrl->channels[c].current_d);
+}
+
+static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
+{
+    /* FIXME: */
+}
+
+static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
+{
+    if (ctrl->channels[c].client)
+    {
+        ctrl->channels[c].eol = 0;
+        ctrl->channels[c].state = RUNNING;
+    } else
+        printf("WARNING: starting DMA ch %d with no client\n", c);
+}
+
+static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
+{
+    if (!channel_en(ctrl, c)
+        || channel_stopped(ctrl, c)
+        || ctrl->channels[c].state != RUNNING
+        /* Only reload the current data descriptor if it has eol set. */
+        || !ctrl->channels[c].current_d.eol) {
+        D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
+                 c, ctrl->channels[c].state,
+                 channel_stopped(ctrl, c),
+                 channel_en(ctrl,c),
+                 ctrl->channels[c].eol));
+        D(dump_d(c, &ctrl->channels[c].current_d));
+        return;
+    }
+
+    /* Reload the current descriptor. */
+    channel_load_d(ctrl, c);
+
+    /* If the current descriptor cleared the eol flag and we had already
+       reached eol state, do the continue. */
+    if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
+        D(printf("continue %d ok %x\n", c,
+                 ctrl->channels[c].current_d.next));
+        ctrl->channels[c].regs[RW_SAVED_DATA] =
+            (uint32_t) ctrl->channels[c].current_d.next;
+        channel_load_d(ctrl, c);
+        channel_start(ctrl, c);
+    }
+}
+
+static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
+{
+    unsigned int cmd = v & ((1 << 10) - 1);
+
+    D(printf("%s cmd=%x\n", __func__, cmd));
+    if (cmd & regk_dma_load_d) {
+        channel_load_d(ctrl, c);
+        if (cmd & regk_dma_burst)
+            channel_start(ctrl, c);
+    }
+
+    if (cmd & regk_dma_load_c) {
+        channel_load_c(ctrl, c);
+    }
+}
+
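channel_stream_cmd() above is the hook a guest uses to (re)load descriptors and start a burst. As a hedged guest-side sketch of kicking an output channel (the writel() MMIO helper, dma_base and desc_paddr are assumptions, none of them defined by this commit):

/* Editor's sketch: program the channel registers, then issue the stream command. */
writel(dma_base + RW_SAVED_DATA, desc_paddr);   /* physical address of the first dma_descr_data */
writel(dma_base + RW_INTR_MASK, 1 << 2);        /* unmask the data-descriptor interrupt */
writel(dma_base + RW_CFG, 1);                   /* enable the channel (see channel_en()) */
writel(dma_base + RW_STREAM_CMD,
       regk_dma_load_d | regk_dma_burst);       /* load the descriptor and start running */
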
+static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
+{
+    D(printf("%s %d\n", __func__, c));
+    ctrl->channels[c].regs[R_INTR] &=
+        ~(ctrl->channels[c].regs[RW_ACK_INTR]);
+
+    ctrl->channels[c].regs[R_MASKED_INTR] =
+        ctrl->channels[c].regs[R_INTR]
+        & ctrl->channels[c].regs[RW_INTR_MASK];
+
+    D(printf("%s: chan=%d masked_intr=%x\n", __func__,
+             c,
+             ctrl->channels[c].regs[R_MASKED_INTR]));
+
+    if (ctrl->channels[c].regs[R_MASKED_INTR])
+        qemu_irq_raise(ctrl->channels[c].irq[0]);
+    else
+        qemu_irq_lower(ctrl->channels[c].irq[0]);
+}
+
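The masked state is recomputed here on every ack or mask update: R_MASKED_INTR = R_INTR & RW_INTR_MASK drives the channel's IRQ line. A guest acknowledges by writing the pending bits back to RW_ACK_INTR, which the write handler below clears again once the update has run. A short sketch, using the same hypothetical readl()/writel() helpers as above:

/* Editor's sketch: guest interrupt handler side. */
uint32_t pending = readl(dma_base + R_MASKED_INTR);
writel(dma_base + RW_ACK_INTR, pending);        /* clears the acked bits in R_INTR */
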
+static void channel_out_run(struct fs_dma_ctrl *ctrl, int c)
+{
+    uint32_t len;
+    uint32_t saved_data_buf;
+    unsigned char buf[2 * 1024];
+
+    if (ctrl->channels[c].eol == 1)
+        return;
+
+    saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
+
+    D(printf("buf=%x after=%x saved_data_buf=%x\n",
+             (uint32_t)ctrl->channels[c].current_d.buf,
+             (uint32_t)ctrl->channels[c].current_d.after,
+             saved_data_buf));
+
+    if (saved_data_buf == (uint32_t)ctrl->channels[c].current_d.after) {
+        /* Done. Step to next. */
+        if (ctrl->channels[c].current_d.out_eop) {
+            /* TODO: signal eop to the client. */
+            D(printf("signal eop\n"));
+        }
+        if (ctrl->channels[c].current_d.intr) {
+            /* TODO: signal the data interrupt to the client. */
+            D(printf("signal intr\n"));
+            ctrl->channels[c].regs[R_INTR] |= (1 << 2);
+            channel_update_irq(ctrl, c);
+        }
+        if (ctrl->channels[c].current_d.eol) {
+            D(printf("channel %d EOL\n", c));
+            ctrl->channels[c].eol = 1;
+            channel_stop(ctrl, c);
+        } else {
+            ctrl->channels[c].regs[RW_SAVED_DATA] =
+                (uint32_t) ctrl->channels[c].current_d.next;
+            /* Load new descriptor. */
+            channel_load_d(ctrl, c);
+        }
+
+        channel_store_d(ctrl, c);
+        D(dump_d(c, &ctrl->channels[c].current_d));
+        return;
+    }
+
+    len = (uint32_t) ctrl->channels[c].current_d.after;
+    len -= saved_data_buf;
+
+    if (len > sizeof buf)
+        len = sizeof buf;
+    cpu_physical_memory_read (saved_data_buf, buf, len);
+
+    D(printf("channel %d pushes %x %u bytes\n", c,
+             saved_data_buf, len));
+    /* TODO: Push content. */
+    if (ctrl->channels[c].client->client.push)
+        ctrl->channels[c].client->client.push(
+            ctrl->channels[c].client->client.opaque, buf, len);
+    else
+        printf("WARNING: DMA ch%d data loss, no attached client.\n", c);
+
+    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] += len;
+}
+
+static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
+                              unsigned char *buf, int buflen, int eop)
+{
+    uint32_t len;
+    uint32_t saved_data_buf;
+
+    if (ctrl->channels[c].eol == 1)
+        return 0;
+
+    saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
+    len = (uint32_t) ctrl->channels[c].current_d.after;
+    len -= saved_data_buf;
+
+    if (len > buflen)
+        len = buflen;
+
+    cpu_physical_memory_write (saved_data_buf, buf, len);
+    saved_data_buf += len;
+
+    if (saved_data_buf == (uint32_t)ctrl->channels[c].current_d.after
+        || eop) {
+        uint32_t r_intr = ctrl->channels[c].regs[R_INTR];
+
+        D(printf("in dscr end len=%d\n",
+                 ctrl->channels[c].current_d.after
+                 - ctrl->channels[c].current_d.buf));
+        ctrl->channels[c].current_d.after =
+            (void *) saved_data_buf;
+
+        /* Done. Step to next. */
+        if (ctrl->channels[c].current_d.intr) {
+            /* TODO: signal the data interrupt to the client. */
+            ctrl->channels[c].regs[R_INTR] |= 3;
+        }
+        if (eop) {
+            ctrl->channels[c].current_d.in_eop = 1;
+            ctrl->channels[c].regs[R_INTR] |= 8;
+        }
+        if (r_intr != ctrl->channels[c].regs[R_INTR])
+            channel_update_irq(ctrl, c);
+
+        channel_store_d(ctrl, c);
+        D(dump_d(c, &ctrl->channels[c].current_d));
+
+        if (ctrl->channels[c].current_d.eol) {
+            D(printf("channel %d EOL\n", c));
+            ctrl->channels[c].eol = 1;
+            channel_stop(ctrl, c);
+        } else {
+            ctrl->channels[c].regs[RW_SAVED_DATA] =
+                (uint32_t) ctrl->channels[c].current_d.next;
+            /* Load new descriptor. */
+            channel_load_d(ctrl, c);
+            saved_data_buf =
+                ctrl->channels[c].regs[RW_SAVED_DATA_BUF];
+        }
+    }
+
+    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
+    return len;
+}
+
+static inline void channel_in_run(struct fs_dma_ctrl *ctrl, int c)
+{
+    if (ctrl->channels[c].client->client.pull)
+        ctrl->channels[c].client->client.pull(
+            ctrl->channels[c].client->client.opaque);
+}
+
+static uint32_t dma_rinvalid (void *opaque, target_phys_addr_t addr)
+{
+    struct fs_dma_ctrl *ctrl = opaque;
+    CPUState *env = ctrl->env;
+    cpu_abort(env, "Unsupported short access. reg=%x pc=%x.\n",
+              addr, env->pc);
+    return 0;
+}
+
+static uint32_t
+dma_readl (void *opaque, target_phys_addr_t addr)
+{
+    struct fs_dma_ctrl *ctrl = opaque;
+    int c;
+    uint32_t r = 0;
+
+    /* Make addr relative to this instance's base. */
+    c = fs_channel(ctrl->base, addr);
+    addr &= 0x1fff;
+    switch (addr)
+    {
+        case RW_STAT:
+            r = ctrl->channels[c].state & 7;
+            r |= ctrl->channels[c].eol << 5;
+            r |= ctrl->channels[c].stream_cmd_src << 8;
+            break;
+
+        default:
+            r = ctrl->channels[c].regs[addr];
+            D(printf ("%s c=%d addr=%x pc=%x\n",
+                      __func__, c, addr, ctrl->env->pc));
+            break;
+    }
+    return r;
+}
+
542 | + | ||
543 | +static void | ||
544 | +dma_winvalid (void *opaque, target_phys_addr_t addr, uint32_t value) | ||
545 | +{ | ||
546 | + struct fs_dma_ctrl *ctrl = opaque; | ||
547 | + CPUState *env = ctrl->env; | ||
548 | + cpu_abort(env, "Unsupported short access. reg=%x pc=%x.\n", | ||
549 | + addr, env->pc); | ||
550 | +} | ||
551 | + | ||
552 | +static void | ||
553 | +dma_writel (void *opaque, target_phys_addr_t addr, uint32_t value) | ||
554 | +{ | ||
555 | + struct fs_dma_ctrl *ctrl = opaque; | ||
556 | + int c; | ||
557 | + | ||
558 | + /* Make addr relative to this instances base. */ | ||
559 | + c = fs_channel(ctrl->base, addr); | ||
560 | + addr &= 0x1fff; | ||
561 | + switch (addr) | ||
562 | + { | ||
563 | + case RW_DATA: | ||
564 | + printf("RW_DATA=%x\n", value); | ||
565 | + break; | ||
566 | + | ||
567 | + case RW_CFG: | ||
568 | + ctrl->channels[c].regs[addr] = value; | ||
569 | + break; | ||
570 | + case RW_CMD: | ||
571 | + /* continue. */ | ||
572 | + ctrl->channels[c].regs[addr] = value; | ||
573 | + channel_continue(ctrl, c); | ||
574 | + break; | ||
575 | + | ||
576 | + case RW_SAVED_DATA: | ||
577 | + case RW_SAVED_DATA_BUF: | ||
578 | + case RW_GROUP: | ||
579 | + case RW_GROUP_DOWN: | ||
580 | + ctrl->channels[c].regs[addr] = value; | ||
581 | + break; | ||
582 | + | ||
583 | + case RW_ACK_INTR: | ||
584 | + case RW_INTR_MASK: | ||
585 | + ctrl->channels[c].regs[addr] = value; | ||
586 | + channel_update_irq(ctrl, c); | ||
587 | + if (addr == RW_ACK_INTR) | ||
588 | + ctrl->channels[c].regs[RW_ACK_INTR] = 0; | ||
589 | + break; | ||
590 | + | ||
591 | + case RW_STREAM_CMD: | ||
592 | + ctrl->channels[c].regs[addr] = value; | ||
593 | + channel_stream_cmd(ctrl, c, value); | ||
594 | + break; | ||
595 | + | ||
596 | + default: | ||
597 | + D(printf ("%s c=%d %x %x pc=%x\n", | ||
598 | + __func__, c, addr, value, env->pc)); | ||
599 | + break; | ||
600 | + } | ||
601 | +} | ||
602 | + | ||
+static CPUReadMemoryFunc *dma_read[] = {
+    &dma_rinvalid,
+    &dma_rinvalid,
+    &dma_readl,
+};
+
+static CPUWriteMemoryFunc *dma_write[] = {
+    &dma_winvalid,
+    &dma_winvalid,
+    &dma_writel,
+};
+
+void etraxfs_dmac_run(void *opaque)
+{
+    struct fs_dma_ctrl *ctrl = opaque;
+    int i;
+    int p = 0;
+
+    for (i = 0;
+         i < ctrl->nr_channels;
+         i++)
+    {
+        if (ctrl->channels[i].state == RUNNING)
+        {
+            p++;
+            if (ctrl->channels[i].input)
+                channel_in_run(ctrl, i);
+            else
+                channel_out_run(ctrl, i);
+        }
+    }
+}
+
+int etraxfs_dmac_input(struct etraxfs_dma_client *client,
+                       void *buf, int len, int eop)
+{
+    return channel_in_process(client->ctrl, client->channel,
+                              buf, len, eop);
+}
+
+/* Connect an IRQ line to a channel. */
+void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
+{
+    struct fs_dma_ctrl *ctrl = opaque;
+    ctrl->channels[c].irq = line;
+    ctrl->channels[c].input = input;
+}
+
+void etraxfs_dmac_connect_client(void *opaque, int c,
+                                 struct etraxfs_dma_client *cl)
+{
+    struct fs_dma_ctrl *ctrl = opaque;
+    cl->ctrl = ctrl;
+    cl->channel = c;
+    ctrl->channels[c].client = cl;
+}
+
+
+void *etraxfs_dmac_init(CPUState *env,
+                        target_phys_addr_t base, int nr_channels)
+{
+    struct fs_dma_ctrl *ctrl = NULL;
+    int i;
+
+    ctrl = qemu_mallocz(sizeof *ctrl);
+    if (!ctrl)
+        return NULL;
+
+    ctrl->base = base;
+    ctrl->env = env;
+    ctrl->nr_channels = nr_channels;
+    ctrl->channels = qemu_mallocz(sizeof ctrl->channels[0] * nr_channels);
+    if (!ctrl->channels)
+        goto err;
+
+    for (i = 0; i < nr_channels; i++)
+    {
+        ctrl->channels[i].regmap = cpu_register_io_memory(0,
+                                                          dma_read,
+                                                          dma_write,
+                                                          ctrl);
+        cpu_register_physical_memory (base + i * 0x2000,
+                                      sizeof ctrl->channels[i].regs,
+                                      ctrl->channels[i].regmap);
+    }
+
+    return ctrl;
+  err:
+    qemu_free(ctrl->channels);
+    qemu_free(ctrl);
+    return NULL;
+}
hw/etraxfs_dma.h
0 → 100644
+struct etraxfs_dma_client
+{
+    /* DMA controller. */
+    int channel;
+    void *ctrl;
+
+    /* client. */
+    struct
+    {
+        int (*push)(void *opaque, unsigned char *buf, int len);
+        void (*pull)(void *opaque);
+        void *opaque;
+    } client;
+};
+
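A device model hooks into the controller by filling in these callbacks: push() receives data an output channel has fetched from guest memory, and pull() is polled while an input channel is running so the device can feed data back via etraxfs_dmac_input(). A minimal sketch (the device names are hypothetical; the push return value is currently ignored by the controller model):

/* Editor's sketch of a client implementation. */
static int my_dev_push(void *opaque, unsigned char *buf, int len)
{
    /* Consume 'len' bytes produced by the output channel. */
    return len;
}

static void my_dev_pull(void *opaque)
{
    /* The input channel is RUNNING; hand data to etraxfs_dmac_input() when available. */
}

static struct etraxfs_dma_client my_dev_client = {
    .client = { .push = my_dev_push, .pull = my_dev_pull, .opaque = NULL }
};
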
+void *etraxfs_dmac_init(CPUState *env, target_phys_addr_t base,
+                        int nr_channels);
+void etraxfs_dmac_connect(void *opaque, int channel, qemu_irq *line,
+                          int input);
+void etraxfs_dmac_connect_client(void *opaque, int c,
+                                 struct etraxfs_dma_client *cl);
+void etraxfs_dmac_run(void *opaque);
+int etraxfs_dmac_input(struct etraxfs_dma_client *client,
+                       void *buf, int len, int eop);
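Taken together, a board model would create the controller, wire IRQ lines and clients per channel, and then drive it from its machine code. A hedged wiring sketch (base address, channel number, irq array and rx_buf/rx_len are hypothetical; error handling omitted):

/* Editor's sketch of board-level wiring (one input channel shown; an output
   channel would get its own client whose push() callback consumes the data). */
void *dmac = etraxfs_dmac_init(env, 0x30000000, 10);

etraxfs_dmac_connect(dmac, 1, irq + 1, 1 /* input */);
etraxfs_dmac_connect_client(dmac, 1, &my_dev_client);

/* The device model feeds received bytes into the input channel ... */
etraxfs_dmac_input(&my_dev_client, rx_buf, rx_len, 1 /* eop */);

/* ... and the board periodically lets running channels make progress. */
etraxfs_dmac_run(dmac);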