Commit afbb5194d430adc0f1f3a63ea627bc93e8d17c56

Authored by balrog
1 parent 51fec3cc

Handle on-chip DMA controllers in one place, convert OMAP DMA to use it.


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4920 c046a42c-6fe2-441c-8c8c-71466251a162
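
The framework splits per-run work between two callbacks supplied by the SoC-specific model: setup_fn sizes the next burst and decides what terminates it, and transfer_fn performs the copy whenever neither end of the channel resolves to plain RAM or a registered FIFO. A minimal sketch of that contract, using hypothetical mydma_* names (the real wiring is in omap_dma_init below):

    #include "soc_dma.h"

    /* Called by soc_dma before every run: size the next burst. */
    static void mydma_setup(struct soc_dma_ch_s *ch)
    {
        ch->bytes = 64;                 /* bytes moved by the next run */
    }

    /* Slow path, used when no memcpy()/FIFO fast path applies. */
    static void mydma_transfer(struct soc_dma_ch_s *ch)
    {
        /* element-by-element cpu_physical_memory_read()/..._write() */
    }

    /* Assumes ports were already registered with soc_dma_port_add_mem(). */
    static void mydma_ch_start(struct soc_dma_ch_s *ch,
                    target_phys_addr_t src, target_phys_addr_t dst)
    {
        ch->vaddr[0] = src;             /* port 0 is the source */
        ch->vaddr[1] = dst;             /* port 1 is the destination */
        ch->type[0] = ch->type[1] = soc_dma_access_linear;
        soc_dma_ch_update(ch);          /* resolve fast paths, if any */
        soc_dma_set_request(ch, 1);     /* start timer-driven runs */
    }

    static struct soc_dma_s *mydma_init(void)
    {
        struct soc_dma_s *dma = soc_dma_init(4);        /* four channels */

        dma->freq = 100000000;          /* functional clock rate, assumed */
        dma->setup_fn = mydma_setup;
        dma->transfer_fn = mydma_transfer;
        return dma;
    }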
Makefile.target
... ... @@ -594,7 +594,7 @@ OBJS+= pxa2xx_lcd.o pxa2xx_mmci.o pxa2xx_pcmcia.o pxa2xx_keypad.o
594 594 OBJS+= pflash_cfi01.o gumstix.o
595 595 OBJS+= zaurus.o ide.o serial.o nand.o ecc.o spitz.o tosa.o tc6393xb.o
596 596 OBJS+= omap1.o omap_lcdc.o omap_dma.o omap_clk.o omap_mmc.o omap_i2c.o
597   -OBJS+= omap2.o omap_dss.o
  597 +OBJS+= omap2.o omap_dss.o soc_dma.o
598 598 OBJS+= palm.o tsc210x.o
599 599 OBJS+= nseries.o blizzard.o onenand.o vga.o cbus.o tusb6010.o usb-musb.o
600 600 OBJS+= tsc2005.o
... ...
hw/omap.h
... ... @@ -417,14 +417,14 @@ enum omap_dma_model {
417 417 omap_dma_4,
418 418 };
419 419  
420   -struct omap_dma_s;
421   -struct omap_dma_s *omap_dma_init(target_phys_addr_t base, qemu_irq *irqs,
  420 +struct soc_dma_s;
  421 +struct soc_dma_s *omap_dma_init(target_phys_addr_t base, qemu_irq *irqs,
422 422 qemu_irq lcd_irq, struct omap_mpu_state_s *mpu, omap_clk clk,
423 423 enum omap_dma_model model);
424   -struct omap_dma_s *omap_dma4_init(target_phys_addr_t base, qemu_irq *irqs,
  424 +struct soc_dma_s *omap_dma4_init(target_phys_addr_t base, qemu_irq *irqs,
425 425 struct omap_mpu_state_s *mpu, int fifo,
426 426 int chans, omap_clk iclk, omap_clk fclk);
427   -void omap_dma_reset(struct omap_dma_s *s);
  427 +void omap_dma_reset(struct soc_dma_s *s);
428 428  
429 429 struct dma_irq_map {
430 430 int ih;
... ... @@ -494,7 +494,7 @@ struct omap_dma_lcd_channel_s {
494 494 ram_addr_t phys_framebuffer[2];
495 495 qemu_irq irq;
496 496 struct omap_mpu_state_s *mpu;
497   -} *omap_dma_get_lcdch(struct omap_dma_s *s);
  497 +} *omap_dma_get_lcdch(struct soc_dma_s *s);
498 498  
499 499 /*
500 500 * DMA request numbers for OMAP1
... ... @@ -882,7 +882,7 @@ struct omap_mpu_state_s {
882 882 /* MPU private TIPB peripherals */
883 883 struct omap_intr_handler_s *ih[2];
884 884  
885   - struct omap_dma_s *dma;
  885 + struct soc_dma_s *dma;
886 886  
887 887 struct omap_mpu_timer_s *timer[3];
888 888 struct omap_watchdog_timer_s *wdt;
... ...
hw/omap1.c
... ... @@ -24,6 +24,7 @@
24 24 #include "sysemu.h"
25 25 #include "qemu-timer.h"
26 26 #include "qemu-char.h"
  27 +#include "soc_dma.h"
27 28 /* We use pc-style serial ports. */
28 29 #include "pc.h"
29 30  
... ... @@ -4704,6 +4705,12 @@ struct omap_mpu_state_s *omap310_mpu_init(unsigned long sdram_size,
4704 4705 s->port[local ].addr_valid = omap_validate_local_addr;
4705 4706 s->port[tipb_mpui].addr_valid = omap_validate_tipb_mpui_addr;
4706 4707  
  4708 + /* Register SDRAM and SRAM DMA ports for fast transfers. */
  4709 + soc_dma_port_add_mem_ram(s->dma,
  4710 + emiff_base, OMAP_EMIFF_BASE, s->sdram_size);
  4711 + soc_dma_port_add_mem_ram(s->dma,
  4712 + imif_base, OMAP_IMIF_BASE, s->sram_size);
  4713 +
4707 4714 s->timer[0] = omap_mpu_timer_init(0xfffec500,
4708 4715 s->irq[0][OMAP_INT_TIMER1],
4709 4716 omap_findclk(s, "mputim_ck"));
... ...
hw/omap2.c
... ... @@ -26,6 +26,7 @@
26 26 #include "qemu-timer.h"
27 27 #include "qemu-char.h"
28 28 #include "flash.h"
  29 +#include "soc_dma.h"
29 30 #include "audio/audio.h"
30 31  
31 32 /* GP timers */
... ... @@ -4493,6 +4494,10 @@ struct omap_mpu_state_s *omap2420_mpu_init(unsigned long sdram_size,
4493 4494 omap_findclk(s, "sdma_fclk"));
4494 4495 s->port->addr_valid = omap2_validate_addr;
4495 4496  
  4497 + /* Register SDRAM and SRAM ports for fast DMA transfers. */
  4498 + soc_dma_port_add_mem_ram(s->dma, q2_base, OMAP2_Q2_BASE, s->sdram_size);
  4499 + soc_dma_port_add_mem_ram(s->dma, sram_base, OMAP2_SRAM_BASE, s->sram_size);
  4500 +
4496 4501 s->uart[0] = omap2_uart_init(omap_l4ta(s->l4, 19),
4497 4502 s->irq[0][OMAP_INT_24XX_UART1_IRQ],
4498 4503 omap_findclk(s, "uart1_fclk"),
... ...
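
Registering the RAM ports up front is what lets soc_dma translate guest bus addresses back into host pointers and service such channels with plain memcpy(). Device models can expose FIFO endpoints to the same lookup; a channel reaches the FIFO fast path only when its address on that port is constant (soc_dma_access_const) and matches the registered address. A hedged sketch, with the audio device, its FIFO address and its callback invented for illustration:

    /* DMA data arriving at AUDIO_TX_FIFO_ADDR is handed straight to the
     * device callback instead of the generic per-element loop. */
    static void audio_tx_fifo(void *opaque, uint8_t *buf, int len)
    {
        /* consume len bytes of sample data from buf */
    }

    soc_dma_port_add_fifo_out(s->dma, AUDIO_TX_FIFO_ADDR,  /* hypothetical */
                    audio_tx_fifo, audio_state);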
hw/omap_dma.c
... ... @@ -23,6 +23,7 @@
23 23 #include "qemu-timer.h"
24 24 #include "omap.h"
25 25 #include "irq.h"
  26 +#include "soc_dma.h"
26 27  
27 28 struct omap_dma_channel_s {
28 29 /* transfer data */
... ... @@ -66,6 +67,7 @@ struct omap_dma_channel_s {
66 67 int pending_request;
67 68 int waiting_end_prog;
68 69 uint16_t cpc;
  70 + int set_update;
69 71  
70 72 /* sync type */
71 73 int fs;
... ... @@ -89,6 +91,8 @@ struct omap_dma_channel_s {
89 91 int pck_elements;
90 92 } active_set;
91 93  
  94 + struct soc_dma_ch_s *dma;
  95 +
92 96 /* unused parameters */
93 97 int write_mode;
94 98 int priority;
... ... @@ -99,12 +103,11 @@ struct omap_dma_channel_s {
99 103 };
100 104  
101 105 struct omap_dma_s {
102   - QEMUTimer *tm;
  106 + struct soc_dma_s *dma;
  107 +
103 108 struct omap_mpu_state_s *mpu;
104 109 target_phys_addr_t base;
105 110 omap_clk clk;
106   - int64_t delay;
107   - uint64_t drq;
108 111 qemu_irq irq[4];
109 112 void (*intr_update)(struct omap_dma_s *s);
110 113 enum omap_dma_model model;
... ... @@ -115,7 +118,6 @@ struct omap_dma_s {
115 118 uint32_t caps[5];
116 119 uint32_t irqen[4];
117 120 uint32_t irqstat[4];
118   - int run_count;
119 121  
120 122 int chans;
121 123 struct omap_dma_channel_s ch[32];
... ... @@ -139,11 +141,10 @@ static inline void omap_dma_interrupts_update(struct omap_dma_s *s)
139 141 return s->intr_update(s);
140 142 }
141 143  
142   -static void omap_dma_channel_load(struct omap_dma_s *s,
143   - struct omap_dma_channel_s *ch)
  144 +static void omap_dma_channel_load(struct omap_dma_channel_s *ch)
144 145 {
145 146 struct omap_dma_reg_set_s *a = &ch->active_set;
146   - int i;
  147 + int i, normal;
147 148 int omap_3_1 = !ch->omap_3_1_compatible_disable;
148 149  
149 150 /*
... ... @@ -189,20 +190,50 @@ static void omap_dma_channel_load(struct omap_dma_s *s,
189 190 default:
190 191 break;
191 192 }
  193 +
  194 + normal = !ch->transparent_copy && !ch->constant_fill &&
  195 + /* FIFO is big-endian so either (ch->endian[n] == 1) OR
  196 + * (ch->endian_lock[n] == 1) means no endianness conversion. */
  197 + (ch->endian[0] | ch->endian_lock[0]) ==
  198 + (ch->endian[1] | ch->endian_lock[1]);
  199 + for (i = 0; i < 2; i ++) {
  200 + /* TODO: for a->frame_delta[i] > 0 still use the fast path, just
  201 + * limit min_elems in omap_dma_transfer_setup to the nearest frame
  202 + * end. */
  203 + if (!a->elem_delta[i] && normal &&
  204 + (a->frames == 1 || !a->frame_delta[i]))
  205 + ch->dma->type[i] = soc_dma_access_const;
  206 + else if (a->elem_delta[i] == ch->data_type && normal &&
  207 + (a->frames == 1 || !a->frame_delta[i]))
  208 + ch->dma->type[i] = soc_dma_access_linear;
  209 + else
  210 + ch->dma->type[i] = soc_dma_access_other;
  211 +
  212 + ch->dma->vaddr[i] = ch->addr[i];
  213 + }
  214 + soc_dma_ch_update(ch->dma);
192 215 }
193 216  
194 217 static void omap_dma_activate_channel(struct omap_dma_s *s,
195 218 struct omap_dma_channel_s *ch)
196 219 {
197 220 if (!ch->active) {
  221 + if (ch->set_update) {
  222 + /* It's not clear when the active set is supposed to be
  223 + * loaded from registers. We're already loading it when the
  224 + * channel is enabled, and for some guests this is not enough
  225 + * but that may also be because of a race condition (no
  226 + * delays in qemu) in the guest code, which we're just
  227 + * working around here. */
  228 + omap_dma_channel_load(ch);
  229 + ch->set_update = 0;
  230 + }
  231 +
198 232 ch->active = 1;
  233 + soc_dma_set_request(ch->dma, 1);
199 234 if (ch->sync)
200 235 ch->status |= SYNC;
201   - s->run_count ++;
202 236 }
203   -
204   - if (s->delay && !qemu_timer_pending(s->tm))
205   - qemu_mod_timer(s->tm, qemu_get_clock(vm_clock) + s->delay);
206 237 }
207 238  
208 239 static void omap_dma_deactivate_channel(struct omap_dma_s *s,
... ... @@ -219,17 +250,14 @@ static void omap_dma_deactivate_channel(struct omap_dma_s *s,
219 250  
220 251 /* Don't deactive the channel if it is synchronized and the DMA request is
221 252 active */
222   - if (ch->sync && ch->enable && (s->drq & (1 << ch->sync)))
  253 + if (ch->sync && ch->enable && (s->dma->drqbmp & (1ULL << ch->sync)))
223 254 return;
224 255  
225 256 if (ch->active) {
226 257 ch->active = 0;
227 258 ch->status &= ~SYNC;
228   - s->run_count --;
  259 + soc_dma_set_request(ch->dma, 0);
229 260 }
230   -
231   - if (!s->run_count)
232   - qemu_del_timer(s->tm);
233 261 }
234 262  
235 263 static void omap_dma_enable_channel(struct omap_dma_s *s,
... ... @@ -238,11 +266,11 @@ static void omap_dma_enable_channel(struct omap_dma_s *s,
238 266 if (!ch->enable) {
239 267 ch->enable = 1;
240 268 ch->waiting_end_prog = 0;
241   - omap_dma_channel_load(s, ch);
  269 + omap_dma_channel_load(ch);
242 270 /* TODO: theoretically if ch->sync && ch->prefetch &&
243   - * !s->drq[ch->sync], we should also activate and fetch from source
244   - * and then stall until signalled. */
245   - if ((!ch->sync) || (s->drq & (1 << ch->sync)))
  271 + * !s->dma->drqbmp[ch->sync], we should also activate and fetch
  272 + * from source and then stall until signalled. */
  273 + if ((!ch->sync) || (s->dma->drqbmp & (1ULL << ch->sync)))
246 274 omap_dma_activate_channel(s, ch);
247 275 }
248 276 }
... ... @@ -338,140 +366,319 @@ static void omap_dma_process_request(struct omap_dma_s *s, int request)
338 366 omap_dma_interrupts_update(s);
339 367 }
340 368  
341   -static void omap_dma_channel_run(struct omap_dma_s *s)
  369 +static void omap_dma_transfer_generic(struct soc_dma_ch_s *dma)
342 370 {
343   - int n = s->chans;
344   - uint16_t status;
345 371 uint8_t value[4];
346   - struct omap_dma_port_if_s *src_p, *dest_p;
347   - struct omap_dma_reg_set_s *a;
348   - struct omap_dma_channel_s *ch;
349   -
350   - for (ch = s->ch; n; n --, ch ++) {
351   - if (!ch->active)
352   - continue;
353   -
354   - a = &ch->active_set;
  372 + struct omap_dma_channel_s *ch = dma->opaque;
  373 + struct omap_dma_reg_set_s *a = &ch->active_set;
  374 + int bytes = dma->bytes;
  375 +#ifdef MULTI_REQ
  376 + uint16_t status = ch->status;
  377 +#endif
355 378  
356   - src_p = &s->mpu->port[ch->port[0]];
357   - dest_p = &s->mpu->port[ch->port[1]];
358   - if ((!ch->constant_fill && !src_p->addr_valid(s->mpu, a->src)) ||
359   - (!dest_p->addr_valid(s->mpu, a->dest))) {
360   -#if 0
361   - /* Bus time-out */
362   - if (ch->interrupts & TIMEOUT_INTR)
363   - ch->status |= TIMEOUT_INTR;
  379 + do {
  380 + /* Transfer a single element */
  381 + /* FIXME: check the endianness */
  382 + if (!ch->constant_fill)
  383 + cpu_physical_memory_read(a->src, value, ch->data_type);
  384 + else
  385 + *(uint32_t *) value = ch->color;
  386 +
  387 + if (!ch->transparent_copy || *(uint32_t *) value != ch->color)
  388 + cpu_physical_memory_write(a->dest, value, ch->data_type);
  389 +
  390 + a->src += a->elem_delta[0];
  391 + a->dest += a->elem_delta[1];
  392 + a->element ++;
  393 +
  394 +#ifndef MULTI_REQ
  395 + if (a->element == a->elements) {
  396 + /* End of Frame */
  397 + a->element = 0;
  398 + a->src += a->frame_delta[0];
  399 + a->dest += a->frame_delta[1];
  400 + a->frame ++;
  401 +
  402 + /* If the channel is async, update cpc */
  403 + if (!ch->sync)
  404 + ch->cpc = a->dest & 0xffff;
  405 + }
  406 + } while ((bytes -= ch->data_type));
  407 +#else
  408 + /* If the channel is element synchronized, deactivate it */
  409 + if (ch->sync && !ch->fs && !ch->bs)
364 410 omap_dma_deactivate_channel(s, ch);
365   - continue;
366   -#endif
367   - printf("%s: Bus time-out in DMA%i operation\n",
368   - __FUNCTION__, s->chans - n);
  411 +
  412 + /* If it is the last frame, set the LAST_FRAME interrupt */
  413 + if (a->element == 1 && a->frame == a->frames - 1)
  414 + if (ch->interrupts & LAST_FRAME_INTR)
  415 + ch->status |= LAST_FRAME_INTR;
  416 +
  417 + /* If the half of the frame was reached, set the HALF_FRAME
  418 + interrupt */
  419 + if (a->element == (a->elements >> 1))
  420 + if (ch->interrupts & HALF_FRAME_INTR)
  421 + ch->status |= HALF_FRAME_INTR;
  422 +
  423 + if (ch->fs && ch->bs) {
  424 + a->pck_element ++;
  425 + /* Check if a full packet has been transferred. */
  426 + if (a->pck_element == a->pck_elements) {
  427 + a->pck_element = 0;
  428 +
  429 + /* Set the END_PKT interrupt */
  430 + if ((ch->interrupts & END_PKT_INTR) && !ch->src_sync)
  431 + ch->status |= END_PKT_INTR;
  432 +
  433 + /* If the channel is packet-synchronized, deactivate it */
  434 + if (ch->sync)
  435 + omap_dma_deactivate_channel(s, ch);
  436 + }
369 437 }
370 438  
371   - status = ch->status;
372   - while (status == ch->status && ch->active) {
373   - /* Transfer a single element */
374   - /* FIXME: check the endianness */
375   - if (!ch->constant_fill)
376   - cpu_physical_memory_read(a->src, value, ch->data_type);
377   - else
378   - *(uint32_t *) value = ch->color;
379   -
380   - if (!ch->transparent_copy ||
381   - *(uint32_t *) value != ch->color)
382   - cpu_physical_memory_write(a->dest, value, ch->data_type);
383   -
384   - a->src += a->elem_delta[0];
385   - a->dest += a->elem_delta[1];
386   - a->element ++;
387   -
388   - /* If the channel is element synchronized, deactivate it */
389   - if (ch->sync && !ch->fs && !ch->bs)
  439 + if (a->element == a->elements) {
  440 + /* End of Frame */
  441 + a->element = 0;
  442 + a->src += a->frame_delta[0];
  443 + a->dest += a->frame_delta[1];
  444 + a->frame ++;
  445 +
  446 + /* If the channel is frame synchronized, deactivate it */
  447 + if (ch->sync && ch->fs && !ch->bs)
390 448 omap_dma_deactivate_channel(s, ch);
391 449  
392   - /* If it is the last frame, set the LAST_FRAME interrupt */
393   - if (a->element == 1 && a->frame == a->frames - 1)
394   - if (ch->interrupts & LAST_FRAME_INTR)
395   - ch->status |= LAST_FRAME_INTR;
396   -
397   - /* If the half of the frame was reached, set the HALF_FRAME
398   - interrupt */
399   - if (a->element == (a->elements >> 1))
400   - if (ch->interrupts & HALF_FRAME_INTR)
401   - ch->status |= HALF_FRAME_INTR;
402   -
403   - if (ch->fs && ch->bs) {
404   - a->pck_element ++;
405   - /* Check if a full packet has beed transferred. */
406   - if (a->pck_element == a->pck_elements) {
407   - a->pck_element = 0;
408   -
409   - /* Set the END_PKT interrupt */
410   - if ((ch->interrupts & END_PKT_INTR) && !ch->src_sync)
411   - ch->status |= END_PKT_INTR;
412   -
413   - /* If the channel is packet-synchronized, deactivate it */
414   - if (ch->sync)
  450 + /* If the channel is async, update cpc */
  451 + if (!ch->sync)
  452 + ch->cpc = a->dest & 0xffff;
  453 +
  454 + /* Set the END_FRAME interrupt */
  455 + if (ch->interrupts & END_FRAME_INTR)
  456 + ch->status |= END_FRAME_INTR;
  457 +
  458 + if (a->frame == a->frames) {
  459 + /* End of Block */
  460 + /* Disable the channel */
  461 +
  462 + if (ch->omap_3_1_compatible_disable) {
  463 + omap_dma_disable_channel(s, ch);
  464 + if (ch->link_enabled)
  465 + omap_dma_enable_channel(s,
  466 + &s->ch[ch->link_next_ch]);
  467 + } else {
  468 + if (!ch->auto_init)
  469 + omap_dma_disable_channel(s, ch);
  470 + else if (ch->repeat || ch->end_prog)
  471 + omap_dma_channel_load(ch);
  472 + else {
  473 + ch->waiting_end_prog = 1;
415 474 omap_dma_deactivate_channel(s, ch);
  475 + }
416 476 }
  477 +
  478 + if (ch->interrupts & END_BLOCK_INTR)
  479 + ch->status |= END_BLOCK_INTR;
417 480 }
  481 + }
  482 + } while (status == ch->status && ch->active);
418 483  
419   - if (a->element == a->elements) {
420   - /* End of Frame */
421   - a->element = 0;
422   - a->src += a->frame_delta[0];
423   - a->dest += a->frame_delta[1];
424   - a->frame ++;
  484 + omap_dma_interrupts_update(s);
  485 +#endif
  486 +}
425 487  
426   - /* If the channel is frame synchronized, deactivate it */
427   - if (ch->sync && ch->fs && !ch->bs)
428   - omap_dma_deactivate_channel(s, ch);
  488 +enum {
  489 + omap_dma_intr_element_sync,
  490 + omap_dma_intr_last_frame,
  491 + omap_dma_intr_half_frame,
  492 + omap_dma_intr_frame,
  493 + omap_dma_intr_frame_sync,
  494 + omap_dma_intr_packet,
  495 + omap_dma_intr_packet_sync,
  496 + omap_dma_intr_block,
  497 + __omap_dma_intr_last,
  498 +};
429 499  
430   - /* If the channel is async, update cpc */
431   - if (!ch->sync)
432   - ch->cpc = a->dest & 0xffff;
  500 +static void omap_dma_transfer_setup(struct soc_dma_ch_s *dma)
  501 +{
  502 + struct omap_dma_port_if_s *src_p, *dest_p;
  503 + struct omap_dma_reg_set_s *a;
  504 + struct omap_dma_channel_s *ch = dma->opaque;
  505 + struct omap_dma_s *s = dma->dma->opaque;
  506 + int frames, min_elems, elements[__omap_dma_intr_last];
433 507  
434   - /* Set the END_FRAME interrupt */
435   - if (ch->interrupts & END_FRAME_INTR)
436   - ch->status |= END_FRAME_INTR;
  508 + a = &ch->active_set;
437 509  
438   - if (a->frame == a->frames) {
439   - /* End of Block */
440   - /* Disable the channel */
  510 + src_p = &s->mpu->port[ch->port[0]];
  511 + dest_p = &s->mpu->port[ch->port[1]];
  512 + if ((!ch->constant_fill && !src_p->addr_valid(s->mpu, a->src)) ||
  513 + (!dest_p->addr_valid(s->mpu, a->dest))) {
  514 +#if 0
  515 + /* Bus time-out */
  516 + if (ch->interrupts & TIMEOUT_INTR)
  517 + ch->status |= TIMEOUT_INTR;
  518 + omap_dma_deactivate_channel(s, ch);
  519 + continue;
  520 +#endif
  521 + printf("%s: Bus time-out in DMA%i operation\n",
  522 + __FUNCTION__, dma->num);
  523 + }
441 524  
442   - if (ch->omap_3_1_compatible_disable) {
443   - omap_dma_disable_channel(s, ch);
444   - if (ch->link_enabled)
445   - omap_dma_enable_channel(s,
446   - &s->ch[ch->link_next_ch]);
447   - } else {
448   - if (!ch->auto_init)
449   - omap_dma_disable_channel(s, ch);
450   - else if (ch->repeat || ch->end_prog)
451   - omap_dma_channel_load(s, ch);
452   - else {
453   - ch->waiting_end_prog = 1;
454   - omap_dma_deactivate_channel(s, ch);
455   - }
456   - }
  525 + min_elems = INT_MAX;
  526 +
  527 + /* Check all the conditions that terminate the transfer starting
  528 + * with those that can occur the soonest. */
  529 +#define INTR_CHECK(cond, id, nelements) \
  530 + if (cond) { \
  531 + elements[id] = nelements; \
  532 + if (elements[id] < min_elems) \
  533 + min_elems = elements[id]; \
  534 + } else \
  535 + elements[id] = INT_MAX;
  536 +
  537 + /* Elements */
  538 + INTR_CHECK(
  539 + ch->sync && !ch->fs && !ch->bs,
  540 + omap_dma_intr_element_sync,
  541 + 1)
  542 +
  543 + /* Frames */
  544 + /* TODO: for transfers where entire frames can be read and written
  545 + * using memcpy() but a->frame_delta is non-zero, try to still do
  546 + * transfers using soc_dma but limit min_elems to a->elements - ...
  547 + * See also the TODO in omap_dma_channel_load. */
  548 + INTR_CHECK(
  549 + (ch->interrupts & LAST_FRAME_INTR) &&
  550 + ((a->frame < a->frames - 1) || !a->element),
  551 + omap_dma_intr_last_frame,
  552 + (a->frames - a->frame - 2) * a->elements +
  553 + (a->elements - a->element + 1))
  554 + INTR_CHECK(
  555 + ch->interrupts & HALF_FRAME_INTR,
  556 + omap_dma_intr_half_frame,
  557 + (a->elements >> 1) +
  558 + (a->element >= (a->elements >> 1) ? a->elements : 0) -
  559 + a->element)
  560 + INTR_CHECK(
  561 + ch->sync && ch->fs && (ch->interrupts & END_FRAME_INTR),
  562 + omap_dma_intr_frame,
  563 + a->elements - a->element)
  564 + INTR_CHECK(
  565 + ch->sync && ch->fs && !ch->bs,
  566 + omap_dma_intr_frame_sync,
  567 + a->elements - a->element)
  568 +
  569 + /* Packets */
  570 + INTR_CHECK(
  571 + ch->fs && ch->bs &&
  572 + (ch->interrupts & END_PKT_INTR) && !ch->src_sync,
  573 + omap_dma_intr_packet,
  574 + a->pck_elements - a->pck_element)
  575 + INTR_CHECK(
  576 + ch->fs && ch->bs && ch->sync,
  577 + omap_dma_intr_packet_sync,
  578 + a->pck_elements - a->pck_element)
  579 +
  580 + /* Blocks */
  581 + INTR_CHECK(
  582 + 1,
  583 + omap_dma_intr_block,
  584 + (a->frames - a->frame - 1) * a->elements +
  585 + (a->elements - a->element))
  586 +
  587 + dma->bytes = min_elems * ch->data_type;
  588 +
  589 + /* Set appropriate interrupts and/or deactivate channels */
  590 +
  591 +#ifdef MULTI_REQ
  592 + /* TODO: should all of this only be done if dma->update, and otherwise
  593 + * inside omap_dma_transfer_generic below - check what's faster. */
  594 + if (dma->update) {
  595 +#endif
457 596  
458   - if (ch->interrupts & END_BLOCK_INTR)
459   - ch->status |= END_BLOCK_INTR;
460   - }
  597 + /* If the channel is element synchronized, deactivate it */
  598 + if (min_elems == elements[omap_dma_intr_element_sync])
  599 + omap_dma_deactivate_channel(s, ch);
  600 +
  601 + /* If it is the last frame, set the LAST_FRAME interrupt */
  602 + if (min_elems == elements[omap_dma_intr_last_frame])
  603 + ch->status |= LAST_FRAME_INTR;
  604 +
  605 + /* If exactly half of the frame was reached, set the HALF_FRAME
  606 + interrupt */
  607 + if (min_elems == elements[omap_dma_intr_half_frame])
  608 + ch->status |= HALF_FRAME_INTR;
  609 +
  610 + /* If a full packet has been transferred, set the END_PKT interrupt */
  611 + if (min_elems == elements[omap_dma_intr_packet])
  612 + ch->status |= END_PKT_INTR;
  613 +
  614 + /* If the channel is packet-synchronized, deactivate it */
  615 + if (min_elems == elements[omap_dma_intr_packet_sync])
  616 + omap_dma_deactivate_channel(s, ch);
  617 +
  618 + /* If the channel is frame synchronized, deactivate it */
  619 + if (min_elems == elements[omap_dma_intr_frame_sync])
  620 + omap_dma_deactivate_channel(s, ch);
  621 +
  622 + /* Set the END_FRAME interrupt */
  623 + if (min_elems == elements[omap_dma_intr_frame])
  624 + ch->status |= END_FRAME_INTR;
  625 +
  626 + if (min_elems == elements[omap_dma_intr_block]) {
  627 + /* End of Block */
  628 + /* Disable the channel */
  629 +
  630 + if (ch->omap_3_1_compatible_disable) {
  631 + omap_dma_disable_channel(s, ch);
  632 + if (ch->link_enabled)
  633 + omap_dma_enable_channel(s, &s->ch[ch->link_next_ch]);
  634 + } else {
  635 + if (!ch->auto_init)
  636 + omap_dma_disable_channel(s, ch);
  637 + else if (ch->repeat || ch->end_prog)
  638 + omap_dma_channel_load(ch);
  639 + else {
  640 + ch->waiting_end_prog = 1;
  641 + omap_dma_deactivate_channel(s, ch);
461 642 }
462 643 }
  644 +
  645 + if (ch->interrupts & END_BLOCK_INTR)
  646 + ch->status |= END_BLOCK_INTR;
  647 + }
  648 +
  649 + /* Update packet number */
  650 + if (ch->fs && ch->bs) {
  651 + a->pck_element += min_elems;
  652 + a->pck_element %= a->pck_elements;
  653 + }
  654 +
  655 + /* TODO: check if we really need to update anything here or perhaps we
  656 + * can skip part of this. */
  657 +#ifndef MULTI_REQ
  658 + if (dma->update) {
  659 +#endif
  660 + a->element += min_elems;
  661 +
  662 + frames = a->element / a->elements;
  663 + a->element = a->element % a->elements;
  664 + a->frame += frames;
  665 + a->src += min_elems * a->elem_delta[0] + frames * a->frame_delta[0];
  666 + a->dest += min_elems * a->elem_delta[1] + frames * a->frame_delta[1];
  667 +
  668 + /* If the channel is async, update cpc */
  669 + if (!ch->sync && frames)
  670 + ch->cpc = a->dest & 0xffff;
463 671 }
464 672  
465 673 omap_dma_interrupts_update(s);
466   - if (s->run_count && s->delay)
467   - qemu_mod_timer(s->tm, qemu_get_clock(vm_clock) + s->delay);
468 674 }
469 675  
470   -void omap_dma_reset(struct omap_dma_s *s)
  676 +void omap_dma_reset(struct soc_dma_s *dma)
471 677 {
472 678 int i;
  679 + struct omap_dma_s *s = dma->opaque;
473 680  
474   - qemu_del_timer(s->tm);
  681 + soc_dma_reset(s->dma);
475 682 if (s->model < omap_dma_4)
476 683 s->gcr = 0x0004;
477 684 else
... ... @@ -479,8 +686,6 @@ void omap_dma_reset(struct omap_dma_s *s)
479 686 s->ocp = 0x00000000;
480 687 memset(&s->irqstat, 0, sizeof(s->irqstat));
481 688 memset(&s->irqen, 0, sizeof(s->irqen));
482   - s->drq = 0x00000000;
483   - s->run_count = 0;
484 689 s->lcd_ch.src = emiff;
485 690 s->lcd_ch.condition = 0;
486 691 s->lcd_ch.interrupts = 0;
... ... @@ -1161,7 +1366,7 @@ static int omap_dma_sys_write(struct omap_dma_s *s, int offset, uint16_t value)
1161 1366  
1162 1367 case 0x408: /* DMA_GRST */
1163 1368 if (value & 0x1)
1164   - omap_dma_reset(s);
  1369 + omap_dma_reset(s->dma);
1165 1370 break;
1166 1371  
1167 1372 default:
... ... @@ -1338,27 +1543,25 @@ static void omap_dma_request(void *opaque, int drq, int req)
1338 1543 struct omap_dma_s *s = (struct omap_dma_s *) opaque;
1339 1544 /* The request pins are level triggered in QEMU. */
1340 1545 if (req) {
1341   - if (~s->drq & (1 << drq)) {
1342   - s->drq |= 1 << drq;
  1546 + if (~s->dma->drqbmp & (1ULL << drq)) {
  1547 + s->dma->drqbmp |= 1ULL << drq;
1343 1548 omap_dma_process_request(s, drq);
1344 1549 }
1345 1550 } else
1346   - s->drq &= ~(1 << drq);
  1551 + s->dma->drqbmp &= ~(1ULL << drq);
1347 1552 }
1348 1553  
  1554 +/* XXX: this won't be needed once soc_dma knows about clocks. */
1349 1555 static void omap_dma_clk_update(void *opaque, int line, int on)
1350 1556 {
1351 1557 struct omap_dma_s *s = (struct omap_dma_s *) opaque;
  1558 + int i;
1352 1559  
1353   - if (on) {
1354   - /* TODO: make a clever calculation */
1355   - s->delay = ticks_per_sec >> 8;
1356   - if (s->run_count)
1357   - qemu_mod_timer(s->tm, qemu_get_clock(vm_clock) + s->delay);
1358   - } else {
1359   - s->delay = 0;
1360   - qemu_del_timer(s->tm);
1361   - }
  1560 + s->dma->freq = omap_clk_getrate(s->clk);
  1561 +
  1562 + for (i = 0; i < s->chans; i ++)
  1563 + if (s->ch[i].active)
  1564 + soc_dma_set_request(s->ch[i].dma, on);
1362 1565 }
1363 1566  
1364 1567 static void omap_dma_setcaps(struct omap_dma_s *s)
... ... @@ -1407,7 +1610,7 @@ static void omap_dma_setcaps(struct omap_dma_s *s)
1407 1610 }
1408 1611 }
1409 1612  
1410   -struct omap_dma_s *omap_dma_init(target_phys_addr_t base, qemu_irq *irqs,
  1613 +struct soc_dma_s *omap_dma_init(target_phys_addr_t base, qemu_irq *irqs,
1411 1614 qemu_irq lcd_irq, struct omap_mpu_state_s *mpu, omap_clk clk,
1412 1615 enum omap_dma_model model)
1413 1616 {
... ... @@ -1428,24 +1631,37 @@ struct omap_dma_s *omap_dma_init(target_phys_addr_t base, qemu_irq *irqs,
1428 1631 s->clk = clk;
1429 1632 s->lcd_ch.irq = lcd_irq;
1430 1633 s->lcd_ch.mpu = mpu;
1431   - omap_dma_setcaps(s);
  1634 +
  1635 + s->dma = soc_dma_init((model <= omap_dma_3_1) ? 9 : 16);
  1636 + s->dma->freq = omap_clk_getrate(clk);
  1637 + s->dma->transfer_fn = omap_dma_transfer_generic;
  1638 + s->dma->setup_fn = omap_dma_transfer_setup;
  1639 + s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 32);
  1640 + s->dma->opaque = s;
  1641 +
1432 1642 while (num_irqs --)
1433 1643 s->ch[num_irqs].irq = irqs[num_irqs];
1434 1644 for (i = 0; i < 3; i ++) {
1435 1645 s->ch[i].sibling = &s->ch[i + 6];
1436 1646 s->ch[i + 6].sibling = &s->ch[i];
1437 1647 }
1438   - s->tm = qemu_new_timer(vm_clock, (QEMUTimerCB *) omap_dma_channel_run, s);
  1648 + for (i = (model <= omap_dma_3_1) ? 8 : 15; i >= 0; i --) {
  1649 + s->ch[i].dma = &s->dma->ch[i];
  1650 + s->dma->ch[i].opaque = &s->ch[i];
  1651 + }
  1652 +
  1653 + omap_dma_setcaps(s);
1439 1654 omap_clk_adduser(s->clk, qemu_allocate_irqs(omap_dma_clk_update, s, 1)[0]);
1440   - mpu->drq = qemu_allocate_irqs(omap_dma_request, s, 32);
1441   - omap_dma_reset(s);
  1655 + omap_dma_reset(s->dma);
1442 1656 omap_dma_clk_update(s, 0, 1);
1443 1657  
1444 1658 iomemtype = cpu_register_io_memory(0, omap_dma_readfn,
1445 1659 omap_dma_writefn, s);
1446 1660 cpu_register_physical_memory(s->base, memsize, iomemtype);
1447 1661  
1448   - return s;
  1662 + mpu->drq = s->dma->drq;
  1663 +
  1664 + return s->dma;
1449 1665 }
1450 1666  
1451 1667 static void omap_dma_interrupts_4_update(struct omap_dma_s *s)
... ... @@ -1646,7 +1862,7 @@ static void omap_dma4_write(void *opaque, target_phys_addr_t addr,
1646 1862  
1647 1863 case 0x2c: /* DMA4_OCP_SYSCONFIG */
1648 1864 if (value & 2) /* SOFTRESET */
1649   - omap_dma_reset(s);
  1865 + omap_dma_reset(s->dma);
1650 1866 s->ocp = value & 0x3321;
1651 1867 if (((s->ocp >> 12) & 3) == 3) /* MIDLEMODE */
1652 1868 fprintf(stderr, "%s: invalid DMA power mode\n", __FUNCTION__);
... ... @@ -1728,7 +1944,7 @@ static void omap_dma4_write(void *opaque, target_phys_addr_t addr,
1728 1944 ch->endian[1] =(value >> 19) & 1;
1729 1945 ch->endian_lock[1] =(value >> 18) & 1;
1730 1946 if (ch->endian[0] != ch->endian[1])
1731   - fprintf(stderr, "%s: DMA endianned conversion enable attempt\n",
  1947 + fprintf(stderr, "%s: DMA endianness conversion enable attempt\n",
1732 1948 __FUNCTION__);
1733 1949 ch->write_mode = (value >> 16) & 3;
1734 1950 ch->burst[1] = (value & 0xc000) >> 14;
... ... @@ -1746,35 +1962,43 @@ static void omap_dma4_write(void *opaque, target_phys_addr_t addr,
1746 1962 break;
1747 1963  
1748 1964 case 0x14: /* DMA4_CEN */
  1965 + ch->set_update = 1;
1749 1966 ch->elements = value & 0xffffff;
1750 1967 break;
1751 1968  
1752 1969 case 0x18: /* DMA4_CFN */
1753 1970 ch->frames = value & 0xffff;
  1971 + ch->set_update = 1;
1754 1972 break;
1755 1973  
1756 1974 case 0x1c: /* DMA4_CSSA */
1757 1975 ch->addr[0] = (target_phys_addr_t) (uint32_t) value;
  1976 + ch->set_update = 1;
1758 1977 break;
1759 1978  
1760 1979 case 0x20: /* DMA4_CDSA */
1761 1980 ch->addr[1] = (target_phys_addr_t) (uint32_t) value;
  1981 + ch->set_update = 1;
1762 1982 break;
1763 1983  
1764 1984 case 0x24: /* DMA4_CSEI */
1765 1985 ch->element_index[0] = (int16_t) value;
  1986 + ch->set_update = 1;
1766 1987 break;
1767 1988  
1768 1989 case 0x28: /* DMA4_CSFI */
1769 1990 ch->frame_index[0] = (int32_t) value;
  1991 + ch->set_update = 1;
1770 1992 break;
1771 1993  
1772 1994 case 0x2c: /* DMA4_CDEI */
1773 1995 ch->element_index[1] = (int16_t) value;
  1996 + ch->set_update = 1;
1774 1997 break;
1775 1998  
1776 1999 case 0x30: /* DMA4_CDFI */
1777 2000 ch->frame_index[1] = (int32_t) value;
  2001 + ch->set_update = 1;
1778 2002 break;
1779 2003  
1780 2004 case 0x44: /* DMA4_COLOR */
... ... @@ -1806,11 +2030,11 @@ static CPUWriteMemoryFunc *omap_dma4_writefn[] = {
1806 2030 omap_dma4_write,
1807 2031 };
1808 2032  
1809   -struct omap_dma_s *omap_dma4_init(target_phys_addr_t base, qemu_irq *irqs,
  2033 +struct soc_dma_s *omap_dma4_init(target_phys_addr_t base, qemu_irq *irqs,
1810 2034 struct omap_mpu_state_s *mpu, int fifo,
1811 2035 int chans, omap_clk iclk, omap_clk fclk)
1812 2036 {
1813   - int iomemtype;
  2037 + int iomemtype, i;
1814 2038 struct omap_dma_s *s = (struct omap_dma_s *)
1815 2039 qemu_mallocz(sizeof(struct omap_dma_s));
1816 2040  
... ... @@ -1819,23 +2043,38 @@ struct omap_dma_s *omap_dma4_init(target_phys_addr_t base, qemu_irq *irqs,
1819 2043 s->chans = chans;
1820 2044 s->mpu = mpu;
1821 2045 s->clk = fclk;
  2046 +
  2047 + s->dma = soc_dma_init(s->chans);
  2048 + s->dma->freq = omap_clk_getrate(fclk);
  2049 + s->dma->transfer_fn = omap_dma_transfer_generic;
  2050 + s->dma->setup_fn = omap_dma_transfer_setup;
  2051 + s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 64);
  2052 + s->dma->opaque = s;
  2053 + for (i = 0; i < s->chans; i ++) {
  2054 + s->ch[i].dma = &s->dma->ch[i];
  2055 + s->dma->ch[i].opaque = &s->ch[i];
  2056 + }
  2057 +
1822 2058 memcpy(&s->irq, irqs, sizeof(s->irq));
1823 2059 s->intr_update = omap_dma_interrupts_4_update;
  2060 +
1824 2061 omap_dma_setcaps(s);
1825   - s->tm = qemu_new_timer(vm_clock, (QEMUTimerCB *) omap_dma_channel_run, s);
1826 2062 omap_clk_adduser(s->clk, qemu_allocate_irqs(omap_dma_clk_update, s, 1)[0]);
1827   - mpu->drq = qemu_allocate_irqs(omap_dma_request, s, 64);
1828   - omap_dma_reset(s);
1829   - omap_dma_clk_update(s, 0, 1);
  2063 + omap_dma_reset(s->dma);
  2064 + omap_dma_clk_update(s, 0, !!s->dma->freq);
1830 2065  
1831 2066 iomemtype = cpu_register_io_memory(0, omap_dma4_readfn,
1832 2067 omap_dma4_writefn, s);
1833 2068 cpu_register_physical_memory(s->base, 0x1000, iomemtype);
1834 2069  
1835   - return s;
  2070 + mpu->drq = s->dma->drq;
  2071 +
  2072 + return s->dma;
1836 2073 }
1837 2074  
1838   -struct omap_dma_lcd_channel_s *omap_dma_get_lcdch(struct omap_dma_s *s)
  2075 +struct omap_dma_lcd_channel_s *omap_dma_get_lcdch(struct soc_dma_s *dma)
1839 2076 {
  2077 + struct omap_dma_s *s = dma->opaque;
  2078 +
1840 2079 return &s->lcd_ch;
1841 2080 }
... ...
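To see how omap_dma_transfer_setup sizes a run, consider a channel at the start of its block with a->elements = 8, a->frames = 4, no synchronization and only HALF_FRAME_INTR enabled: the element-sync, frame and packet checks all fail and contribute INT_MAX, the half-frame check yields (8 >> 1) + 0 - 0 = 4, and the always-true block check yields (4 - 1) * 8 + 8 = 32. So min_elems = 4 and dma->bytes = 4 * data_type: the run stops exactly at the half-frame point, HALF_FRAME_INTR is raised because min_elems == elements[omap_dma_intr_half_frame], and the next run resumes from a->element = 4.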
hw/soc_dma.c 0 → 100644
  1 +/*
  2 + * On-chip DMA controller framework.
  3 + *
  4 + * Copyright (C) 2008 Nokia Corporation
  5 + * Written by Andrzej Zaborowski <andrew@openedhand.com>
  6 + *
  7 + * This program is free software; you can redistribute it and/or
  8 + * modify it under the terms of the GNU General Public License as
  9 + * published by the Free Software Foundation; either version 2 or
  10 + * (at your option) version 3 of the License.
  11 + *
  12 + * This program is distributed in the hope that it will be useful,
  13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15 + * GNU General Public License for more details.
  16 + *
  17 + * You should have received a copy of the GNU General Public License
  18 + * along with this program; if not, write to the Free Software
  19 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
  20 + * MA 02111-1307 USA
  21 + */
  22 +#include "qemu-common.h"
  23 +#include "qemu-timer.h"
  24 +#include "soc_dma.h"
  25 +
  26 +void transfer_mem2mem(struct soc_dma_ch_s *ch)
  27 +{
  28 + memcpy(ch->paddr[1], ch->paddr[0], ch->bytes);
  29 + ch->paddr[0] += ch->bytes;
  30 + ch->paddr[1] += ch->bytes;
  31 +}
  32 +
  33 +void transfer_mem2fifo(struct soc_dma_ch_s *ch)
  34 +{
  35 + ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
  36 + ch->paddr[0] += ch->bytes;
  37 +}
  38 +
  39 +void transfer_fifo2mem(struct soc_dma_ch_s *ch)
  40 +{
  41 + ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
  42 + ch->paddr[1] += ch->bytes;
  43 +}
  44 +
  45 +/* This is further optimisable but isn't very important because often
  46 + * DMA peripherals forbid this kind of transfer and even when they don't,
  47 + * operating systems may not need to use them. */
  48 +static void *fifo_buf;
  49 +static int fifo_size;
  50 +void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
  51 +{
  52 + if (ch->bytes > fifo_size)
  53 + fifo_buf = realloc(fifo_buf, fifo_size = ch->bytes);
  54 +
  55 + /* Implement as transfer_fifo2linear + transfer_linear2fifo. */
  56 + ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
  57 + ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
  58 +}
  59 +
  60 +struct dma_s {
  61 + struct soc_dma_s soc;
  62 + int chnum;
  63 + uint64_t ch_enable_mask;
  64 + int64_t channel_freq;
  65 + int enabled_count;
  66 +
  67 + struct memmap_entry_s {
  68 + enum soc_dma_port_type type;
  69 + target_phys_addr_t addr;
  70 + union {
  71 + struct {
  72 + void *opaque;
  73 + soc_dma_io_t fn;
  74 + int out;
  75 + } fifo;
  76 + struct {
  77 + void *base;
  78 + size_t size;
  79 + } mem;
  80 + } u;
  81 + } *memmap;
  82 + int memmap_size;
  83 +
  84 + struct soc_dma_ch_s ch[0];
  85 +};
  86 +
  87 +static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
  88 +{
  89 + int64_t now = qemu_get_clock(vm_clock);
  90 + struct dma_s *dma = (struct dma_s *) ch->dma;
  91 +
  92 + qemu_mod_timer(ch->timer, now + delay_bytes / dma->channel_freq);
  93 +}
  94 +
  95 +static void soc_dma_ch_run(void *opaque)
  96 +{
  97 + struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;
  98 +
  99 + ch->running = 1;
  100 + ch->dma->setup_fn(ch);
  101 + ch->transfer_fn(ch);
  102 + ch->running = 0;
  103 +
  104 + if (ch->enable)
  105 + soc_dma_ch_schedule(ch, ch->bytes);
  106 + ch->bytes = 0;
  107 +}
  108 +
  109 +static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
  110 + target_phys_addr_t addr)
  111 +{
  112 + struct memmap_entry_s *lo;
  113 + int hi;
  114 +
  115 + lo = dma->memmap;
  116 + hi = dma->memmap_size;
  117 +
  118 + while (hi > 1) {
  119 + hi /= 2;
  120 + if (lo[hi].addr <= addr)
  121 + lo += hi;
  122 + }
  123 +
  124 + return lo;
  125 +}
  126 +
  127 +static inline enum soc_dma_port_type soc_dma_ch_update_type(
  128 + struct soc_dma_ch_s *ch, int port)
  129 +{
  130 + struct dma_s *dma = (struct dma_s *) ch->dma;
  131 + struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);
  132 +
  133 + if (entry->type == soc_dma_port_fifo) {
  134 + while (entry < dma->memmap + dma->memmap_size &&
  135 + entry->u.fifo.out != port)
  136 + entry ++;
  137 + if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
  138 + return soc_dma_port_other;
  139 +
  140 + if (ch->type[port] != soc_dma_access_const)
  141 + return soc_dma_port_other;
  142 +
  143 + ch->io_fn[port] = entry->u.fifo.fn;
  144 + ch->io_opaque[port] = entry->u.fifo.opaque;
  145 + return soc_dma_port_fifo;
  146 + } else if (entry->type == soc_dma_port_mem) {
  147 + if (entry->addr > ch->vaddr[port] ||
  148 + entry->addr + entry->u.mem.size <= ch->vaddr[port])
  149 + return soc_dma_port_other;
  150 +
  151 + /* TODO: support constant memory address for source port as used for
  152 + * drawing solid rectangles by PalmOS(R). */
  153 + if (ch->type[port] != soc_dma_access_linear)
  154 + return soc_dma_port_other;
  155 +
  156 + ch->paddr[port] = (uint8_t *) entry->u.mem.base +
  157 + (ch->vaddr[port] - entry->addr);
  158 + /* TODO: save bytes left to the end of the mapping somewhere so we
  159 + * can check we're not reading beyond it. */
  160 + return soc_dma_port_mem;
  161 + } else
  162 + return soc_dma_port_other;
  163 +}
  164 +
  165 +void soc_dma_ch_update(struct soc_dma_ch_s *ch)
  166 +{
  167 + enum soc_dma_port_type src, dst;
  168 +
  169 + src = soc_dma_ch_update_type(ch, 0);
  170 + if (src == soc_dma_port_other) {
  171 + ch->update = 0;
  172 + ch->transfer_fn = ch->dma->transfer_fn;
  173 + return;
  174 + }
  175 + dst = soc_dma_ch_update_type(ch, 1);
  176 +
  177 + /* TODO: use src and dst as array indices. */
  178 + if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
  179 + ch->transfer_fn = transfer_mem2mem;
  180 + else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
  181 + ch->transfer_fn = transfer_mem2fifo;
  182 + else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
  183 + ch->transfer_fn = transfer_fifo2mem;
  184 + else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
  185 + ch->transfer_fn = transfer_fifo2fifo;
  186 + else
  187 + ch->transfer_fn = ch->dma->transfer_fn;
  188 +
  189 + ch->update = (dst != soc_dma_port_other);
  190 +}
  191 +
  192 +static void soc_dma_ch_freq_update(struct dma_s *s)
  193 +{
  194 + if (s->enabled_count)
  195 + /* We completely ignore channel priorities and stuff */
  196 + s->channel_freq = s->soc.freq / s->enabled_count;
  197 + else
  198 + /* TODO: Signal that we want to disable the functional clock and let
  199 + * the platform code decide what to do with it, i.e. check that
  200 + * auto-idle is enabled in the clock controller and if we are stopping
  201 + * the clock, do the same with any parent clocks that had only one
  202 + * user keeping them on and auto-idle enabled. */;
  203 +}
  204 +
  205 +void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
  206 +{
  207 + struct dma_s *dma = (struct dma_s *) ch->dma;
  208 +
  209 + dma->enabled_count += level - ch->enable;
  210 +
  211 + if (level)
  212 + dma->ch_enable_mask |= (uint64_t) 1 << ch->num;
  213 + else
  214 + dma->ch_enable_mask &= ~((uint64_t) 1 << ch->num);
  215 +
  216 + if (level != ch->enable) {
  217 + soc_dma_ch_freq_update(dma);
  218 + ch->enable = level;
  219 +
  220 + if (!ch->enable)
  221 + qemu_del_timer(ch->timer);
  222 + else if (!ch->running)
  223 + soc_dma_ch_run(ch);
  224 + else
  225 + soc_dma_ch_schedule(ch, 1);
  226 + }
  227 +}
  228 +
  229 +void soc_dma_reset(struct soc_dma_s *soc)
  230 +{
  231 + struct dma_s *s = (struct dma_s *) soc;
  232 +
  233 + s->soc.drqbmp = 0;
  234 + s->ch_enable_mask = 0;
  235 + s->enabled_count = 0;
  236 + soc_dma_ch_freq_update(s);
  237 +}
  238 +
  239 +/* TODO: take a functional-clock argument */
  240 +struct soc_dma_s *soc_dma_init(int n)
  241 +{
  242 + int i;
  243 + struct dma_s *s = qemu_mallocz(sizeof(*s) + n * sizeof(*s->ch));
  244 +
  245 + s->chnum = n;
  246 + s->soc.ch = s->ch;
  247 + for (i = 0; i < n; i ++) {
  248 + s->ch[i].dma = &s->soc;
  249 + s->ch[i].num = i;
  250 + s->ch[i].timer = qemu_new_timer(vm_clock, soc_dma_ch_run, &s->ch[i]);
  251 + }
  252 +
  253 + soc_dma_reset(&s->soc);
  254 +
  255 + return &s->soc;
  256 +}
  257 +
  258 +void soc_dma_port_add_fifo(struct soc_dma_s *soc, target_phys_addr_t virt_base,
  259 + soc_dma_io_t fn, void *opaque, int out)
  260 +{
  261 + struct memmap_entry_s *entry;
  262 + struct dma_s *dma = (struct dma_s *) soc;
  263 +
  264 + dma->memmap = realloc(dma->memmap, sizeof(*entry) *
  265 + (dma->memmap_size + 1));
  266 + entry = soc_dma_lookup(dma, virt_base);
  267 +
  268 + if (dma->memmap_size) {
  269 + if (entry->type == soc_dma_port_mem) {
  270 + if (entry->addr <= virt_base &&
  271 + entry->addr + entry->u.mem.size > virt_base) {
  272 + fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
  273 + " collides with RAM region at " TARGET_FMT_lx
  274 + "-" TARGET_FMT_lx "\n", __FUNCTION__,
  275 + (target_ulong) virt_base,
  276 + (target_ulong) entry->addr, (target_ulong)
  277 + (entry->addr + entry->u.mem.size));
  278 + exit(-1);
  279 + }
  280 +
  281 + if (entry->addr <= virt_base)
  282 + entry ++;
  283 + } else
  284 + while (entry < dma->memmap + dma->memmap_size &&
  285 + entry->addr <= virt_base) {
  286 + if (entry->addr == virt_base && entry->u.fifo.out == out) {
  287 + fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
  288 + " collides FIFO at " TARGET_FMT_lx "\n",
  289 + __FUNCTION__, (target_ulong) virt_base,
  290 + (target_ulong) entry->addr);
  291 + exit(-1);
  292 + }
  293 +
  294 + entry ++;
  295 + }
  296 +
  297 + memmove(entry + 1, entry,
  298 + (uint8_t *) (dma->memmap + dma->memmap_size ++) -
  299 + (uint8_t *) entry);
  300 + } else
  301 + dma->memmap_size ++;
  302 +
  303 + entry->addr = virt_base;
  304 + entry->type = soc_dma_port_fifo;
  305 + entry->u.fifo.fn = fn;
  306 + entry->u.fifo.opaque = opaque;
  307 + entry->u.fifo.out = out;
  308 +}
  309 +
  310 +void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
  311 + target_phys_addr_t virt_base, size_t size)
  312 +{
  313 + struct memmap_entry_s *entry;
  314 + struct dma_s *dma = (struct dma_s *) soc;
  315 +
  316 + dma->memmap = realloc(dma->memmap, sizeof(*entry) *
  317 + (dma->memmap_size + 1));
  318 + entry = soc_dma_lookup(dma, virt_base);
  319 +
  320 + if (dma->memmap_size) {
  321 + if (entry->type == soc_dma_port_mem) {
  322 + if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
  323 + (entry->addr <= virt_base &&
  324 + entry->addr + entry->u.mem.size > virt_base)) {
  325 + fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
  326 + " collides with RAM region at " TARGET_FMT_lx
  327 + "-" TARGET_FMT_lx "\n", __FUNCTION__,
  328 + (target_ulong) virt_base,
  329 + (target_ulong) (virt_base + size),
  330 + (target_ulong) entry->addr, (target_ulong)
  331 + (entry->addr + entry->u.mem.size));
  332 + exit(-1);
  333 + }
  334 +
  335 + if (entry->addr <= virt_base)
  336 + entry ++;
  337 + } else {
  338 + if (entry->addr >= virt_base &&
  339 + entry->addr < virt_base + size) {
  340 + fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
  341 + " collides with FIFO at " TARGET_FMT_lx
  342 + "\n", __FUNCTION__,
  343 + (target_ulong) virt_base,
  344 + (target_ulong) (virt_base + size),
  345 + (target_ulong) entry->addr);
  346 + exit(-1);
  347 + }
  348 +
  349 + while (entry < dma->memmap + dma->memmap_size &&
  350 + entry->addr <= virt_base)
  351 + entry ++;
  352 + }
  353 +
  354 + memmove(entry + 1, entry,
  355 + (uint8_t *) (dma->memmap + dma->memmap_size ++) -
  356 + (uint8_t *) entry);
  357 + } else
  358 + dma->memmap_size ++;
  359 +
  360 + entry->addr = virt_base;
  361 + entry->type = soc_dma_port_mem;
  362 + entry->u.mem.base = phys_base;
  363 + entry->u.mem.size = size;
  364 +}
  365 +
  366 +/* TODO: port removal for ports like PCMCIA memory */
... ...
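For reference, soc_dma_lookup is a binary search over the sorted memmap that returns the last entry whose start address is <= the address being resolved; soc_dma_ch_update_type then range-checks the hit. An illustrative layout (addresses invented):

    /* memmap, sorted by addr:
     *   [0] mem   addr=0x10000000  size=0x02000000   (SDRAM)
     *   [1] fifo  addr=0x30000800  out=1             (device TX)
     *
     * soc_dma_lookup(dma, 0x10200000) returns entry [0]; the update
     * confirms 0x10200000 lies in [0x10000000, 0x12000000) and sets
     * ch->paddr[port] = u.mem.base + 0x00200000 for the fast path. */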
hw/soc_dma.h 0 → 100644
  1 +/*
  2 + * On-chip DMA controller framework.
  3 + *
  4 + * Copyright (C) 2008 Nokia Corporation
  5 + * Written by Andrzej Zaborowski <andrew@openedhand.com>
  6 + *
  7 + * This program is free software; you can redistribute it and/or
  8 + * modify it under the terms of the GNU General Public License as
  9 + * published by the Free Software Foundation; either version 2 or
  10 + * (at your option) version 3 of the License.
  11 + *
  12 + * This program is distributed in the hope that it will be useful,
  13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15 + * GNU General Public License for more details.
  16 + *
  17 + * You should have received a copy of the GNU General Public License
  18 + * along with this program; if not, write to the Free Software
  19 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
  20 + * MA 02111-1307 USA
  21 + */
  22 +
  23 +struct soc_dma_s;
  24 +struct soc_dma_ch_s;
  25 +typedef void (*soc_dma_io_t)(void *opaque, uint8_t *buf, int len);
  26 +typedef void (*soc_dma_transfer_t)(struct soc_dma_ch_s *ch);
  27 +
  28 +enum soc_dma_port_type {
  29 + soc_dma_port_mem,
  30 + soc_dma_port_fifo,
  31 + soc_dma_port_other,
  32 +};
  33 +
  34 +enum soc_dma_access_type {
  35 + soc_dma_access_const,
  36 + soc_dma_access_linear,
  37 + soc_dma_access_other,
  38 +};
  39 +
  40 +struct soc_dma_ch_s {
  41 + /* Private */
  42 + struct soc_dma_s *dma;
  43 + int num;
  44 + QEMUTimer *timer;
  45 +
  46 + /* Set by soc_dma.c */
  47 + int enable;
  48 + int update;
  49 +
  50 + /* This should be set by dma->setup_fn(). */
  51 + int bytes;
  52 + /* Initialised by the DMA module, call soc_dma_ch_update after writing. */
  53 + enum soc_dma_access_type type[2];
  54 + target_phys_addr_t vaddr[2]; /* Updated by .transfer_fn(). */
  55 + /* Private */
  56 + void *paddr[2];
  57 + soc_dma_io_t io_fn[2];
  58 + void *io_opaque[2];
  59 +
  60 + int running;
  61 + soc_dma_transfer_t transfer_fn;
  62 +
  63 + /* Set and used by the DMA module. */
  64 + void *opaque;
  65 +};
  66 +
  67 +struct soc_dma_s {
  68 + /* Following fields are set by the SoC DMA module and can be used
  69 + * by anybody. */
  70 + uint64_t drqbmp; /* Is zeroed by soc_dma_reset() */
  71 + qemu_irq *drq;
  72 + void *opaque;
  73 + int64_t freq;
  74 + soc_dma_transfer_t transfer_fn;
  75 + soc_dma_transfer_t setup_fn;
  76 + /* Set by soc_dma_init() for use by the DMA module. */
  77 + struct soc_dma_ch_s *ch;
  78 +};
  79 +
  80 +/* Call to activate or stop a DMA channel. */
  81 +void soc_dma_set_request(struct soc_dma_ch_s *ch, int level);
  82 +/* Call after every write to one of the following fields and before
  83 + * calling soc_dma_set_request(ch, 1):
  84 + * ch->type[0...1],
  85 + * ch->vaddr[0...1],
  86 + * ch->paddr[0...1],
  87 + * or after a soc_dma_port_add_fifo() or soc_dma_port_add_mem(). */
  88 +void soc_dma_ch_update(struct soc_dma_ch_s *ch);
  89 +
  90 +/* The SoC should call this when the DMA module is being reset. */
  91 +void soc_dma_reset(struct soc_dma_s *s);
  92 +struct soc_dma_s *soc_dma_init(int n);
  93 +
  94 +void soc_dma_port_add_fifo(struct soc_dma_s *dma, target_phys_addr_t virt_base,
  95 + soc_dma_io_t fn, void *opaque, int out);
  96 +void soc_dma_port_add_mem(struct soc_dma_s *dma, uint8_t *phys_base,
  97 + target_phys_addr_t virt_base, size_t size);
  98 +
  99 +static inline void soc_dma_port_add_fifo_in(struct soc_dma_s *dma,
  100 + target_phys_addr_t virt_base, soc_dma_io_t fn, void *opaque)
  101 +{
  102 + return soc_dma_port_add_fifo(dma, virt_base, fn, opaque, 0);
  103 +}
  104 +
  105 +static inline void soc_dma_port_add_fifo_out(struct soc_dma_s *dma,
  106 + target_phys_addr_t virt_base, soc_dma_io_t fn, void *opaque)
  107 +{
  108 + return soc_dma_port_add_fifo(dma, virt_base, fn, opaque, 1);
  109 +}
  110 +
  111 +static inline void soc_dma_port_add_mem_ram(struct soc_dma_s *dma,
  112 + ram_addr_t offset, target_phys_addr_t virt_base, size_t size)
  113 +{
  114 + return soc_dma_port_add_mem(dma, phys_ram_base + offset, virt_base, size);
  115 +}
... ...
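The _ram variant only converts a ram_addr_t allocation offset into the host pointer soc_dma needs; for instance, the omap2420_mpu_init() registration above is equivalent to:

    soc_dma_port_add_mem(s->dma, phys_ram_base + q2_base,
                    OMAP2_Q2_BASE, s->sdram_size);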