Commit b5ff1b3127119aa430a6fd309591d584803b7b6e

Authored by bellard
1 parent 0e43e99c

ARM system emulation (Paul Brook)


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1661 c046a42c-6fe2-441c-8c8c-71466251a162
Changelog
1 1 version 0.7.3:
2 2  
  3 + - ARM system emulation: ARM Integrator/CP board with an ARM1026EJ-S
  4 + CPU (Paul Brook)
  5 + - SMP support
3 6 - Mac OS X cocoa improvements (Mike Kronenberg)
4 7 - Mac OS X CoreAudio driver (Mike Kronenberg)
5 8 - DirectSound driver (malc)
... ... @@ -10,7 +13,6 @@ version 0.7.3:
10 13 - Linux host serial port access
11 14 - Linux host low level parallel port access
12 15 - New network emulation code supporting VLANs.
13   - - SMP support
14 16  
15 17 version 0.7.2:
16 18  
... ...
Makefile.target
... ... @@ -211,7 +211,7 @@ LIBOBJS+= op_helper.o helper.o
211 211 endif
212 212  
213 213 ifeq ($(TARGET_BASE_ARCH), arm)
214   -LIBOBJS+= op_helper.o
  214 +LIBOBJS+= op_helper.o helper.o
215 215 endif
216 216  
217 217 # NOTE: the disassembler code is only needed for debugging
... ... @@ -324,6 +324,9 @@ VL_OBJS+= sun4m.o tcx.o lance.o iommu.o m48t59.o magic-load.o slavio_intctl.o
324 324 VL_OBJS+= slavio_timer.o slavio_serial.o slavio_misc.o fdc.o esp.o
325 325 endif
326 326 endif
  327 +ifeq ($(TARGET_BASE_ARCH), arm)
  328 +VL_OBJS+= integratorcp.o ps2.o
  329 +endif
327 330 ifdef CONFIG_GDBSTUB
328 331 VL_OBJS+=gdbstub.o
329 332 endif
... ...
configure
... ... @@ -227,7 +227,7 @@ fi
227 227  
228 228 if test -z "$target_list" ; then
229 229 # these targets are portable
230   - target_list="i386-softmmu ppc-softmmu sparc-softmmu x86_64-softmmu mips-softmmu"
  230 + target_list="i386-softmmu ppc-softmmu sparc-softmmu x86_64-softmmu mips-softmmu arm-softmmu"
231 231 # the following are Linux specific
232 232 if [ "$linux" = "yes" ] ; then
233 233 target_list="i386-user arm-user armeb-user sparc-user ppc-user $target_list"
... ...
cpu-exec.c
... ... @@ -172,7 +172,9 @@ static inline TranslationBlock *tb_find_fast(void)
172 172 pc = cs_base + env->eip;
173 173 #elif defined(TARGET_ARM)
174 174 flags = env->thumb | (env->vfp.vec_len << 1)
175   - | (env->vfp.vec_stride << 4);
  175 + | (env->vfp.vec_stride << 4);
  176 + if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
  177 + flags |= (1 << 6);
176 178 cs_base = 0;
177 179 pc = env->regs[15];
178 180 #elif defined(TARGET_SPARC)
... ... @@ -322,15 +324,6 @@ int cpu_exec(CPUState *env1)
322 324 CC_OP = CC_OP_EFLAGS;
323 325 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
324 326 #elif defined(TARGET_ARM)
325   - {
326   - unsigned int psr;
327   - psr = env->cpsr;
328   - env->CF = (psr >> 29) & 1;
329   - env->NZF = (psr & 0xc0000000) ^ 0x40000000;
330   - env->VF = (psr << 3) & 0x80000000;
331   - env->QF = (psr >> 27) & 1;
332   - env->cpsr = psr & ~CACHED_CPSR_BITS;
333   - }
334 327 #elif defined(TARGET_SPARC)
335 328 #if defined(reg_REGWPTR)
336 329 saved_regwptr = REGWPTR;
... ... @@ -379,6 +372,8 @@ int cpu_exec(CPUState *env1)
379 372 do_interrupt(env);
380 373 #elif defined(TARGET_SPARC)
381 374 do_interrupt(env->exception_index);
  375 +#elif defined(TARGET_ARM)
  376 + do_interrupt(env);
382 377 #endif
383 378 }
384 379 env->exception_index = -1;
... ... @@ -508,8 +503,19 @@ int cpu_exec(CPUState *env1)
508 503 //do_interrupt(0, 0, 0, 0, 0);
509 504 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
510 505 }
  506 +#elif defined(TARGET_ARM)
  507 + if (interrupt_request & CPU_INTERRUPT_FIQ
  508 + && !(env->uncached_cpsr & CPSR_F)) {
  509 + env->exception_index = EXCP_FIQ;
  510 + do_interrupt(env);
  511 + }
  512 + if (interrupt_request & CPU_INTERRUPT_HARD
  513 + && !(env->uncached_cpsr & CPSR_I)) {
  514 + env->exception_index = EXCP_IRQ;
  515 + do_interrupt(env);
  516 + }
511 517 #endif
512   - if (interrupt_request & CPU_INTERRUPT_EXITTB) {
  518 + if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
513 519 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
514 520 /* ensure that no TB jump will be modified as
515 521 the program flow was changed */
... ... @@ -526,7 +532,7 @@ int cpu_exec(CPUState *env1)
526 532 }
527 533 }
528 534 #ifdef DEBUG_EXEC
529   - if ((loglevel & CPU_LOG_EXEC)) {
  535 + if ((loglevel & CPU_LOG_TB_CPU)) {
530 536 #if defined(TARGET_I386)
531 537 /* restore flags in standard format */
532 538 #ifdef reg_EAX
... ... @@ -557,9 +563,7 @@ int cpu_exec(CPUState *env1)
557 563 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
558 564 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
559 565 #elif defined(TARGET_ARM)
560   - env->cpsr = compute_cpsr();
561 566 cpu_dump_state(env, logfile, fprintf, 0);
562   - env->cpsr &= ~CACHED_CPSR_BITS;
563 567 #elif defined(TARGET_SPARC)
564 568 REGWPTR = env->regbase + (env->cwp * 16);
565 569 env->regwptr = REGWPTR;
... ... @@ -760,7 +764,6 @@ int cpu_exec(CPUState *env1)
760 764 EDI = saved_EDI;
761 765 #endif
762 766 #elif defined(TARGET_ARM)
763   - env->cpsr = compute_cpsr();
764 767 /* XXX: Save/restore host fpu exception state?. */
765 768 #elif defined(TARGET_SPARC)
766 769 #if defined(reg_REGWPTR)
... ...
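Note on the ARM hunk in tb_find_fast() above: translated blocks are looked up by (pc, cs_base, flags), so the patch folds the privilege level into bit 6 of the flags word, keeping user-mode and privileged-mode translations of the same address apart. A minimal standalone sketch of just that flag computation follows; the constants mirror the patch, while the harness is illustrative and not part of the commit.

    #include <stdint.h>
    #include <stdio.h>

    #define CPSR_M           0x1f
    #define ARM_CPU_MODE_USR 0x10
    #define ARM_CPU_MODE_SVC 0x13

    /* Mirrors the ARM case in tb_find_fast(): thumb bit, VFP vector
       length/stride, plus bit 6 when not in user mode. */
    static uint32_t arm_tb_flags(int thumb, int vec_len, int vec_stride,
                                 uint32_t uncached_cpsr)
    {
        uint32_t flags = thumb | (vec_len << 1) | (vec_stride << 4);
        if ((uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
            flags |= 1 << 6;
        return flags;
    }

    int main(void)
    {
        /* Same code address, different privilege level -> different keys. */
        printf("user 0x%02x  svc 0x%02x\n",
               arm_tb_flags(0, 0, 0, ARM_CPU_MODE_USR),
               arm_tb_flags(0, 0, 0, ARM_CPU_MODE_SVC));
        return 0;
    }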
exec-all.h
... ... @@ -549,8 +549,10 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
549 549 is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
550 550 #elif defined (TARGET_SPARC)
551 551 is_user = (env->psrs == 0);
  552 +#elif defined (TARGET_ARM)
  553 + is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
552 554 #else
553   -#error "Unimplemented !"
  555 +#error unimplemented CPU
554 556 #endif
555 557 if (__builtin_expect(env->tlb_read[is_user][index].address !=
556 558 (addr & TARGET_PAGE_MASK), 0)) {
... ...
exec.c
... ... @@ -1868,14 +1868,14 @@ int cpu_register_io_memory(int io_index,
1868 1868 int i;
1869 1869  
1870 1870 if (io_index <= 0) {
1871   - if (io_index >= IO_MEM_NB_ENTRIES)
  1871 + if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1872 1872 return -1;
1873 1873 io_index = io_mem_nb++;
1874 1874 } else {
1875 1875 if (io_index >= IO_MEM_NB_ENTRIES)
1876 1876 return -1;
1877 1877 }
1878   -
  1878 +
1879 1879 for(i = 0;i < 3; i++) {
1880 1880 io_mem_read[io_index][i] = mem_read[i];
1881 1881 io_mem_write[io_index][i] = mem_write[i];
... ...
gdbstub.c
... ... @@ -399,7 +399,7 @@ static int cpu_gdb_read_registers(CPUState *env, uint8_t *mem_buf)
399 399 memset (ptr, 0, 8 * 12 + 4);
400 400 ptr += 8 * 12 + 4;
401 401 /* CPSR (4 bytes). */
402   - *(uint32_t *)ptr = tswapl (env->cpsr);
  402 + *(uint32_t *)ptr = tswapl (cpsr_read(env));
403 403 ptr += 4;
404 404  
405 405 return ptr - mem_buf;
... ... @@ -419,7 +419,7 @@ static void cpu_gdb_write_registers(CPUState *env, uint8_t *mem_buf, int size)
419 419 }
420 420 /* Ignore FPA regs and scr. */
421 421 ptr += 8 * 12 + 4;
422   - env->cpsr = tswapl(*(uint32_t *)ptr);
  422 + cpsr_write (env, tswapl(*(uint32_t *)ptr), 0xffffffff);
423 423 }
424 424 #else
425 425 static int cpu_gdb_read_registers(CPUState *env, uint8_t *mem_buf)
... ... @@ -463,6 +463,8 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
463 463 #elif defined (TARGET_SPARC)
464 464 env->pc = addr;
465 465 env->npc = addr + 4;
  466 +#elif defined (TARGET_ARM)
  467 + env->regs[15] = addr;
466 468 #endif
467 469 }
468 470 #ifdef CONFIG_USER_ONLY
... ... @@ -481,6 +483,8 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
481 483 #elif defined (TARGET_SPARC)
482 484 env->pc = addr;
483 485 env->npc = addr + 4;
  486 +#elif defined (TARGET_ARM)
  487 + env->regs[15] = addr;
484 488 #endif
485 489 }
486 490 cpu_single_step(env, 1);
... ...
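Note on the "8 * 12 + 4" arithmetic in the gdbstub hunks above: the GDB 'g' packet for ARM carries r0-r15, then the legacy FPA registers f0-f7 (12 bytes each) plus the 4-byte FPS word, which this stub zero-fills, then the CPSR. A trivial standalone sketch of the resulting layout (not part of the commit):

    #include <stdio.h>

    int main(void)
    {
        int gprs = 16 * 4;        /* r0-r15 */
        int fpa  = 8 * 12 + 4;    /* f0-f7 and fps, sent as zeroes */
        printf("cpsr offset = %d, packet size = %d bytes\n",
               gprs + fpa, gprs + fpa + 4);
        return 0;
    }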
hw/integratorcp.c 0 โ†’ 100644
  1 +/*
  2 + * ARM Integrator CP System emulation.
  3 + *
  4 + * Copyright (c) 2005 CodeSourcery, LLC.
  5 + * Written by Paul Brook
  6 + *
  7 + * This code is licenced under the GPL
  8 + */
  9 +
  10 +#include <vl.h>
  11 +
  12 +#define KERNEL_ARGS_ADDR 0x100
  13 +#define KERNEL_LOAD_ADDR 0x00010000
  14 +#define INITRD_LOAD_ADDR 0x00800000
  15 +
  16 +/* Stub functions for hardware that doesn't exist. */
  17 +void pic_set_irq(int irq, int level)
  18 +{
  19 + cpu_abort (cpu_single_env, "pic_set_irq");
  20 +}
  21 +
  22 +void pic_info(void)
  23 +{
  24 +}
  25 +
  26 +void irq_info(void)
  27 +{
  28 +}
  29 +
  30 +void vga_update_display(void)
  31 +{
  32 +}
  33 +
  34 +void vga_screen_dump(const char *filename)
  35 +{
  36 +}
  37 +
  38 +void vga_invalidate_display(void)
  39 +{
  40 +}
  41 +
  42 +void DMA_run (void)
  43 +{
  44 +}
  45 +
  46 +typedef struct {
  47 + uint32_t flash_offset;
  48 + uint32_t cm_osc;
  49 + uint32_t cm_ctrl;
  50 + uint32_t cm_lock;
  51 + uint32_t cm_auxosc;
  52 + uint32_t cm_sdram;
  53 + uint32_t cm_init;
  54 + uint32_t cm_flags;
  55 + uint32_t cm_nvflags;
  56 + uint32_t int_level;
  57 + uint32_t irq_enabled;
  58 + uint32_t fiq_enabled;
  59 +} integratorcm_state;
  60 +
  61 +static uint8_t integrator_spd[128] = {
  62 + 128, 8, 4, 11, 9, 1, 64, 0, 2, 0xa0, 0xa0, 0, 0, 8, 0, 1,
  63 + 0xe, 4, 0x1c, 1, 2, 0x20, 0xc0, 0, 0, 0, 0, 0x30, 0x28, 0x30, 0x28, 0x40
  64 +};
  65 +
  66 +static uint32_t integratorcm_read(void *opaque, target_phys_addr_t offset)
  67 +{
  68 + integratorcm_state *s = (integratorcm_state *)opaque;
  69 + offset -= 0x10000000;
  70 + if (offset >= 0x100 && offset < 0x200) {
  71 + /* CM_SPD */
  72 + if (offset >= 0x180)
  73 + return 0;
  74 + return integrator_spd[offset >> 2];
  75 + }
  76 + switch (offset >> 2) {
  77 + case 0: /* CM_ID */
  78 + return 0x411a3001;
  79 + case 1: /* CM_PROC */
  80 + return 0;
  81 + case 2: /* CM_OSC */
  82 + return s->cm_osc;
  83 + case 3: /* CM_CTRL */
  84 + return s->cm_ctrl;
  85 + case 4: /* CM_STAT */
  86 + return 0x00100000;
  87 + case 5: /* CM_LOCK */
  88 + if (s->cm_lock == 0xa05f) {
  89 + return 0x1a05f;
  90 + } else {
  91 + return s->cm_lock;
  92 + }
  93 + case 6: /* CM_LMBUSCNT */
  94 + /* ??? High frequency timer. */
  95 + cpu_abort(cpu_single_env, "integratorcm_read: CM_LMBUSCNT");
  96 + case 7: /* CM_AUXOSC */
  97 + return s->cm_auxosc;
  98 + case 8: /* CM_SDRAM */
  99 + return s->cm_sdram;
  100 + case 9: /* CM_INIT */
  101 + return s->cm_init;
  102 + case 10: /* CM_REFCT */
  103 + /* ??? High frequency timer. */
  104 + cpu_abort(cpu_single_env, "integratorcm_read: CM_REFCT");
  105 + case 12: /* CM_FLAGS */
  106 + return s->cm_flags;
  107 + case 14: /* CM_NVFLAGS */
  108 + return s->cm_nvflags;
  109 + case 16: /* CM_IRQ_STAT */
  110 + return s->int_level & s->irq_enabled;
  111 + case 17: /* CM_IRQ_RSTAT */
  112 + return s->int_level;
  113 + case 18: /* CM_IRQ_ENSET */
  114 + return s->irq_enabled;
  115 + case 20: /* CM_SOFT_INTSET */
  116 + return s->int_level & 1;
  117 + case 24: /* CM_FIQ_STAT */
  118 + return s->int_level & s->fiq_enabled;
  119 + case 25: /* CM_FIQ_RSTAT */
  120 + return s->int_level;
  121 + case 26: /* CM_FIQ_ENSET */
  122 + return s->fiq_enabled;
  123 + case 32: /* CM_VOLTAGE_CTL0 */
  124 + case 33: /* CM_VOLTAGE_CTL1 */
  125 + case 34: /* CM_VOLTAGE_CTL2 */
  126 + case 35: /* CM_VOLTAGE_CTL3 */
  127 + /* ??? Voltage control unimplemented. */
  128 + return 0;
  129 + default:
  130 + cpu_abort (cpu_single_env,
  131 + "integratorcm_read: Unimplemented offset 0x%x\n", offset);
  132 + return 0;
  133 + }
  134 +}
  135 +
  136 +static void integratorcm_do_remap(integratorcm_state *s, int flash)
  137 +{
  138 + if (flash) {
  139 + cpu_register_physical_memory(0, 0x100000, IO_MEM_RAM);
  140 + } else {
  141 + cpu_register_physical_memory(0, 0x100000, s->flash_offset | IO_MEM_RAM);
  142 + }
  143 + //??? tlb_flush (cpu_single_env, 1);
  144 +}
  145 +
  146 +static void integratorcm_set_ctrl(integratorcm_state *s, uint32_t value)
  147 +{
  148 + if (value & 8) {
  149 + cpu_abort(cpu_single_env, "Board reset\n");
  150 + }
  151 + if ((s->cm_init ^ value) & 4) {
  152 + integratorcm_do_remap(s, (value & 4) == 0);
  153 + }
  154 + if ((s->cm_init ^ value) & 1) {
  155 + printf("Green LED %s\n", (value & 1) ? "on" : "off");
  156 + }
  157 + s->cm_init = (s->cm_init & ~ 5) | (value & 5);
  158 +}
  159 +
  160 +static void integratorcm_update(integratorcm_state *s)
  161 +{
  162 + /* ??? The CPU irq/fiq is raised when either the core module or base PIC
  163 + are active. */
  164 + if (s->int_level & (s->irq_enabled | s->fiq_enabled))
  165 + cpu_abort(cpu_single_env, "Core module interrupt\n");
  166 +}
  167 +
  168 +static void integratorcm_write(void *opaque, target_phys_addr_t offset,
  169 + uint32_t value)
  170 +{
  171 + integratorcm_state *s = (integratorcm_state *)opaque;
  172 + offset -= 0x10000000;
  173 + switch (offset >> 2) {
  174 + case 2: /* CM_OSC */
  175 + if (s->cm_lock == 0xa05f)
  176 + s->cm_osc = value;
  177 + break;
  178 + case 3: /* CM_CTRL */
  179 + integratorcm_set_ctrl(s, value);
  180 + break;
  181 + case 5: /* CM_LOCK */
  182 + s->cm_lock = value & 0xffff;
  183 + break;
  184 + case 7: /* CM_AUXOSC */
  185 + if (s->cm_lock == 0xa05f)
  186 + s->cm_auxosc = value;
  187 + break;
  188 + case 8: /* CM_SDRAM */
  189 + s->cm_sdram = value;
  190 + break;
  191 + case 9: /* CM_INIT */
  192 + /* ??? This can change the memory bus frequency. */
  193 + s->cm_init = value;
  194 + break;
  195 + case 12: /* CM_FLAGSS */
  196 + s->cm_flags |= value;
  197 + break;
  198 + case 13: /* CM_FLAGSC */
  199 + s->cm_flags &= ~value;
  200 + break;
  201 + case 14: /* CM_NVFLAGSS */
  202 + s->cm_nvflags |= value;
  203 + break;
  204 + case 15: /* CM_NVFLAGSC */
  205 + s->cm_nvflags &= ~value;
  206 + break;
  207 + case 18: /* CM_IRQ_ENSET */
  208 + s->irq_enabled |= value;
  209 + integratorcm_update(s);
  210 + break;
  211 + case 19: /* CM_IRQ_ENCLR */
  212 + s->irq_enabled &= ~value;
  213 + integratorcm_update(s);
  214 + break;
  215 + case 20: /* CM_SOFT_INTSET */
  216 + s->int_level |= (value & 1);
  217 + integratorcm_update(s);
  218 + break;
  219 + case 21: /* CM_SOFT_INTCLR */
  220 + s->int_level &= ~(value & 1);
  221 + integratorcm_update(s);
  222 + break;
  223 + case 26: /* CM_FIQ_ENSET */
  224 + s->fiq_enabled |= value;
  225 + integratorcm_update(s);
  226 + break;
  227 + case 27: /* CM_FIQ_ENCLR */
  228 + s->fiq_enabled &= ~value;
  229 + integratorcm_update(s);
  230 + break;
  231 + case 32: /* CM_VOLTAGE_CTL0 */
  232 + case 33: /* CM_VOLTAGE_CTL1 */
  233 + case 34: /* CM_VOLTAGE_CTL2 */
  234 + case 35: /* CM_VOLTAGE_CTL3 */
  235 + /* ??? Voltage control unimplemented. */
  236 + break;
  237 + default:
  238 + cpu_abort (cpu_single_env,
  239 + "integratorcm_write: Unimplemented offset 0x%x\n", offset);
  240 + break;
  241 + }
  242 +}
  243 +
  244 +/* Integrator/CM control registers. */
  245 +
  246 +static CPUReadMemoryFunc *integratorcm_readfn[] = {
  247 + integratorcm_read,
  248 + integratorcm_read,
  249 + integratorcm_read
  250 +};
  251 +
  252 +static CPUWriteMemoryFunc *integratorcm_writefn[] = {
  253 + integratorcm_write,
  254 + integratorcm_write,
  255 + integratorcm_write
  256 +};
  257 +
  258 +static void integratorcm_init(int memsz, uint32_t flash_offset)
  259 +{
  260 + int iomemtype;
  261 + integratorcm_state *s;
  262 +
  263 + s = (integratorcm_state *)qemu_mallocz(sizeof(integratorcm_state));
  264 + s->cm_osc = 0x01000048;
  265 + /* ??? What should the high bits of this value be? */
  266 + s->cm_auxosc = 0x0007feff;
  267 + s->cm_sdram = 0x00011122;
  268 + if (memsz >= 256) {
  269 + integrator_spd[31] = 64;
  270 + s->cm_sdram |= 0x10;
  271 + } else if (memsz >= 128) {
  272 + integrator_spd[31] = 32;
  273 + s->cm_sdram |= 0x0c;
  274 + } else if (memsz >= 64) {
  275 + integrator_spd[31] = 16;
  276 + s->cm_sdram |= 0x08;
  277 + } else if (memsz >= 32) {
  278 + integrator_spd[31] = 4;
  279 + s->cm_sdram |= 0x04;
  280 + } else {
  281 + integrator_spd[31] = 2;
  282 + }
  283 + memcpy(integrator_spd + 73, "QEMU-MEMORY", 11);
  284 + s->cm_init = 0x00000112;
  285 + s->flash_offset = flash_offset;
  286 +
  287 + iomemtype = cpu_register_io_memory(0, integratorcm_readfn,
  288 + integratorcm_writefn, s);
  289 + cpu_register_physical_memory(0x10000000, 0x007fffff, iomemtype);
  290 + integratorcm_do_remap(s, 1);
  291 + /* ??? Save/restore. */
  292 +}
  293 +
  294 +/* Integrator/CP hardware emulation. */
  295 +/* Primary interrupt controller. */
  296 +
  297 +typedef struct icp_pic_state
  298 +{
  299 + uint32_t base;
  300 + uint32_t level;
  301 + uint32_t irq_enabled;
  302 + uint32_t fiq_enabled;
  303 + void *parent;
  304 + /* -1 if parent is a cpu, otherwise IRQ number on parent PIC. */
  305 + int parent_irq;
  306 +} icp_pic_state;
  307 +
  308 +static void icp_pic_set_level(icp_pic_state *, int, int);
  309 +
  310 +static void icp_pic_update(icp_pic_state *s)
  311 +{
  312 + CPUState *env;
  313 + if (s->parent_irq != -1) {
  314 + uint32_t flags;
  315 +
  316 + flags = (s->level & s->irq_enabled);
  317 + icp_pic_set_level((icp_pic_state *)s->parent, s->parent_irq,
  318 + flags != 0);
  319 + return;
  320 + }
  321 + /* Raise CPU interrupt. */
  322 + env = (CPUState *)s->parent;
  323 + if (s->level & s->fiq_enabled) {
  324 + cpu_interrupt (env, CPU_INTERRUPT_FIQ);
  325 + } else {
  326 + cpu_reset_interrupt (env, CPU_INTERRUPT_FIQ);
  327 + }
  328 + if (s->level & s->irq_enabled) {
  329 + cpu_interrupt (env, CPU_INTERRUPT_HARD);
  330 + } else {
  331 + cpu_reset_interrupt (env, CPU_INTERRUPT_HARD);
  332 + }
  333 +}
  334 +
  335 +static void icp_pic_set_level(icp_pic_state *s, int n, int level)
  336 +{
  337 + if (level)
  338 + s->level |= 1 << n;
  339 + else
  340 + s->level &= ~(1 << n);
  341 + icp_pic_update(s);
  342 +}
  343 +
  344 +static uint32_t icp_pic_read(void *opaque, target_phys_addr_t offset)
  345 +{
  346 + icp_pic_state *s = (icp_pic_state *)opaque;
  347 +
  348 + offset -= s->base;
  349 + switch (offset >> 2) {
  350 + case 0: /* IRQ_STATUS */
  351 + return s->level & s->irq_enabled;
  352 + case 1: /* IRQ_RAWSTAT */
  353 + return s->level;
  354 + case 2: /* IRQ_ENABLESET */
  355 + return s->irq_enabled;
  356 + case 4: /* INT_SOFTSET */
  357 + return s->level & 1;
  358 + case 8: /* FRQ_STATUS */
  359 + return s->level & s->fiq_enabled;
  360 + case 9: /* FRQ_RAWSTAT */
  361 + return s->level;
  362 + case 10: /* FRQ_ENABLESET */
  363 + return s->fiq_enabled;
  364 + case 3: /* IRQ_ENABLECLR */
  365 + case 5: /* INT_SOFTCLR */
  366 + case 11: /* FRQ_ENABLECLR */
  367 + default:
  368 + printf ("icp_pic_read: Bad register offset 0x%x\n", offset);
  369 + return 0;
  370 + }
  371 +}
  372 +
  373 +static void icp_pic_write(void *opaque, target_phys_addr_t offset,
  374 + uint32_t value)
  375 +{
  376 + icp_pic_state *s = (icp_pic_state *)opaque;
  377 + offset -= s->base;
  378 +
  379 + switch (offset >> 2) {
  380 + case 2: /* IRQ_ENABLESET */
  381 + s->irq_enabled |= value;
  382 + break;
  383 + case 3: /* IRQ_ENABLECLR */
  384 + s->irq_enabled &= ~value;
  385 + break;
  386 + case 4: /* INT_SOFTSET */
  387 + if (value & 1)
  388 + icp_pic_set_level(s, 0, 1);
  389 + break;
  390 + case 5: /* INT_SOFTCLR */
  391 + if (value & 1)
  392 + icp_pic_set_level(s, 0, 0);
  393 + break;
  394 + case 10: /* FRQ_ENABLESET */
  395 + s->fiq_enabled |= value;
  396 + break;
  397 + case 11: /* FRQ_ENABLECLR */
  398 + s->fiq_enabled &= ~value;
  399 + break;
  400 + case 0: /* IRQ_STATUS */
  401 + case 1: /* IRQ_RAWSTAT */
  402 + case 8: /* FRQ_STATUS */
  403 + case 9: /* FRQ_RAWSTAT */
  404 + default:
  405 + printf ("icp_pic_write: Bad register offset 0x%x\n", offset);
  406 + return;
  407 + }
  408 + icp_pic_update(s);
  409 +}
  410 +
  411 +static CPUReadMemoryFunc *icp_pic_readfn[] = {
  412 + icp_pic_read,
  413 + icp_pic_read,
  414 + icp_pic_read
  415 +};
  416 +
  417 +static CPUWriteMemoryFunc *icp_pic_writefn[] = {
  418 + icp_pic_write,
  419 + icp_pic_write,
  420 + icp_pic_write
  421 +};
  422 +
  423 +static icp_pic_state *icp_pic_init(uint32_t base, void *parent,
  424 + int parent_irq)
  425 +{
  426 + icp_pic_state *s;
  427 + int iomemtype;
  428 +
  429 + s = (icp_pic_state *)qemu_mallocz(sizeof(icp_pic_state));
  430 + if (!s)
  431 + return NULL;
  432 +
  433 + s->base = base;
  434 + s->parent = parent;
  435 + s->parent_irq = parent_irq;
  436 + iomemtype = cpu_register_io_memory(0, icp_pic_readfn,
  437 + icp_pic_writefn, s);
  438 + cpu_register_physical_memory(base, 0x007fffff, iomemtype);
  439 + /* ??? Save/restore. */
  440 + return s;
  441 +}
  442 +
  443 +/* Timers. */
  444 +
  445 +/* System bus clock speed (40MHz) for timer 0. Not sure about this value. */
  446 +#define ICP_BUS_FREQ 40000000
  447 +
  448 +typedef struct {
  449 + int64_t next_time;
  450 + int64_t expires[3];
  451 + int64_t loaded[3];
  452 + QEMUTimer *timer;
  453 + icp_pic_state *pic;
  454 + uint32_t base;
  455 + uint32_t control[3];
  456 + uint32_t count[3];
  457 + uint32_t limit[3];
  458 + int freq[3];
  459 + int int_level[3];
  460 +} icp_pit_state;
  461 +
  462 +/* Calculate the new expiry time of the given timer. */
  463 +
  464 +static void icp_pit_reload(icp_pit_state *s, int n)
  465 +{
  466 + int64_t delay;
  467 +
  468 + s->loaded[n] = s->expires[n];
  469 + delay = muldiv64(s->count[n], ticks_per_sec, s->freq[n]);
  470 + if (delay == 0)
  471 + delay = 1;
  472 + s->expires[n] += delay;
  473 +}
  474 +
  475 +/* Check all active timers, and schedule the next timer interrupt. */
  476 +
  477 +static void icp_pit_update(icp_pit_state *s, int64_t now)
  478 +{
  479 + int n;
  480 + int64_t next;
  481 +
  482 + next = now;
  483 + for (n = 0; n < 3; n++) {
  484 + /* Ignore disabled timers. */
  485 + if ((s->control[n] & 0x80) == 0)
  486 + continue;
  487 + /* Ignore expired one-shot timers. */
  488 + if (s->count[n] == 0 && s->control[n] & 1)
  489 + continue;
  490 + if (s->expires[n] - now <= 0) {
  491 + /* Timer has expired. */
  492 + s->int_level[n] = 1;
  493 + if (s->control[n] & 1) {
  494 + /* One-shot. */
  495 + s->count[n] = 0;
  496 + } else {
  497 + if ((s->control[n] & 0x40) == 0) {
  498 + /* Free running. */
  499 + if (s->control[n] & 2)
  500 + s->count[n] = 0xffffffff;
  501 + else
  502 + s->count[n] = 0xffff;
  503 + } else {
  504 + /* Periodic. */
  505 + s->count[n] = s->limit[n];
  506 + }
  507 + }
  508 + }
  509 + while (s->expires[n] - now <= 0) {
  510 + icp_pit_reload(s, n);
  511 + }
  512 + }
  513 + /* Update interrupts. */
  514 + for (n = 0; n < 3; n++) {
  515 + if (s->int_level[n] && (s->control[n] & 0x20)) {
  516 + icp_pic_set_level(s->pic, 5 + n, 1);
  517 + } else {
  518 + icp_pic_set_level(s->pic, 5 + n, 0);
  519 + }
  520 + if (next - s->expires[n] < 0)
  521 + next = s->expires[n];
  522 + }
  523 + /* Schedule the next timer interrupt. */
  524 + if (next == now) {
  525 + qemu_del_timer(s->timer);
  526 + s->next_time = 0;
  527 + } else if (next != s->next_time) {
  528 + qemu_mod_timer(s->timer, next);
  529 + s->next_time = next;
  530 + }
  531 +}
  532 +
  533 +/* Return the current value of the timer. */
  534 +static uint32_t icp_pit_getcount(icp_pit_state *s, int n, int64_t now)
  535 +{
  536 + int64_t elapsed;
  537 + int64_t period;
  538 +
  539 + if (s->count[n] == 0)
  540 + return 0;
  541 + if ((s->control[n] & 0x80) == 0)
  542 + return s->count[n];
  543 + elapsed = now - s->loaded[n];
  544 + period = s->expires[n] - s->loaded[n];
  545 + /* If the timer should have expired then return 0. This can happen
  546 + when the host timer signal doesn't occur immediately. It's better to
  547 + have a timer appear to sit at zero for a while than have it wrap
  548 + around before the guest interrupt is raised. */
  549 + /* ??? Could we trigger the interrupt here? */
  550 + if (elapsed > period)
  551 + return 0;
  552 + /* We need to calculate count * elapsed / period without overflowing.
  553 + Scale both elapsed and period so they fit in a 32-bit int. */
  554 + while (period != (int32_t)period) {
  555 + period >>= 1;
  556 + elapsed >>= 1;
  557 + }
  558 + return ((uint64_t)s->count[n] * (uint64_t)(int32_t)elapsed)
  559 + / (int32_t)period;
  560 +}
  561 +
  562 +static uint32_t icp_pit_read(void *opaque, target_phys_addr_t offset)
  563 +{
  564 + int n;
  565 + icp_pit_state *s = (icp_pit_state *)opaque;
  566 +
  567 + offset -= s->base;
  568 + n = offset >> 8;
  569 + if (n > 2)
  570 + cpu_abort (cpu_single_env, "icp_pit_read: Bad timer %x\n", offset);
  571 + switch ((offset & 0xff) >> 2) {
  572 + case 0: /* TimerLoad */
  573 + case 6: /* TimerBGLoad */
  574 + return s->limit[n];
  575 + case 1: /* TimerValue */
  576 + return icp_pit_getcount(s, n, qemu_get_clock(vm_clock));
  577 + case 2: /* TimerControl */
  578 + return s->control[n];
  579 + case 4: /* TimerRIS */
  580 + return s->int_level[n];
  581 + case 5: /* TimerMIS */
  582 + if ((s->control[n] & 0x20) == 0)
  583 + return 0;
  584 + return s->int_level[n];
  585 + default:
  586 + cpu_abort (cpu_single_env, "icp_pit_read: Bad offset %x\n", offset);
  587 + return 0;
  588 + }
  589 +}
  590 +
  591 +static void icp_pit_write(void *opaque, target_phys_addr_t offset,
  592 + uint32_t value)
  593 +{
  594 + icp_pit_state *s = (icp_pit_state *)opaque;
  595 + int n;
  596 + int64_t now;
  597 +
  598 + now = qemu_get_clock(vm_clock);
  599 + offset -= s->base;
  600 + n = offset >> 8;
  601 + if (n > 2)
  602 + cpu_abort (cpu_single_env, "icp_pit_write: Bad offset %x\n", offset);
  603 +
  604 + switch ((offset & 0xff) >> 2) {
  605 + case 0: /* TimerLoad */
  606 + s->limit[n] = value;
  607 + s->count[n] = value;
  608 + s->expires[n] = now;
  609 + icp_pit_reload(s, n);
  610 + break;
  611 + case 1: /* TimerValue */
  612 + /* ??? Linux seems to want to write to this readonly register.
  613 + Ignore it. */
  614 + break;
  615 + case 2: /* TimerControl */
  616 + if (s->control[n] & 0x80) {
  617 + /* Pause the timer if it is running. This may cause some
  618 + inaccuracy due to rounding, but avoids a whole lot of other
  619 + messiness. */
  620 + s->count[n] = icp_pit_getcount(s, n, now);
  621 + }
  622 + s->control[n] = value;
  623 + if (n == 0)
  624 + s->freq[n] = ICP_BUS_FREQ;
  625 + else
  626 + s->freq[n] = 1000000;
  627 + /* ??? Need to recalculate expiry time after changing divisor. */
  628 + switch ((value >> 2) & 3) {
  629 + case 1: s->freq[n] >>= 4; break;
  630 + case 2: s->freq[n] >>= 8; break;
  631 + }
  632 + if (s->control[n] & 0x80) {
  633 + /* Restart the timer if still enabled. */
  634 + s->expires[n] = now;
  635 + icp_pit_reload(s, n);
  636 + }
  637 + break;
  638 + case 3: /* TimerIntClr */
  639 + s->int_level[n] = 0;
  640 + break;
  641 + case 6: /* TimerBGLoad */
  642 + s->limit[n] = value;
  643 + break;
  644 + default:
  645 + cpu_abort (cpu_single_env, "icp_pit_write: Bad offset %x\n", offset);
  646 + }
  647 + icp_pit_update(s, now);
  648 +}
  649 +
  650 +static void icp_pit_tick(void *opaque)
  651 +{
  652 + int64_t now;
  653 +
  654 + now = qemu_get_clock(vm_clock);
  655 + icp_pit_update((icp_pit_state *)opaque, now);
  656 +}
  657 +
  658 +static CPUReadMemoryFunc *icp_pit_readfn[] = {
  659 + icp_pit_read,
  660 + icp_pit_read,
  661 + icp_pit_read
  662 +};
  663 +
  664 +static CPUWriteMemoryFunc *icp_pit_writefn[] = {
  665 + icp_pit_write,
  666 + icp_pit_write,
  667 + icp_pit_write
  668 +};
  669 +
  670 +static void icp_pit_init(uint32_t base, icp_pic_state *pic)
  671 +{
  672 + int iomemtype;
  673 + icp_pit_state *s;
  674 + int n;
  675 +
  676 + s = (icp_pit_state *)qemu_mallocz(sizeof(icp_pit_state));
  677 + s->base = base;
  678 + s->pic = pic;
  679 + s->freq[0] = ICP_BUS_FREQ;
  680 + s->freq[1] = 1000000;
  681 + s->freq[2] = 1000000;
  682 + for (n = 0; n < 3; n++) {
  683 + s->control[n] = 0x20;
  684 + s->count[n] = 0xffffffff;
  685 + }
  686 +
  687 + iomemtype = cpu_register_io_memory(0, icp_pit_readfn,
  688 + icp_pit_writefn, s);
  689 + cpu_register_physical_memory(base, 0x007fffff, iomemtype);
  690 + s->timer = qemu_new_timer(vm_clock, icp_pit_tick, s);
  691 + /* ??? Save/restore. */
  692 +}
  693 +
  694 +/* ARM PrimeCell PL011 UART */
  695 +
  696 +typedef struct {
  697 + uint32_t base;
  698 + uint32_t readbuff;
  699 + uint32_t flags;
  700 + uint32_t lcr;
  701 + uint32_t cr;
  702 + uint32_t dmacr;
  703 + uint32_t int_enabled;
  704 + uint32_t int_level;
  705 + uint32_t read_fifo[16];
  706 + uint32_t ilpr;
  707 + uint32_t ibrd;
  708 + uint32_t fbrd;
  709 + uint32_t ifl;
  710 + int read_pos;
  711 + int read_count;
  712 + int read_trigger;
  713 + CharDriverState *chr;
  714 + icp_pic_state *pic;
  715 + int irq;
  716 +} pl011_state;
  717 +
  718 +#define PL011_INT_TX 0x20
  719 +#define PL011_INT_RX 0x10
  720 +
  721 +#define PL011_FLAG_TXFE 0x80
  722 +#define PL011_FLAG_RXFF 0x40
  723 +#define PL011_FLAG_TXFF 0x20
  724 +#define PL011_FLAG_RXFE 0x10
  725 +
  726 +static const unsigned char pl011_id[] =
  727 +{ 0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1 };
  728 +
  729 +static void pl011_update(pl011_state *s)
  730 +{
  731 + uint32_t flags;
  732 +
  733 + flags = s->int_level & s->int_enabled;
  734 + icp_pic_set_level(s->pic, s->irq, flags != 0);
  735 +}
  736 +
  737 +static uint32_t pl011_read(void *opaque, target_phys_addr_t offset)
  738 +{
  739 + pl011_state *s = (pl011_state *)opaque;
  740 + uint32_t c;
  741 +
  742 + offset -= s->base;
  743 + if (offset >= 0xfe0 && offset < 0x1000) {
  744 + return pl011_id[(offset - 0xfe0) >> 2];
  745 + }
  746 + switch (offset >> 2) {
  747 + case 0: /* UARTDR */
  748 + s->flags &= ~PL011_FLAG_RXFF;
  749 + c = s->read_fifo[s->read_pos];
  750 + if (s->read_count > 0) {
  751 + s->read_count--;
  752 + if (++s->read_pos == 16)
  753 + s->read_pos = 0;
  754 + }
  755 + if (s->read_count == 0) {
  756 + s->flags |= PL011_FLAG_RXFE;
  757 + }
  758 + if (s->read_count == s->read_trigger - 1)
  759 + s->int_level &= ~ PL011_INT_RX;
  760 + pl011_update(s);
  761 + return c;
  762 + case 1: /* UARTCR */
  763 + return 0;
  764 + case 6: /* UARTFR */
  765 + return s->flags;
  766 + case 8: /* UARTILPR */
  767 + return s->ilpr;
  768 + case 9: /* UARTIBRD */
  769 + return s->ibrd;
  770 + case 10: /* UARTFBRD */
  771 + return s->fbrd;
  772 + case 11: /* UARTLCR_H */
  773 + return s->lcr;
  774 + case 12: /* UARTCR */
  775 + return s->cr;
  776 + case 13: /* UARTIFLS */
  777 + return s->ifl;
  778 + case 14: /* UARTIMSC */
  779 + return s->int_enabled;
  780 + case 15: /* UARTRIS */
  781 + return s->int_level;
  782 + case 16: /* UARTMIS */
  783 + return s->int_level & s->int_enabled;
  784 + case 18: /* UARTDMACR */
  785 + return s->dmacr;
  786 + default:
  787 + cpu_abort (cpu_single_env, "pl011_read: Bad offset %x\n", offset);
  788 + return 0;
  789 + }
  790 +}
  791 +
  792 +static void pl011_set_read_trigger(pl011_state *s)
  793 +{
  794 +#if 0
  795 + /* The docs say the RX interrupt is triggered when the FIFO exceeds
  796 + the threshold. However, Linux only reads the FIFO in response to an
  797 + interrupt. Triggering the interrupt when the FIFO is non-empty seems
  798 + to make things work. */
  799 + if (s->lcr & 0x10)
  800 + s->read_trigger = (s->ifl >> 1) & 0x1c;
  801 + else
  802 +#endif
  803 + s->read_trigger = 1;
  804 +}
  805 +
  806 +static void pl011_write(void *opaque, target_phys_addr_t offset,
  807 + uint32_t value)
  808 +{
  809 + pl011_state *s = (pl011_state *)opaque;
  810 + unsigned char ch;
  811 +
  812 + offset -= s->base;
  813 + switch (offset >> 2) {
  814 + case 0: /* UARTDR */
  815 + /* ??? Check if transmitter is enabled. */
  816 + ch = value;
  817 + if (s->chr)
  818 + qemu_chr_write(s->chr, &ch, 1);
  819 + s->int_level |= PL011_INT_TX;
  820 + pl011_update(s);
  821 + break;
  822 + case 1: /* UARTCR */
  823 + s->cr = value;
  824 + break;
  825 + case 8: /* UARTILPR */
  826 + s->ilpr = value;
  827 + break;
  828 + case 9: /* UARTIBRD */
  829 + s->ibrd = value;
  830 + break;
  831 + case 10: /* UARTFBRD */
  832 + s->fbrd = value;
  833 + break;
  834 + case 11: /* UARTLCR_H */
  835 + s->lcr = value;
  836 + pl011_set_read_trigger(s);
  837 + break;
  838 + case 12: /* UARTCR */
  839 + /* ??? Need to implement the enable and loopback bits. */
  840 + s->cr = value;
  841 + break;
  842 + case 13: /* UARTIFLS */
  843 + s->ifl = value;
  844 + pl011_set_read_trigger(s);
  845 + break;
  846 + case 14: /* UARTIMSC */
  847 + s->int_enabled = value;
  848 + pl011_update(s);
  849 + break;
  850 + case 17: /* UARTICR */
  851 + s->int_level &= ~value;
  852 + pl011_update(s);
  853 + break;
  854 + case 18: /* UARTDMACR */
  855 + s->dmacr = value;
  856 + if (value & 3)
  857 + cpu_abort(cpu_single_env, "PL011: DMA not implemented\n");
  858 + break;
  859 + default:
  860 + cpu_abort (cpu_single_env, "pl011_write: Bad offset %x\n", offset);
  861 + }
  862 +}
  863 +
  864 +static int pl011_can_recieve(void *opaque)
  865 +{
  866 + pl011_state *s = (pl011_state *)opaque;
  867 +
  868 + if (s->lcr & 0x10)
  869 + return s->read_count < 16;
  870 + else
  871 + return s->read_count < 1;
  872 +}
  873 +
  874 +static void pl011_recieve(void *opaque, const uint8_t *buf, int size)
  875 +{
  876 + pl011_state *s = (pl011_state *)opaque;
  877 + int slot;
  878 +
  879 + slot = s->read_pos + s->read_count;
  880 + if (slot >= 16)
  881 + slot -= 16;
  882 + s->read_fifo[slot] = *buf;
  883 + s->read_count++;
  884 + s->flags &= ~PL011_FLAG_RXFE;
  885 + if (s->cr & 0x10 || s->read_count == 16) {
  886 + s->flags |= PL011_FLAG_RXFF;
  887 + }
  888 + if (s->read_count == s->read_trigger) {
  889 + s->int_level |= PL011_INT_RX;
  890 + pl011_update(s);
  891 + }
  892 +}
  893 +
  894 +static void pl011_event(void *opaque, int event)
  895 +{
  896 + /* ??? Should probably implement break. */
  897 +}
  898 +
  899 +static CPUReadMemoryFunc *pl011_readfn[] = {
  900 + pl011_read,
  901 + pl011_read,
  902 + pl011_read
  903 +};
  904 +
  905 +static CPUWriteMemoryFunc *pl011_writefn[] = {
  906 + pl011_write,
  907 + pl011_write,
  908 + pl011_write
  909 +};
  910 +
  911 +static void pl011_init(uint32_t base, icp_pic_state *pic, int irq,
  912 + CharDriverState *chr)
  913 +{
  914 + int iomemtype;
  915 + pl011_state *s;
  916 +
  917 + s = (pl011_state *)qemu_mallocz(sizeof(pl011_state));
  918 + iomemtype = cpu_register_io_memory(0, pl011_readfn,
  919 + pl011_writefn, s);
  920 + cpu_register_physical_memory(base, 0x007fffff, iomemtype);
  921 + s->base = base;
  922 + s->pic = pic;
  923 + s->irq = irq;
  924 + s->chr = chr;
  925 + s->read_trigger = 1;
  926 + s->ifl = 0x12;
  927 + s->cr = 0x300;
  928 + s->flags = 0x90;
  929 + if (chr){
  930 + qemu_chr_add_read_handler(chr, pl011_can_recieve, pl011_recieve, s);
  931 + qemu_chr_add_event_handler(chr, pl011_event);
  932 + }
  933 + /* ??? Save/restore. */
  934 +}
  935 +
  936 +/* CP control registers. */
  937 +typedef struct {
  938 + uint32_t base;
  939 +} icp_control_state;
  940 +
  941 +static uint32_t icp_control_read(void *opaque, target_phys_addr_t offset)
  942 +{
  943 + icp_control_state *s = (icp_control_state *)opaque;
  944 + offset -= s->base;
  945 + switch (offset >> 2) {
  946 + case 0: /* CP_IDFIELD */
  947 + return 0x41034003;
  948 + case 1: /* CP_FLASHPROG */
  949 + return 0;
  950 + case 2: /* CP_INTREG */
  951 + return 0;
  952 + case 3: /* CP_DECODE */
  953 + return 0x11;
  954 + default:
  955 + cpu_abort (cpu_single_env, "icp_control_read: Bad offset %x\n", offset);
  956 + return 0;
  957 + }
  958 +}
  959 +
  960 +static void icp_control_write(void *opaque, target_phys_addr_t offset,
  961 + uint32_t value)
  962 +{
  963 + icp_control_state *s = (icp_control_state *)opaque;
  964 + offset -= s->base;
  965 + switch (offset >> 2) {
  966 + case 1: /* CP_FLASHPROG */
  967 + case 2: /* CP_INTREG */
  968 + case 3: /* CP_DECODE */
  969 + /* Nothing interesting implemented yet. */
  970 + break;
  971 + default:
  972 + cpu_abort (cpu_single_env, "icp_control_write: Bad offset %x\n", offset);
  973 + }
  974 +}
  975 +static CPUReadMemoryFunc *icp_control_readfn[] = {
  976 + icp_control_read,
  977 + icp_control_read,
  978 + icp_control_read
  979 +};
  980 +
  981 +static CPUWriteMemoryFunc *icp_control_writefn[] = {
  982 + icp_control_write,
  983 + icp_control_write,
  984 + icp_control_write
  985 +};
  986 +
  987 +static void icp_control_init(uint32_t base)
  988 +{
  989 + int iomemtype;
  990 + icp_control_state *s;
  991 +
  992 + s = (icp_control_state *)qemu_mallocz(sizeof(icp_control_state));
  993 + iomemtype = cpu_register_io_memory(0, icp_control_readfn,
  994 + icp_control_writefn, s);
  995 + cpu_register_physical_memory(base, 0x007fffff, iomemtype);
  996 + s->base = base;
  997 + /* ??? Save/restore. */
  998 +}
  999 +
  1000 +
  1001 +/* Keyboard/Mouse Interface. */
  1002 +
  1003 +typedef struct {
  1004 + void *dev;
  1005 + uint32_t base;
  1006 + uint32_t cr;
  1007 + uint32_t clk;
  1008 + uint32_t last;
  1009 + icp_pic_state *pic;
  1010 + int pending;
  1011 + int irq;
  1012 + int is_mouse;
  1013 +} icp_kmi_state;
  1014 +
  1015 +static void icp_kmi_update(void *opaque, int level)
  1016 +{
  1017 + icp_kmi_state *s = (icp_kmi_state *)opaque;
  1018 + int raise;
  1019 +
  1020 + s->pending = level;
  1021 + raise = (s->pending && (s->cr & 0x10) != 0)
  1022 + || (s->cr & 0x08) != 0;
  1023 + icp_pic_set_level(s->pic, s->irq, raise);
  1024 +}
  1025 +
  1026 +static uint32_t icp_kmi_read(void *opaque, target_phys_addr_t offset)
  1027 +{
  1028 + icp_kmi_state *s = (icp_kmi_state *)opaque;
  1029 + offset -= s->base;
  1030 + if (offset >= 0xfe0 && offset < 0x1000)
  1031 + return 0;
  1032 +
  1033 + switch (offset >> 2) {
  1034 + case 0: /* KMICR */
  1035 + return s->cr;
  1036 + case 1: /* KMISTAT */
  1037 + /* KMIC and KMID bits not implemented. */
  1038 + if (s->pending) {
  1039 + return 0x10;
  1040 + } else {
  1041 + return 0;
  1042 + }
  1043 + case 2: /* KMIDATA */
  1044 + if (s->pending)
  1045 + s->last = ps2_read_data(s->dev);
  1046 + return s->last;
  1047 + case 3: /* KMICLKDIV */
  1048 + return s->clk;
  1049 + case 4: /* KMIIR */
  1050 + return s->pending | 2;
  1051 + default:
  1052 + cpu_abort (cpu_single_env, "icp_kmi_read: Bad offset %x\n", offset);
  1053 + return 0;
  1054 + }
  1055 +}
  1056 +
  1057 +static void icp_kmi_write(void *opaque, target_phys_addr_t offset,
  1058 + uint32_t value)
  1059 +{
  1060 + icp_kmi_state *s = (icp_kmi_state *)opaque;
  1061 + offset -= s->base;
  1062 + switch (offset >> 2) {
  1063 + case 0: /* KMICR */
  1064 + s->cr = value;
  1065 + icp_kmi_update(s, s->pending);
  1066 + /* ??? Need to implement the enable/disable bit. */
  1067 + break;
  1068 + case 2: /* KMIDATA */
  1069 + /* ??? This should toggle the TX interrupt line. */
  1070 + /* ??? This means kbd/mouse can block each other. */
  1071 + if (s->is_mouse) {
  1072 + ps2_write_mouse(s->dev, value);
  1073 + } else {
  1074 + ps2_write_keyboard(s->dev, value);
  1075 + }
  1076 + break;
  1077 + case 3: /* KMICLKDIV */
  1078 + s->clk = value;
  1079 + return;
  1080 + default:
  1081 + cpu_abort (cpu_single_env, "icp_kmi_write: Bad offset %x\n", offset);
  1082 + }
  1083 +}
  1084 +static CPUReadMemoryFunc *icp_kmi_readfn[] = {
  1085 + icp_kmi_read,
  1086 + icp_kmi_read,
  1087 + icp_kmi_read
  1088 +};
  1089 +
  1090 +static CPUWriteMemoryFunc *icp_kmi_writefn[] = {
  1091 + icp_kmi_write,
  1092 + icp_kmi_write,
  1093 + icp_kmi_write
  1094 +};
  1095 +
  1096 +static void icp_kmi_init(uint32_t base, icp_pic_state * pic, int irq,
  1097 + int is_mouse)
  1098 +{
  1099 + int iomemtype;
  1100 + icp_kmi_state *s;
  1101 +
  1102 + s = (icp_kmi_state *)qemu_mallocz(sizeof(icp_kmi_state));
  1103 + iomemtype = cpu_register_io_memory(0, icp_kmi_readfn,
  1104 + icp_kmi_writefn, s);
  1105 + cpu_register_physical_memory(base, 0x007fffff, iomemtype);
  1106 + s->base = base;
  1107 + s->pic = pic;
  1108 + s->irq = irq;
  1109 + s->is_mouse = is_mouse;
  1110 + if (is_mouse)
  1111 + s->dev = ps2_mouse_init(icp_kmi_update, s);
  1112 + else
  1113 + s->dev = ps2_kbd_init(icp_kmi_update, s);
  1114 + /* ??? Save/restore. */
  1115 +}
  1116 +
  1117 +/* The world's second smallest bootloader. Set r0-r2, then jump to kernel. */
  1118 +static uint32_t bootloader[] = {
  1119 + 0xe3a00000, /* mov r0, #0 */
  1120 + 0xe3a01013, /* mov r1, #0x13 */
  1121 + 0xe3811c01, /* orr r1, r1, #0x100 */
  1122 + 0xe59f2000, /* ldr r2, [pc, #0] */
  1123 + 0xe59ff000, /* ldr pc, [pc, #0] */
  1124 + 0, /* Address of kernel args. Set by integratorcp_init. */
  1125 + 0 /* Kernel entry point. Set by integratorcp_init. */
  1126 +};
  1127 +
  1128 +static void set_kernel_args(uint32_t ram_size, int initrd_size,
  1129 + const char *kernel_cmdline)
  1130 +{
  1131 + uint32_t *p;
  1132 +
  1133 + p = (uint32_t *)(phys_ram_base + KERNEL_ARGS_ADDR);
  1134 + /* ATAG_CORE */
  1135 + *(p++) = 5;
  1136 + *(p++) = 0x54410001;
  1137 + *(p++) = 1;
  1138 + *(p++) = 0x1000;
  1139 + *(p++) = 0;
  1140 + /* ATAG_MEM */
  1141 + *(p++) = 4;
  1142 + *(p++) = 0x54410002;
  1143 + *(p++) = ram_size;
  1144 + *(p++) = 0;
  1145 + if (initrd_size) {
  1146 + /* ATAG_INITRD2 */
  1147 + *(p++) = 4;
  1148 + *(p++) = 0x54420005;
  1149 + *(p++) = INITRD_LOAD_ADDR;
  1150 + *(p++) = initrd_size;
  1151 + }
  1152 + if (kernel_cmdline && *kernel_cmdline) {
  1153 + /* ATAG_CMDLINE */
  1154 + int cmdline_size;
  1155 +
  1156 + cmdline_size = strlen(kernel_cmdline);
  1157 + memcpy (p + 2, kernel_cmdline, cmdline_size + 1);
  1158 + cmdline_size = (cmdline_size >> 2) + 1;
  1159 + *(p++) = cmdline_size + 2;
  1160 + *(p++) = 0x54410009;
  1161 + p += cmdline_size;
  1162 + }
  1163 + /* ATAG_END */
  1164 + *(p++) = 0;
  1165 + *(p++) = 0;
  1166 +}
  1167 +
  1168 +/* Board init. */
  1169 +
  1170 +static void integratorcp_init(int ram_size, int vga_ram_size, int boot_device,
  1171 + DisplayState *ds, const char **fd_filename, int snapshot,
  1172 + const char *kernel_filename, const char *kernel_cmdline,
  1173 + const char *initrd_filename)
  1174 +{
  1175 + CPUState *env;
  1176 + uint32_t bios_offset;
  1177 + icp_pic_state *pic;
  1178 + int kernel_size;
  1179 + int initrd_size;
  1180 +
  1181 + env = cpu_init();
  1182 + bios_offset = ram_size + vga_ram_size;
  1183 + /* ??? On a real system the first 1Mb is mapped as SSRAM or boot flash. */
  1184 + /* ??? RAM should repeat to fill physical memory space. */
  1185 + /* SDRAM at address zero. */
  1186 + cpu_register_physical_memory(0, ram_size, IO_MEM_RAM);
  1187 + /* And again at address 0x80000000 */
  1188 + cpu_register_physical_memory(0x80000000, ram_size, IO_MEM_RAM);
  1189 +
  1190 + integratorcm_init(ram_size >> 20, bios_offset);
  1191 + pic = icp_pic_init(0x14000000, env, -1);
  1192 + icp_pic_init(0xca000000, pic, 26);
  1193 + icp_pit_init(0x13000000, pic);
  1194 + pl011_init(0x16000000, pic, 1, serial_hds[0]);
  1195 + pl011_init(0x17000000, pic, 2, serial_hds[1]);
  1196 + icp_control_init(0xcb000000);
  1197 + icp_kmi_init(0x18000000, pic, 3, 0);
  1198 + icp_kmi_init(0x19000000, pic, 4, 1);
  1199 +
  1200 + /* Load the kernel. */
  1201 + if (!kernel_filename) {
  1202 + fprintf(stderr, "Kernel image must be specified\n");
  1203 + exit(1);
  1204 + }
  1205 + kernel_size = load_image(kernel_filename,
  1206 + phys_ram_base + KERNEL_LOAD_ADDR);
  1207 + if (kernel_size < 0) {
  1208 + fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename);
  1209 + exit(1);
  1210 + }
  1211 + if (initrd_filename) {
  1212 + initrd_size = load_image(initrd_filename,
  1213 + phys_ram_base + INITRD_LOAD_ADDR);
  1214 + if (initrd_size < 0) {
  1215 + fprintf(stderr, "qemu: could not load initrd '%s'\n",
  1216 + initrd_filename);
  1217 + exit(1);
  1218 + }
  1219 + } else {
  1220 + initrd_size = 0;
  1221 + }
  1222 + bootloader[5] = KERNEL_ARGS_ADDR;
  1223 + bootloader[6] = KERNEL_LOAD_ADDR;
  1224 + memcpy(phys_ram_base, bootloader, sizeof(bootloader));
  1225 + set_kernel_args(ram_size, initrd_size, kernel_cmdline);
  1226 +}
  1227 +
  1228 +QEMUMachine integratorcp_machine = {
  1229 + "integratorcp",
  1230 + "ARM Integrator/CP",
  1231 + integratorcp_init,
  1232 +};
... ...
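Note on icp_pit_getcount() above: it interpolates the counter as count * elapsed / period without overflowing the 64-bit multiply, by shifting elapsed and period down in step until period fits in 32 bits. A minimal standalone sketch of that scaling; the input values are made up for illustration (in the device they come from qemu_get_clock() and ticks_per_sec) and the harness is not part of the commit.

    #include <stdint.h>
    #include <stdio.h>

    /* Same trick as icp_pit_getcount(): shrink elapsed and period together
       until period fits in an int32_t, so count * elapsed stays within
       64 bits.  Dropping low-order bits only costs sub-tick precision. */
    static uint32_t scaled_count(uint32_t count, int64_t elapsed, int64_t period)
    {
        if (elapsed > period)
            return 0;                   /* timer should already have fired */
        while (period != (int32_t)period) {
            period >>= 1;
            elapsed >>= 1;
        }
        return ((uint64_t)count * (uint64_t)(int32_t)elapsed) / (int32_t)period;
    }

    int main(void)
    {
        /* A full 32-bit reload, a period far beyond 32 bits of clock ticks,
           one third of which has elapsed. */
        int64_t period = (int64_t)0xffffffffu * 1000;
        int64_t elapsed = period / 3;
        printf("count = %u\n", scaled_count(0xffffffffu, elapsed, period));
        return 0;
    }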
linux-user/main.c
... ... @@ -331,6 +331,7 @@ void cpu_loop(CPUARMState *env)
331 331 int trapnr;
332 332 unsigned int n, insn;
333 333 target_siginfo_t info;
  334 + uint32_t addr;
334 335  
335 336 for(;;) {
336 337 trapnr = cpu_arm_exec(env);
... ... @@ -397,13 +398,18 @@ void cpu_loop(CPUARMState *env)
397 398 /* just indicate that signals should be handled asap */
398 399 break;
399 400 case EXCP_PREFETCH_ABORT:
  401 + addr = env->cp15.c6_insn;
  402 + goto do_segv;
400 403 case EXCP_DATA_ABORT:
  404 + addr = env->cp15.c6_data;
  405 + goto do_segv;
  406 + do_segv:
401 407 {
402 408 info.si_signo = SIGSEGV;
403 409 info.si_errno = 0;
404 410 /* XXX: check env->error_code */
405 411 info.si_code = TARGET_SEGV_MAPERR;
406   - info._sifields._sigfault._addr = env->cp15_6;
  412 + info._sifields._sigfault._addr = addr;
407 413 queue_signal(info.si_signo, &info);
408 414 }
409 415 break;
... ... @@ -1190,10 +1196,10 @@ int main(int argc, char **argv)
1190 1196 #elif defined(TARGET_ARM)
1191 1197 {
1192 1198 int i;
  1199 + cpsr_write(env, regs->uregs[16], 0xffffffff);
1193 1200 for(i = 0; i < 16; i++) {
1194 1201 env->regs[i] = regs->uregs[i];
1195 1202 }
1196   - env->cpsr = regs->uregs[16];
1197 1203 ts->stack_base = info->start_stack;
1198 1204 ts->heap_base = info->brk;
1199 1205 /* This will be filled in on the first SYS_HEAPINFO call. */
... ...
linux-user/signal.c
... ... @@ -1003,7 +1003,7 @@ setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1003 1003 __put_user_error(env->regs[14], &sc->arm_lr, err);
1004 1004 __put_user_error(env->regs[15], &sc->arm_pc, err);
1005 1005 #ifdef TARGET_CONFIG_CPU_32
1006   - __put_user_error(env->cpsr, &sc->arm_cpsr, err);
  1006 + __put_user_error(cpsr_read(env), &sc->arm_cpsr, err);
1007 1007 #endif
1008 1008  
1009 1009 __put_user_error(/* current->thread.trap_no */ 0, &sc->trap_no, err);
... ... @@ -1040,9 +1040,9 @@ setup_return(CPUState *env, struct emulated_sigaction *ka,
1040 1040 target_ulong retcode;
1041 1041 int thumb = 0;
1042 1042 #if defined(TARGET_CONFIG_CPU_32)
  1043 +#if 0
1043 1044 target_ulong cpsr = env->cpsr;
1044 1045  
1045   -#if 0
1046 1046 /*
1047 1047 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
1048 1048 */
... ... @@ -1088,9 +1088,11 @@ setup_return(CPUState *env, struct emulated_sigaction *ka,
1088 1088 env->regs[14] = retcode;
1089 1089 env->regs[15] = handler & (thumb ? ~1 : ~3);
1090 1090  
  1091 +#if 0
1091 1092 #ifdef TARGET_CONFIG_CPU_32
1092 1093 env->cpsr = cpsr;
1093 1094 #endif
  1095 +#endif
1094 1096  
1095 1097 return 0;
1096 1098 }
... ... @@ -1157,6 +1159,7 @@ static int
1157 1159 restore_sigcontext(CPUState *env, struct target_sigcontext *sc)
1158 1160 {
1159 1161 int err = 0;
  1162 + uint32_t cpsr;
1160 1163  
1161 1164 __get_user_error(env->regs[0], &sc->arm_r0, err);
1162 1165 __get_user_error(env->regs[1], &sc->arm_r1, err);
... ... @@ -1175,7 +1178,8 @@ restore_sigcontext(CPUState *env, struct target_sigcontext *sc)
1175 1178 __get_user_error(env->regs[14], &sc->arm_lr, err);
1176 1179 __get_user_error(env->regs[15], &sc->arm_pc, err);
1177 1180 #ifdef TARGET_CONFIG_CPU_32
1178   - __get_user_error(env->cpsr, &sc->arm_cpsr, err);
  1181 + __get_user_error(cpsr, &sc->arm_cpsr, err);
  1182 + cpsr_write(env, cpsr, 0xffffffff);
1179 1183 #endif
1180 1184  
1181 1185 err |= !valid_user_regs(env);
... ...
softmmu_header.h
... ... @@ -59,6 +59,10 @@
59 59 #define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM)
60 60 #elif defined (TARGET_SPARC)
61 61 #define CPU_MEM_INDEX ((env->psrs) == 0)
  62 +#elif defined (TARGET_ARM)
  63 +#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
  64 +#else
  65 +#error unsupported CPU
62 66 #endif
63 67 #define MMUSUFFIX _mmu
64 68  
... ... @@ -72,6 +76,10 @@
72 76 #define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM)
73 77 #elif defined (TARGET_SPARC)
74 78 #define CPU_MEM_INDEX ((env->psrs) == 0)
  79 +#elif defined (TARGET_ARM)
  80 +#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
  81 +#else
  82 +#error unsupported CPU
75 83 #endif
76 84 #define MMUSUFFIX _cmmu
77 85  
... ...
target-arm/cpu.h
... ... @@ -32,6 +32,8 @@
32 32 #define EXCP_SWI 2 /* software interrupt */
33 33 #define EXCP_PREFETCH_ABORT 3
34 34 #define EXCP_DATA_ABORT 4
  35 +#define EXCP_IRQ 5
  36 +#define EXCP_FIQ 6
35 37  
36 38 /* We currently assume float and double are IEEE single and double
37 39 precision respectively.
... ... @@ -42,8 +44,22 @@
42 44 */
43 45  
44 46 typedef struct CPUARMState {
  47 + /* Regs for current mode. */
45 48 uint32_t regs[16];
46   - uint32_t cpsr;
  49 + /* Frequently accessed CPSR bits are stored separately for efficiency.
  50 + This contains all the other bits. Use cpsr_{read,write} to access
  51 + the whole CPSR. */
  52 + uint32_t uncached_cpsr;
  53 + uint32_t spsr;
  54 +
  55 + /* Banked registers. */
  56 + uint32_t banked_spsr[6];
  57 + uint32_t banked_r13[6];
  58 + uint32_t banked_r14[6];
  59 +
  60 + /* These hold r8-r12. */
  61 + uint32_t usr_regs[5];
  62 + uint32_t fiq_regs[5];
47 63  
48 64 /* cpsr flag cache for faster execution */
49 65 uint32_t CF; /* 0 or 1 */
... ... @@ -53,8 +69,21 @@ typedef struct CPUARMState {
53 69  
54 70 int thumb; /* 0 = arm mode, 1 = thumb mode */
55 71  
56   - /* coprocessor 15 (MMU) status */
57   - uint32_t cp15_6;
  72 + /* System control coprocessor (cp15) */
  73 + struct {
  74 + uint32_t c1_sys; /* System control register. */
  75 + uint32_t c1_coproc; /* Coprocessor access register. */
  76 + uint32_t c2; /* MMU translation table base. */
  77 + uint32_t c3; /* MMU domain access control register. */
  78 + uint32_t c5_insn; /* Fault status registers. */
  79 + uint32_t c5_data;
  80 + uint32_t c6_insn; /* Fault address registers. */
  81 + uint32_t c6_data;
  82 + uint32_t c9_insn; /* Cache lockdown registers. */
  83 + uint32_t c9_data;
  84 + uint32_t c13_fcse; /* FCSE PID. */
  85 + uint32_t c13_context; /* Context ID. */
  86 + } cp15;
58 87  
59 88 /* exception/interrupt handling */
60 89 jmp_buf jmp_env;
... ... @@ -87,6 +116,9 @@ typedef struct CPUARMState {
87 116 CPUARMState *cpu_arm_init(void);
88 117 int cpu_arm_exec(CPUARMState *s);
89 118 void cpu_arm_close(CPUARMState *s);
  119 +void do_interrupt(CPUARMState *);
  120 +void switch_mode(CPUARMState *, int);
  121 +
90 122 /* you can call this signal handler from your SIGBUS and SIGSEGV
91 123 signal handlers to inform the virtual CPU of exceptions. non zero
92 124 is returned if the signal was handled by the virtual CPU. */
... ... @@ -94,7 +126,69 @@ struct siginfo;
94 126 int cpu_arm_signal_handler(int host_signum, struct siginfo *info,
95 127 void *puc);
96 128  
  129 +#define CPSR_M (0x1f)
  130 +#define CPSR_T (1 << 5)
  131 +#define CPSR_F (1 << 6)
  132 +#define CPSR_I (1 << 7)
  133 +#define CPSR_A (1 << 8)
  134 +#define CPSR_E (1 << 9)
  135 +#define CPSR_IT_2_7 (0xfc00)
  136 +/* Bits 20-23 reserved. */
  137 +#define CPSR_J (1 << 24)
  138 +#define CPSR_IT_0_1 (3 << 25)
  139 +#define CPSR_Q (1 << 27)
  140 +#define CPSR_NZCV (0xf << 28)
  141 +
  142 +#define CACHED_CPSR_BITS (CPSR_T | CPSR_Q | CPSR_NZCV)
  143 +/* Return the current CPSR value. */
  144 +static inline uint32_t cpsr_read(CPUARMState *env)
  145 +{
  146 + int ZF;
  147 + ZF = (env->NZF == 0);
  148 + return env->uncached_cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
  149 + (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
  150 + | (env->thumb << 5);
  151 +}
  152 +
  153 +/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */
  154 +static inline void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
  155 +{
  156 + /* NOTE: N = 1 and Z = 1 cannot be stored currently */
  157 + if (mask & CPSR_NZCV) {
  158 + env->NZF = (val & 0xc0000000) ^ 0x40000000;
  159 + env->CF = (val >> 29) & 1;
  160 + env->VF = (val << 3) & 0x80000000;
  161 + }
  162 + if (mask & CPSR_Q)
  163 + env->QF = ((val & CPSR_Q) != 0);
  164 + if (mask & CPSR_T)
  165 + env->thumb = ((val & CPSR_T) != 0);
  166 +
  167 + if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
  168 + switch_mode(env, val & CPSR_M);
  169 + }
  170 + mask &= ~CACHED_CPSR_BITS;
  171 + env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
  172 +}
  173 +
  174 +enum arm_cpu_mode {
  175 + ARM_CPU_MODE_USR = 0x10,
  176 + ARM_CPU_MODE_FIQ = 0x11,
  177 + ARM_CPU_MODE_IRQ = 0x12,
  178 + ARM_CPU_MODE_SVC = 0x13,
  179 + ARM_CPU_MODE_ABT = 0x17,
  180 + ARM_CPU_MODE_UND = 0x1b,
  181 + ARM_CPU_MODE_SYS = 0x1f
  182 +};
  183 +
  184 +#if defined(CONFIG_USER_ONLY)
97 185 #define TARGET_PAGE_BITS 12
  186 +#else
  187 +/* The ARM MMU allows 1k pages. */
  188 +/* ??? Linux doesn't actually use these, and they're deprecated in recent
  189 + architecture revisions. Maybe add a configure option to disable them. */
  190 +#define TARGET_PAGE_BITS 10
  191 +#endif
98 192 #include "cpu-all.h"
99 193  
100 194 #endif
... ...
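Note on cpsr_read()/cpsr_write() above: the NZCV and Q flags live in separate cached fields (NZF, CF, VF, QF), with Z represented implicitly as NZF == 0, and are reassembled on demand. A minimal standalone sketch of that encoding; the struct and helper names are stand-ins rather than QEMU's CPUARMState, and the Thumb bit and write mask handled by the real code are omitted.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the cached-flag fields of CPUARMState. */
    typedef struct {
        uint32_t uncached;  /* mode, I/F bits, everything not cached below */
        uint32_t CF;        /* 0 or 1 */
        uint32_t VF;        /* V flag kept in bit 31 */
        uint32_t NZF;       /* N kept in bit 31, Z encoded as NZF == 0 */
        uint32_t QF;        /* 0 or 1 */
    } flags_t;

    static uint32_t read_cpsr(const flags_t *f)
    {
        int ZF = (f->NZF == 0);
        return f->uncached | (f->NZF & 0x80000000) | (ZF << 30)
            | (f->CF << 29) | ((f->VF & 0x80000000) >> 3) | (f->QF << 27);
    }

    static void write_cpsr(flags_t *f, uint32_t val)
    {
        /* As in cpsr_write(): N=1 together with Z=1 cannot be represented,
           because Z only exists as "NZF == 0". */
        f->NZF = (val & 0xc0000000) ^ 0x40000000;
        f->CF = (val >> 29) & 1;
        f->VF = (val << 3) & 0x80000000;
        f->QF = (val >> 27) & 1;
        f->uncached = val & ~0xf8000000;  /* keep only the uncached bits */
    }

    int main(void)
    {
        flags_t f = {0};
        uint32_t in = 0x90000193;   /* N=1, V=1, SVC mode, A/I masked */
        write_cpsr(&f, in);
        printf("%08x -> %08x\n", in, read_cpsr(&f));
        assert(read_cpsr(&f) == in);
        return 0;
    }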
target-arm/exec.h
... ... @@ -34,16 +34,6 @@ register uint32_t T2 asm(AREG3);
34 34 #include "cpu.h"
35 35 #include "exec-all.h"
36 36  
37   -/* Implemented CPSR bits. */
38   -#define CACHED_CPSR_BITS 0xf8000000
39   -static inline int compute_cpsr(void)
40   -{
41   - int ZF;
42   - ZF = (env->NZF == 0);
43   - return env->cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
44   - (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27);
45   -}
46   -
47 37 static inline void env_to_regs(void)
48 38 {
49 39 }
... ... @@ -55,10 +45,17 @@ static inline void regs_to_env(void)
55 45 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
56 46 int is_user, int is_softmmu);
57 47  
  48 +#if !defined(CONFIG_USER_ONLY)
  49 +#include "softmmu_exec.h"
  50 +#endif
  51 +
58 52 /* In op_helper.c */
59 53  
60 54 void cpu_lock(void);
61 55 void cpu_unlock(void);
  56 +void helper_set_cp15(CPUState *, uint32_t, uint32_t);
  57 +uint32_t helper_get_cp15(CPUState *, uint32_t);
  58 +
62 59 void cpu_loop_exit(void);
63 60  
64 61 void raise_exception(int);
... ...
target-arm/helper.c 0 โ†’ 100644
  1 +#include <stdio.h>
  2 +#include <stdlib.h>
  3 +#include <string.h>
  4 +
  5 +#include "cpu.h"
  6 +#include "exec-all.h"
  7 +
  8 +#if defined(CONFIG_USER_ONLY)
  9 +
  10 +void do_interrupt (CPUState *env)
  11 +{
  12 + env->exception_index = -1;
  13 +}
  14 +
  15 +int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
  16 + int is_user, int is_softmmu)
  17 +{
  18 + if (rw == 2) {
  19 + env->exception_index = EXCP_PREFETCH_ABORT;
  20 + env->cp15.c6_insn = address;
  21 + } else {
  22 + env->exception_index = EXCP_DATA_ABORT;
  23 + env->cp15.c6_data = address;
  24 + }
  25 + return 1;
  26 +}
  27 +
  28 +target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
  29 +{
  30 + return addr;
  31 +}
  32 +
  33 +/* These should probably raise undefined insn exceptions. */
  34 +void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
  35 +{
  36 + cpu_abort(env, "cp15 insn %08x\n", insn);
  37 +}
  38 +
  39 +uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
  40 +{
  41 + cpu_abort(env, "cp15 insn %08x\n", insn);
  42 + return 0;
  43 +}
  44 +
  45 +void switch_mode(CPUState *env, int mode)
  46 +{
  47 + if (mode != ARM_CPU_MODE_USR)
  48 + cpu_abort(env, "Tried to switch out of user mode\n");
  49 +}
  50 +
  51 +#else
  52 +
  53 +/* Map CPU modes onto saved register banks. */
  54 +static inline int bank_number (int mode)
  55 +{
  56 + switch (mode) {
  57 + case ARM_CPU_MODE_USR:
  58 + case ARM_CPU_MODE_SYS:
  59 + return 0;
  60 + case ARM_CPU_MODE_SVC:
  61 + return 1;
  62 + case ARM_CPU_MODE_ABT:
  63 + return 2;
  64 + case ARM_CPU_MODE_UND:
  65 + return 3;
  66 + case ARM_CPU_MODE_IRQ:
  67 + return 4;
  68 + case ARM_CPU_MODE_FIQ:
  69 + return 5;
  70 + }
  71 + cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
  72 + return -1;
  73 +}
  74 +
  75 +void switch_mode(CPUState *env, int mode)
  76 +{
  77 + int old_mode;
  78 + int i;
  79 +
  80 + old_mode = env->uncached_cpsr & CPSR_M;
  81 + if (mode == old_mode)
  82 + return;
  83 +
  84 + if (old_mode == ARM_CPU_MODE_FIQ) {
  85 + memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
  86 + memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
  87 + } else if (mode == ARM_CPU_MODE_FIQ) {
  88 + memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
  89 + memcpy (env->regs, env->fiq_regs + 8, 5 * sizeof(uint32_t));
  90 + }
  91 +
  92 + i = bank_number(old_mode);
  93 + env->banked_r13[i] = env->regs[13];
  94 + env->banked_r14[i] = env->regs[14];
  95 + env->banked_spsr[i] = env->spsr;
  96 +
  97 + i = bank_number(mode);
  98 + env->regs[13] = env->banked_r13[i];
  99 + env->regs[14] = env->banked_r14[i];
  100 + env->spsr = env->banked_spsr[i];
  101 +}
  102 +
  103 +/* Handle a CPU exception. */
  104 +void do_interrupt(CPUARMState *env)
  105 +{
  106 + uint32_t addr;
  107 + uint32_t mask;
  108 + int new_mode;
  109 + uint32_t offset;
  110 +
  111 + /* TODO: Vectored interrupt controller. */
  112 + switch (env->exception_index) {
  113 + case EXCP_UDEF:
  114 + new_mode = ARM_CPU_MODE_UND;
  115 + addr = 0x04;
  116 + mask = CPSR_I;
  117 + if (env->thumb)
  118 + offset = 2;
  119 + else
  120 + offset = 4;
  121 + break;
  122 + case EXCP_SWI:
  123 + new_mode = ARM_CPU_MODE_SVC;
  124 + addr = 0x08;
  125 + mask = CPSR_I;
  126 +        /* The PC already points to the next instruction. */
  127 + offset = 0;
  128 + break;
  129 + case EXCP_PREFETCH_ABORT:
  130 + new_mode = ARM_CPU_MODE_ABT;
  131 + addr = 0x0c;
  132 + mask = CPSR_A | CPSR_I;
  133 + offset = 4;
  134 + break;
  135 + case EXCP_DATA_ABORT:
  136 + new_mode = ARM_CPU_MODE_ABT;
  137 + addr = 0x10;
  138 + mask = CPSR_A | CPSR_I;
  139 + offset = 8;
  140 + break;
  141 + case EXCP_IRQ:
  142 + new_mode = ARM_CPU_MODE_IRQ;
  143 + addr = 0x18;
  144 + /* Disable IRQ and imprecise data aborts. */
  145 + mask = CPSR_A | CPSR_I;
  146 + offset = 4;
  147 + break;
  148 + case EXCP_FIQ:
  149 + new_mode = ARM_CPU_MODE_FIQ;
  150 + addr = 0x1c;
  151 + /* Disable FIQ, IRQ and imprecise data aborts. */
  152 + mask = CPSR_A | CPSR_I | CPSR_F;
  153 + offset = 4;
  154 + break;
  155 + default:
  156 + cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
  157 + return; /* Never happens. Keep compiler happy. */
  158 + }
  159 + /* High vectors. */
  160 + if (env->cp15.c1_sys & (1 << 13)) {
  161 + addr += 0xffff0000;
  162 + }
  163 + switch_mode (env, new_mode);
  164 + env->spsr = cpsr_read(env);
  165 + /* Switch to the new mode, and clear the thumb bit. */
  166 + /* ??? Thumb interrupt handlers not implemented. */
  167 + env->uncached_cpsr = (env->uncached_cpsr & ~(CPSR_M | CPSR_T)) | new_mode;
  168 + env->uncached_cpsr |= mask;
  169 + env->regs[14] = env->regs[15] + offset;
  170 + env->regs[15] = addr;
  171 + env->interrupt_request |= CPU_INTERRUPT_EXITTB;
  172 +}
  173 +
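The offsets picked in the switch above are the standard ARM exception vectors. For reference, here is a standalone table of the cases do_interrupt handles (illustration only, not taken from the patch; the names arm_vectors and the tiny main are made up). When the V bit in cp15 c1 (bit 13) is set, each entry moves to 0xffff0000 plus the offset, which is exactly what the code adds just before switching mode.

#include <stdio.h>
#include <stdint.h>

/* ARM exception vector offsets, matching the addr values chosen in
   do_interrupt() above.  Reset (0x00) is not routed through do_interrupt. */
static const struct { uint32_t offset; const char *name; } arm_vectors[] = {
    { 0x04, "undefined instruction" },
    { 0x08, "software interrupt" },
    { 0x0c, "prefetch abort" },
    { 0x10, "data abort" },
    { 0x18, "IRQ" },
    { 0x1c, "FIQ" },
};

int main(void)
{
    unsigned i;
    for (i = 0; i < sizeof(arm_vectors) / sizeof(arm_vectors[0]); i++)
        printf("0x%02x  %s\n", (unsigned)arm_vectors[i].offset,
               arm_vectors[i].name);
    return 0;
}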
  174 +/* Check section/page access permissions.
  175 + Returns the page protection flags, or zero if the access is not
  176 + permitted. */
  177 +static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
  178 + int is_user)
  179 +{
  180 + if (domain == 3)
  181 + return PAGE_READ | PAGE_WRITE;
  182 +
  183 + switch (ap) {
  184 + case 0:
  185 +        if (access_type == 1)
  186 + return 0;
  187 + switch ((env->cp15.c1_sys >> 8) & 3) {
  188 + case 1:
  189 + return is_user ? 0 : PAGE_READ;
  190 + case 2:
  191 + return PAGE_READ;
  192 + default:
  193 + return 0;
  194 + }
  195 + case 1:
  196 + return is_user ? 0 : PAGE_READ | PAGE_WRITE;
  197 + case 2:
  198 + if (is_user)
  199 + return (access_type == 1) ? 0 : PAGE_READ;
  200 + else
  201 + return PAGE_READ | PAGE_WRITE;
  202 + case 3:
  203 + return PAGE_READ | PAGE_WRITE;
  204 + default:
  205 + abort();
  206 + }
  207 +}
  208 +
  209 +static int get_phys_addr(CPUState *env, uint32_t address, int access_type,
  210 + int is_user, uint32_t *phys_ptr, int *prot)
  211 +{
  212 + int code;
  213 + uint32_t table;
  214 + uint32_t desc;
  215 + int type;
  216 + int ap;
  217 + int domain;
  218 + uint32_t phys_addr;
  219 +
  220 + /* Fast Context Switch Extension. */
  221 + if (address < 0x02000000)
  222 + address += env->cp15.c13_fcse;
  223 +
  224 + if ((env->cp15.c1_sys & 1) == 0) {
  225 +        /* MMU disabled. */
  226 + *phys_ptr = address;
  227 + *prot = PAGE_READ | PAGE_WRITE;
  228 + } else {
  229 + /* Pagetable walk. */
  230 + /* Lookup l1 descriptor. */
  231 + table = (env->cp15.c2 & 0xffffc000) | ((address >> 18) & 0x3ffc);
  232 + desc = ldl_phys(table);
  233 + type = (desc & 3);
  234 + domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
  235 + if (type == 0) {
  236 +            /* Section translation fault. */
  237 + code = 5;
  238 + goto do_fault;
  239 + }
  240 + if (domain == 0 || domain == 2) {
  241 + if (type == 2)
  242 + code = 9; /* Section domain fault. */
  243 + else
  244 + code = 11; /* Page domain fault. */
  245 + goto do_fault;
  246 + }
  247 + if (type == 2) {
  248 + /* 1Mb section. */
  249 + phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
  250 + ap = (desc >> 10) & 3;
  251 + code = 13;
  252 + } else {
  253 + /* Lookup l2 entry. */
  254 + table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
  255 + desc = ldl_phys(table);
  256 + switch (desc & 3) {
  257 + case 0: /* Page translation fault. */
  258 + code = 7;
  259 + goto do_fault;
  260 + case 1: /* 64k page. */
  261 + phys_addr = (desc & 0xffff0000) | (address & 0xffff);
  262 + ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
  263 + break;
  264 + case 2: /* 4k page. */
  265 + phys_addr = (desc & 0xfffff000) | (address & 0xfff);
  266 +                ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
  267 + break;
  268 + case 3: /* 1k page. */
  269 + if (type == 1) {
  270 + /* Page translation fault. */
  271 + code = 7;
  272 + goto do_fault;
  273 + }
  274 + phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
  275 + ap = (desc >> 4) & 3;
  276 + break;
  277 + default:
  278 + /* Never happens, but compiler isn't smart enough to tell. */
  279 + abort();
  280 + }
  281 + code = 15;
  282 + }
  283 + *prot = check_ap(env, ap, domain, access_type, is_user);
  284 + if (!*prot) {
  285 + /* Access permission fault. */
  286 + goto do_fault;
  287 + }
  288 + *phys_ptr = phys_addr;
  289 + }
  290 + return 0;
  291 +do_fault:
  292 + return code | (domain << 4);
  293 +}
  294 +
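To make the descriptor arithmetic in get_phys_addr() concrete: for a coarse second-level table, bits [31:20] of the virtual address select the level-1 entry and bits [19:12] select the level-2 entry, which is what the shift-and-mask expressions above compute. A standalone sketch, for illustration only; show_walk and the register values are made up, with ttbr standing in for cp15.c2 and l1_desc for the descriptor fetched with ldl_phys().

#include <stdint.h>
#include <stdio.h>

/* Recompute the table entry addresses used by get_phys_addr() for the
   coarse-table / 4K-page case. */
static void show_walk(uint32_t ttbr, uint32_t l1_desc, uint32_t va)
{
    /* Level 1: one 4-byte entry per 1MB, index = va[31:20]. */
    uint32_t l1_addr = (ttbr & 0xffffc000) | ((va >> 18) & 0x3ffc);
    /* Level 2 (coarse): one 4-byte entry per 4KB page, index = va[19:12]. */
    uint32_t l2_addr = (l1_desc & 0xfffffc00) | ((va >> 10) & 0x3fc);

    printf("VA %08x -> L1 entry @ %08x, L2 entry @ %08x\n",
           (unsigned)va, (unsigned)l1_addr, (unsigned)l2_addr);
}

int main(void)
{
    /* Hypothetical translation table base and level-1 descriptor. */
    show_walk(0x00004000, 0x00008011, 0xc0123456);
    return 0;
}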
  295 +int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
  296 + int access_type, int is_user, int is_softmmu)
  297 +{
  298 + uint32_t phys_addr;
  299 + int prot;
  300 + int ret;
  301 +
  302 + ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
  303 + if (ret == 0) {
  304 + /* Map a single [sub]page. */
  305 + phys_addr &= ~(uint32_t)0x3ff;
  306 + address &= ~(uint32_t)0x3ff;
  307 + return tlb_set_page (env, address, phys_addr, prot, is_user,
  308 + is_softmmu);
  309 + }
  310 +
  311 + if (access_type == 2) {
  312 + env->cp15.c5_insn = ret;
  313 + env->cp15.c6_insn = address;
  314 + env->exception_index = EXCP_PREFETCH_ABORT;
  315 + } else {
  316 + env->cp15.c5_data = ret;
  317 + env->cp15.c6_data = address;
  318 + env->exception_index = EXCP_DATA_ABORT;
  319 + }
  320 + return 1;
  321 +}
  322 +
  323 +target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
  324 +{
  325 + uint32_t phys_addr;
  326 + int prot;
  327 + int ret;
  328 +
  329 + ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
  330 +
  331 + if (ret != 0)
  332 + return -1;
  333 +
  334 + return phys_addr;
  335 +}
  336 +
  337 +void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
  338 +{
  339 + uint32_t op2;
  340 +
  341 + op2 = (insn >> 5) & 7;
  342 + switch ((insn >> 16) & 0xf) {
  343 + case 0: /* ID codes. */
  344 + goto bad_reg;
  345 + case 1: /* System configuration. */
  346 + switch (op2) {
  347 + case 0:
  348 + env->cp15.c1_sys = val;
  349 + /* ??? Lots of these bits are not implemented. */
  350 + /* This may enable/disable the MMU, so do a TLB flush. */
  351 + tlb_flush(env, 1);
  352 + break;
  353 + case 2:
  354 + env->cp15.c1_coproc = val;
  355 + /* ??? Is this safe when called from within a TB? */
  356 +            tb_flush(env);
      +            break;
  357 +        default:
  358 + goto bad_reg;
  359 + }
  360 + break;
  361 + case 2: /* MMU Page table control. */
  362 + env->cp15.c2 = val;
  363 + break;
  364 + case 3: /* MMU Domain access control. */
  365 + env->cp15.c3 = val;
  366 + break;
  367 + case 4: /* Reserved. */
  368 + goto bad_reg;
  369 + case 5: /* MMU Fault status. */
  370 + switch (op2) {
  371 + case 0:
  372 + env->cp15.c5_data = val;
  373 + break;
  374 + case 1:
  375 + env->cp15.c5_insn = val;
  376 + break;
  377 + default:
  378 + goto bad_reg;
  379 + }
  380 + break;
  381 + case 6: /* MMU Fault address. */
  382 + switch (op2) {
  383 + case 0:
  384 + env->cp15.c6_data = val;
  385 + break;
  386 + case 1:
  387 + env->cp15.c6_insn = val;
  388 + break;
  389 + default:
  390 + goto bad_reg;
  391 + }
  392 + break;
  393 + case 7: /* Cache control. */
  394 + /* No cache, so nothing to do. */
  395 + break;
  396 + case 8: /* MMU TLB control. */
  397 + switch (op2) {
  398 + case 0: /* Invalidate all. */
  399 + tlb_flush(env, 0);
  400 + break;
  401 + case 1: /* Invalidate single TLB entry. */
  402 +#if 0
  403 + /* ??? This is wrong for large pages and sections. */
  404 + /* As an ugly hack to make linux work we always flush a 4K
  405 +           page. */
  406 + val &= 0xfffff000;
  407 + tlb_flush_page(env, val);
  408 + tlb_flush_page(env, val + 0x400);
  409 + tlb_flush_page(env, val + 0x800);
  410 + tlb_flush_page(env, val + 0xc00);
  411 +#else
  412 + tlb_flush(env, 1);
  413 +#endif
  414 + break;
  415 + default:
  416 + goto bad_reg;
  417 + }
  418 + break;
  419 + case 9: /* Cache lockdown. */
  420 + switch (op2) {
  421 + case 0:
  422 + env->cp15.c9_data = val;
  423 + break;
  424 + case 1:
  425 + env->cp15.c9_insn = val;
  426 + break;
  427 + default:
  428 + goto bad_reg;
  429 + }
  430 + break;
  431 + case 10: /* MMU TLB lockdown. */
  432 + /* ??? TLB lockdown not implemented. */
  433 + break;
  434 + case 11: /* TCM DMA control. */
  435 + case 12: /* Reserved. */
  436 + goto bad_reg;
  437 + case 13: /* Process ID. */
  438 + switch (op2) {
  439 + case 0:
  440 +        env->cp15.c13_fcse = val;
  441 +        break;
  442 +    case 1:
  443 +        env->cp15.c13_context = val;
  444 + break;
  445 + default:
  446 + goto bad_reg;
  447 + }
  448 + break;
  449 + case 14: /* Reserved. */
  450 + goto bad_reg;
  451 + case 15: /* Implementation specific. */
  452 + /* ??? Internal registers not implemented. */
  453 + break;
  454 + }
  455 + return;
  456 +bad_reg:
  457 + /* ??? For debugging only. Should raise illegal instruction exception. */
  458 +    cpu_abort(env, "Unimplemented cp15 register write\n");
  459 +}
  460 +
  461 +uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
  462 +{
  463 + uint32_t op2;
  464 +
  465 + op2 = (insn >> 5) & 7;
  466 + switch ((insn >> 16) & 0xf) {
  467 + case 0: /* ID codes. */
  468 + switch (op2) {
  469 + default: /* Device ID. */
  470 + return 0x4106a262;
  471 + case 1: /* Cache Type. */
  472 + return 0x1dd20d2;
  473 + case 2: /* TCM status. */
  474 + return 0;
  475 + }
  476 + case 1: /* System configuration. */
  477 + switch (op2) {
  478 + case 0: /* Control register. */
  479 + return env->cp15.c1_sys;
  480 + case 1: /* Auxiliary control register. */
  481 + return 1;
  482 + case 2: /* Coprocessor access register. */
  483 + return env->cp15.c1_coproc;
  484 + default:
  485 + goto bad_reg;
  486 + }
  487 + case 2: /* MMU Page table control. */
  488 + return env->cp15.c2;
  489 + case 3: /* MMU Domain access control. */
  490 + return env->cp15.c3;
  491 + case 4: /* Reserved. */
  492 + goto bad_reg;
  493 + case 5: /* MMU Fault status. */
  494 + switch (op2) {
  495 + case 0:
  496 + return env->cp15.c5_data;
  497 + case 1:
  498 + return env->cp15.c5_insn;
  499 + default:
  500 + goto bad_reg;
  501 + }
  502 + case 6: /* MMU Fault address. */
  503 + switch (op2) {
  504 + case 0:
  505 + return env->cp15.c6_data;
  506 + case 1:
  507 + return env->cp15.c6_insn;
  508 + default:
  509 + goto bad_reg;
  510 + }
  511 + case 7: /* Cache control. */
  512 +        /* ??? This is for test, clean and invalidate operations that set the
  513 +           Z flag. We can't represent N = Z = 1, so it also clears
  514 +           the N flag. Oh well. */
  515 + env->NZF = 0;
  516 + return 0;
  517 + case 8: /* MMU TLB control. */
  518 + goto bad_reg;
  519 + case 9: /* Cache lockdown. */
  520 + switch (op2) {
  521 + case 0:
  522 + return env->cp15.c9_data;
  523 + case 1:
  524 + return env->cp15.c9_insn;
  525 + default:
  526 + goto bad_reg;
  527 + }
  528 + case 10: /* MMU TLB lockdown. */
  529 + /* ??? TLB lockdown not implemented. */
  530 + return 0;
  531 + case 11: /* TCM DMA control. */
  532 + case 12: /* Reserved. */
  533 + goto bad_reg;
  534 + case 13: /* Process ID. */
  535 + switch (op2) {
  536 + case 0:
  537 + return env->cp15.c13_fcse;
  538 + case 1:
  539 + return env->cp15.c13_context;
  540 + default:
  541 + goto bad_reg;
  542 + }
  543 + case 14: /* Reserved. */
  544 + goto bad_reg;
  545 + case 15: /* Implementation specific. */
  546 + /* ??? Internal registers not implemented. */
  547 + return 0;
  548 + }
  549 +bad_reg:
  550 + /* ??? For debugging only. Should raise illegal instruction exception. */
  551 + cpu_abort(env, "Unimplemented cp15 register read\n");
  552 + return 0;
  553 +}
  554 +
  555 +#endif
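Both helpers above key directly off fields of the MCR/MRC encoding rather than pre-decoded operands: bits [19:16] carry the primary register number CRn and bits [7:5] carry opcode_2, hence the (insn >> 16) & 0xf and (insn >> 5) & 7 extractions. A minimal decode sketch, for illustration only; decode_cp15 is a hypothetical name, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Pull out the fields of an ARM MCR/MRC encoding that the cp15 helpers
   actually consult. */
static void decode_cp15(uint32_t insn)
{
    uint32_t crn = (insn >> 16) & 0xf;  /* primary coprocessor register */
    uint32_t op2 = (insn >> 5) & 7;     /* opcode_2 selector */
    uint32_t rd  = (insn >> 12) & 0xf;  /* ARM source/destination register */
    int is_read  = (insn >> 20) & 1;    /* MRC when set, MCR when clear */

    printf("%s c%u, op2=%u, r%u\n", is_read ? "MRC" : "MCR",
           (unsigned)crn, (unsigned)op2, (unsigned)rd);
}

int main(void)
{
    decode_cp15(0xee110f10); /* mrc p15, 0, r0, c1, c0, 0 (read control reg) */
    return 0;
}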
... ...
target-arm/op.c
... ... @@ -101,6 +101,11 @@ void OPPROTO op_movl_T0_im(void)
101 101 T0 = PARAM1;
102 102 }
103 103  
  104 +void OPPROTO op_movl_T0_T1(void)
  105 +{
  106 + T0 = T1;
  107 +}
  108 +
104 109 void OPPROTO op_movl_T1_im(void)
105 110 {
106 111 T1 = PARAM1;
... ... @@ -361,20 +366,27 @@ void OPPROTO op_exit_tb(void)
361 366 EXIT_TB();
362 367 }
363 368  
364   -void OPPROTO op_movl_T0_psr(void)
  369 +void OPPROTO op_movl_T0_cpsr(void)
365 370 {
366   - T0 = compute_cpsr();
  371 + T0 = cpsr_read(env);
  372 + FORCE_RET();
367 373 }
368 374  
369   -/* NOTE: N = 1 and Z = 1 cannot be stored currently */
370   -void OPPROTO op_movl_psr_T0(void)
  375 +void OPPROTO op_movl_T0_spsr(void)
371 376 {
372   - unsigned int psr;
373   - psr = T0;
374   - env->CF = (psr >> 29) & 1;
375   - env->NZF = (psr & 0xc0000000) ^ 0x40000000;
376   - env->VF = (psr << 3) & 0x80000000;
377   - /* for user mode we do not update other state info */
  377 + T0 = env->spsr;
  378 +}
  379 +
  380 +void OPPROTO op_movl_spsr_T0(void)
  381 +{
  382 + uint32_t mask = PARAM1;
  383 + env->spsr = (env->spsr & ~mask) | (T0 & mask);
  384 +}
  385 +
  386 +void OPPROTO op_movl_cpsr_T0(void)
  387 +{
  388 + cpsr_write(env, T0, PARAM1);
  389 + FORCE_RET();
378 390 }
379 391  
380 392 void OPPROTO op_mul_T0_T1(void)
... ... @@ -433,67 +445,15 @@ void OPPROTO op_logicq_cc(void)
433 445  
434 446 /* memory access */
435 447  
436   -void OPPROTO op_ldub_T0_T1(void)
437   -{
438   - T0 = ldub((void *)T1);
439   -}
440   -
441   -void OPPROTO op_ldsb_T0_T1(void)
442   -{
443   - T0 = ldsb((void *)T1);
444   -}
445   -
446   -void OPPROTO op_lduw_T0_T1(void)
447   -{
448   - T0 = lduw((void *)T1);
449   -}
450   -
451   -void OPPROTO op_ldsw_T0_T1(void)
452   -{
453   - T0 = ldsw((void *)T1);
454   -}
455   -
456   -void OPPROTO op_ldl_T0_T1(void)
457   -{
458   - T0 = ldl((void *)T1);
459   -}
460   -
461   -void OPPROTO op_stb_T0_T1(void)
462   -{
463   - stb((void *)T1, T0);
464   -}
465   -
466   -void OPPROTO op_stw_T0_T1(void)
467   -{
468   - stw((void *)T1, T0);
469   -}
470   -
471   -void OPPROTO op_stl_T0_T1(void)
472   -{
473   - stl((void *)T1, T0);
474   -}
475   -
476   -void OPPROTO op_swpb_T0_T1(void)
477   -{
478   - int tmp;
  448 +#define MEMSUFFIX _raw
  449 +#include "op_mem.h"
479 450  
480   - cpu_lock();
481   - tmp = ldub((void *)T1);
482   - stb((void *)T1, T0);
483   - T0 = tmp;
484   - cpu_unlock();
485   -}
486   -
487   -void OPPROTO op_swpl_T0_T1(void)
488   -{
489   - int tmp;
490   -
491   - cpu_lock();
492   - tmp = ldl((void *)T1);
493   - stl((void *)T1, T0);
494   - T0 = tmp;
495   - cpu_unlock();
496   -}
  451 +#if !defined(CONFIG_USER_ONLY)
  452 +#define MEMSUFFIX _user
  453 +#include "op_mem.h"
  454 +#define MEMSUFFIX _kernel
  455 +#include "op_mem.h"
  456 +#endif
497 457  
498 458 /* shifts */
499 459  
... ... @@ -744,17 +704,48 @@ void OPPROTO op_sarl_T0_im(void)
744 704 T0 = (int32_t)T0 >> PARAM1;
745 705 }
746 706  
747   -/* 16->32 Sign extend */
748   -void OPPROTO op_sxl_T0(void)
  707 +/* Sign/zero extend */
  708 +void OPPROTO op_sxth_T0(void)
749 709 {
750 710 T0 = (int16_t)T0;
751 711 }
752 712  
753   -void OPPROTO op_sxl_T1(void)
  713 +void OPPROTO op_sxth_T1(void)
754 714 {
755 715 T1 = (int16_t)T1;
756 716 }
757 717  
  718 +void OPPROTO op_sxtb_T1(void)
  719 +{
  720 + T1 = (int8_t)T1;
  721 +}
  722 +
  723 +void OPPROTO op_uxtb_T1(void)
  724 +{
  725 + T1 = (uint8_t)T1;
  726 +}
  727 +
  728 +void OPPROTO op_uxth_T1(void)
  729 +{
  730 + T1 = (uint16_t)T1;
  731 +}
  732 +
  733 +void OPPROTO op_sxtb16_T1(void)
  734 +{
  735 + uint32_t res;
  736 + res = (uint16_t)(int8_t)T1;
  737 + res |= (uint32_t)(int8_t)(T1 >> 16) << 16;
  738 + T1 = res;
  739 +}
  740 +
  741 +void OPPROTO op_uxtb16_T1(void)
  742 +{
  743 + uint32_t res;
  744 + res = (uint16_t)(uint8_t)T1;
  745 + res |= (uint32_t)(uint8_t)(T1 >> 16) << 16;
  746 + T1 = res;
  747 +}
  748 +
758 749 #define SIGNBIT (uint32_t)0x80000000
759 750 /* saturating arithmetic */
760 751 void OPPROTO op_addl_T0_T1_setq(void)
... ... @@ -1128,23 +1119,52 @@ void OPPROTO op_vfp_mdrr(void)
1128 1119 FT0d = u.d;
1129 1120 }
1130 1121  
1131   -/* Floating point load/store. Address is in T1 */
1132   -void OPPROTO op_vfp_lds(void)
  1122 +/* Copy the most significant bit to T0 to all bits of T1. */
  1123 +void OPPROTO op_signbit_T1_T0(void)
1133 1124 {
1134   - FT0s = ldfl((void *)T1);
  1125 + T1 = (int32_t)T0 >> 31;
1135 1126 }
1136 1127  
1137   -void OPPROTO op_vfp_ldd(void)
  1128 +void OPPROTO op_movl_cp15_T0(void)
1138 1129 {
1139   - FT0d = ldfq((void *)T1);
  1130 + helper_set_cp15(env, PARAM1, T0);
  1131 + FORCE_RET();
1140 1132 }
1141 1133  
1142   -void OPPROTO op_vfp_sts(void)
  1134 +void OPPROTO op_movl_T0_cp15(void)
1143 1135 {
1144   - stfl((void *)T1, FT0s);
  1136 + T0 = helper_get_cp15(env, PARAM1);
  1137 + FORCE_RET();
1145 1138 }
1146 1139  
1147   -void OPPROTO op_vfp_std(void)
  1140 +/* Access to user mode registers from privileged modes. */
  1141 +void OPPROTO op_movl_T0_user(void)
1148 1142 {
1149   - stfq((void *)T1, FT0d);
  1143 + int regno = PARAM1;
  1144 + if (regno == 13) {
  1145 + T0 = env->banked_r13[0];
  1146 + } else if (regno == 14) {
  1147 + T0 = env->banked_r14[0];
  1148 + } else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
  1149 + T0 = env->usr_regs[regno - 8];
  1150 + } else {
  1151 + T0 = env->regs[regno];
  1152 + }
  1153 + FORCE_RET();
  1154 +}
  1155 +
  1156 +
  1157 +void OPPROTO op_movl_user_T0(void)
  1158 +{
  1159 + int regno = PARAM1;
  1160 + if (regno == 13) {
  1161 + env->banked_r13[0] = T0;
  1162 + } else if (regno == 14) {
  1163 + env->banked_r14[0] = T0;
  1164 + } else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
  1165 + env->usr_regs[regno - 8] = T0;
  1166 + } else {
  1167 + env->regs[regno] = T0;
  1168 + }
  1169 + FORCE_RET();
1150 1170 }
... ...
target-arm/op_helper.c
... ... @@ -172,3 +172,54 @@ void do_vfp_get_fpscr(void)
172 172 i = get_float_exception_flags(&env->vfp.fp_status);
173 173 T0 |= vfp_exceptbits_from_host(i);
174 174 }
  175 +
  176 +#if !defined(CONFIG_USER_ONLY)
  177 +
  178 +#define MMUSUFFIX _mmu
  179 +#define GETPC() (__builtin_return_address(0))
  180 +
  181 +#define SHIFT 0
  182 +#include "softmmu_template.h"
  183 +
  184 +#define SHIFT 1
  185 +#include "softmmu_template.h"
  186 +
  187 +#define SHIFT 2
  188 +#include "softmmu_template.h"
  189 +
  190 +#define SHIFT 3
  191 +#include "softmmu_template.h"
  192 +
  193 +/* try to fill the TLB and return an exception if error. If retaddr is
  194 + NULL, it means that the function was called in C code (i.e. not
  195 + from generated code or from helper.c) */
  196 +/* XXX: fix it to restore all registers */
  197 +void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
  198 +{
  199 + TranslationBlock *tb;
  200 + CPUState *saved_env;
  201 + target_phys_addr_t pc;
  202 + int ret;
  203 +
  204 + /* XXX: hack to restore env in all cases, even if not called from
  205 + generated code */
  206 + saved_env = env;
  207 + env = cpu_single_env;
  208 + ret = cpu_arm_handle_mmu_fault(env, addr, is_write, is_user, 1);
  209 + if (__builtin_expect(ret, 0)) {
  210 + if (retaddr) {
  211 + /* now we have a real cpu fault */
  212 + pc = (target_phys_addr_t)retaddr;
  213 + tb = tb_find_pc(pc);
  214 + if (tb) {
  215 + /* the PC is inside the translated code. It means that we have
  216 + a virtual CPU fault */
  217 + cpu_restore_state(tb, env, pc, NULL);
  218 + }
  219 + }
  220 + raise_exception(env->exception_index);
  221 + }
  222 + env = saved_env;
  223 +}
  224 +
  225 +#endif
... ...
target-arm/op_mem.h 0 โ†’ 100644
  1 +/* ARM memory operations. */
  2 +
  3 +/* Load from address T1 into T0. */
  4 +#define MEM_LD_OP(name) \
  5 +void OPPROTO glue(op_ld##name,MEMSUFFIX)(void) \
  6 +{ \
  7 + T0 = glue(ld##name,MEMSUFFIX)(T1); \
  8 + FORCE_RET(); \
  9 +}
  10 +
  11 +MEM_LD_OP(ub)
  12 +MEM_LD_OP(sb)
  13 +MEM_LD_OP(uw)
  14 +MEM_LD_OP(sw)
  15 +MEM_LD_OP(l)
  16 +
  17 +#undef MEM_LD_OP
  18 +
  19 +/* Store T0 to address T1. */
  20 +#define MEM_ST_OP(name) \
  21 +void OPPROTO glue(op_st##name,MEMSUFFIX)(void) \
  22 +{ \
  23 + glue(st##name,MEMSUFFIX)(T1, T0); \
  24 + FORCE_RET(); \
  25 +}
  26 +
  27 +MEM_ST_OP(b)
  28 +MEM_ST_OP(w)
  29 +MEM_ST_OP(l)
  30 +
  31 +#undef MEM_ST_OP
  32 +
  33 +/* Swap T0 with memory at address T1. */
  34 +/* ??? Is this exception safe? */
  35 +#define MEM_SWP_OP(name, lname) \
  36 +void OPPROTO glue(op_swp##name,MEMSUFFIX)(void) \
  37 +{ \
  38 + uint32_t tmp; \
  39 + cpu_lock(); \
  40 + tmp = glue(ld##lname,MEMSUFFIX)(T1); \
  41 + glue(st##name,MEMSUFFIX)(T1, T0); \
  42 + T0 = tmp; \
  43 + cpu_unlock(); \
  44 + FORCE_RET(); \
  45 +}
  46 +
  47 +MEM_SWP_OP(b, ub)
  48 +MEM_SWP_OP(l, l)
  49 +
  50 +#undef MEM_SWP_OP
  51 +
  52 +/* Floating point load/store. Address is in T1 */
  53 +#define VFP_MEM_OP(p, w) \
  54 +void OPPROTO glue(op_vfp_ld##p,MEMSUFFIX)(void) \
  55 +{ \
  56 + FT0##p = glue(ldf##w,MEMSUFFIX)(T1); \
  57 + FORCE_RET(); \
  58 +} \
  59 +void OPPROTO glue(op_vfp_st##p,MEMSUFFIX)(void) \
  60 +{ \
  61 + glue(stf##w,MEMSUFFIX)(T1, FT0##p); \
  62 + FORCE_RET(); \
  63 +}
  64 +
  65 +VFP_MEM_OP(s,l)
  66 +VFP_MEM_OP(d,q)
  67 +
  68 +#undef VFP_MEM_OP
  69 +
  70 +#undef MEMSUFFIX
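Each inclusion of this header pastes the current MEMSUFFIX onto every op name, so the ldl template becomes op_ldl_raw, op_ldl_user or op_ldl_kernel depending on which suffix the including file defined. A reduced, self-contained sketch of the same token-pasting idiom; the glue_/glue definitions mimic QEMU's usual two-level paster, and T0, T1 and ldl_raw here are stand-ins rather than the real translator state.

#include <stdio.h>

/* Two-level paster so macro arguments are expanded before ## is applied. */
#define glue_(a, b) a##b
#define glue(a, b)  glue_(a, b)

/* Stand-ins for the dynamic translator state and the raw load primitive. */
static unsigned T0, T1;
static unsigned ldl_raw(unsigned addr) { (void)addr; return 42; }

#define MEMSUFFIX _raw
#define MEM_LD_OP(name) \
static void glue(op_ld##name, MEMSUFFIX)(void) \
{ \
    T0 = glue(ld##name, MEMSUFFIX)(T1); \
}

MEM_LD_OP(l)    /* defines op_ldl_raw(), which calls ldl_raw() */

int main(void)
{
    T1 = 0x1000;
    op_ldl_raw();
    printf("T0 = %u\n", T0);
    return 0;
}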
... ...
target-arm/translate.c
... ... @@ -28,6 +28,12 @@
28 28 #include "exec-all.h"
29 29 #include "disas.h"
30 30  
  31 +#define ENABLE_ARCH_5J 0
  32 +#define ENABLE_ARCH_6 1
  33 +#define ENABLE_ARCH_6T2 1
  34 +
  35 +#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
  36 +
31 37 /* internal defines */
32 38 typedef struct DisasContext {
33 39 target_ulong pc;
... ... @@ -39,8 +45,17 @@ typedef struct DisasContext {
39 45 struct TranslationBlock *tb;
40 46 int singlestep_enabled;
41 47 int thumb;
  48 +#if !defined(CONFIG_USER_ONLY)
  49 + int user;
  50 +#endif
42 51 } DisasContext;
43 52  
  53 +#if defined(CONFIG_USER_ONLY)
  54 +#define IS_USER(s) 1
  55 +#else
  56 +#define IS_USER(s) (s->user)
  57 +#endif
  58 +
44 59 #define DISAS_JUMP_NEXT 4
45 60  
46 61 #ifdef USE_DIRECT_JUMP
... ... @@ -270,6 +285,18 @@ static inline void gen_bx(DisasContext *s)
270 285 gen_op_bx_T0();
271 286 }
272 287  
  288 +
  289 +#if defined(CONFIG_USER_ONLY)
  290 +#define gen_ldst(name, s) gen_op_##name##_raw()
  291 +#else
  292 +#define gen_ldst(name, s) do { \
  293 + if (IS_USER(s)) \
  294 + gen_op_##name##_user(); \
  295 + else \
  296 + gen_op_##name##_kernel(); \
  297 + } while (0)
  298 +#endif
  299 +
273 300 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
274 301 {
275 302 int val;
... ... @@ -319,6 +346,14 @@ static inline void gen_movl_reg_T1(DisasContext *s, int reg)
319 346 gen_movl_reg_TN(s, reg, 1);
320 347 }
321 348  
  349 +/* Force a TB lookup after an instruction that changes the CPU state. */
  350 +static inline void gen_lookup_tb(DisasContext *s)
  351 +{
  352 + gen_op_movl_T0_im(s->pc);
  353 + gen_movl_reg_T0(s, 15);
  354 + s->is_jmp = DISAS_UPDATE;
  355 +}
  356 +
322 357 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
323 358 {
324 359 int val, rm, shift, shiftop;
... ... @@ -395,11 +430,25 @@ VFP_OP(toui)
395 430 VFP_OP(touiz)
396 431 VFP_OP(tosi)
397 432 VFP_OP(tosiz)
398   -VFP_OP(ld)
399   -VFP_OP(st)
400 433  
401 434 #undef VFP_OP
402 435  
  436 +static inline void gen_vfp_ld(DisasContext *s, int dp)
  437 +{
  438 + if (dp)
  439 + gen_ldst(vfp_ldd, s);
  440 + else
  441 + gen_ldst(vfp_lds, s);
  442 +}
  443 +
  444 +static inline void gen_vfp_st(DisasContext *s, int dp)
  445 +{
  446 + if (dp)
  447 + gen_ldst(vfp_std, s);
  448 + else
  449 + gen_ldst(vfp_sts, s);
  450 +}
  451 +
403 452 static inline long
404 453 vfp_reg_offset (int dp, int reg)
405 454 {
... ... @@ -437,6 +486,30 @@ static inline void gen_mov_vreg_F0(int dp, int reg)
437 486 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
438 487 }
439 488  
  489 +/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
  490 + instruction is not defined. */
  491 +static int disas_cp15_insn(DisasContext *s, uint32_t insn)
  492 +{
  493 + uint32_t rd;
  494 +
  495 + /* ??? Some cp15 registers are accessible from userspace. */
  496 + if (IS_USER(s)) {
  497 + return 1;
  498 + }
  499 + rd = (insn >> 12) & 0xf;
  500 + if (insn & (1 << 20)) {
  501 + gen_op_movl_T0_cp15(insn);
  502 +        /* If the destination register is r15 then the condition codes are set. */
  503 + if (rd != 15)
  504 + gen_movl_reg_T0(s, rd);
  505 + } else {
  506 + gen_movl_T0_reg(s, rd);
  507 + gen_op_movl_cp15_T0(insn);
  508 + }
  509 + gen_lookup_tb(s);
  510 + return 0;
  511 +}
  512 +
440 513 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
441 514 (ie. an undefined instruction). */
442 515 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
... ... @@ -499,8 +572,8 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
499 572 gen_op_vfp_mrs();
500 573 }
501 574 if (rd == 15) {
502   - /* This will only set the 4 flag bits */
503   - gen_op_movl_psr_T0();
  575 + /* Set the 4 flag bits in the CPSR. */
  576 + gen_op_movl_cpsr_T0(0xf0000000);
504 577 } else
505 578 gen_movl_reg_T0(s, rd);
506 579 } else {
... ... @@ -516,9 +589,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
516 589 gen_op_vfp_movl_fpscr_T0();
517 590 /* This could change vector settings, so jump to
518 591 the next instuction. */
519   - gen_op_movl_T0_im(s->pc);
520   - gen_movl_reg_T0(s, 15);
521   - s->is_jmp = DISAS_UPDATE;
  592 + gen_lookup_tb(s);
522 593 break;
523 594 default:
524 595 return 1;
... ... @@ -848,11 +919,11 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
848 919 offset = -offset;
849 920 gen_op_addl_T1_im(offset);
850 921 if (insn & (1 << 20)) {
851   - gen_vfp_ld(dp);
  922 + gen_vfp_ld(s, dp);
852 923 gen_mov_vreg_F0(dp, rd);
853 924 } else {
854 925 gen_mov_F0_vreg(dp, rd);
855   - gen_vfp_st(dp);
  926 + gen_vfp_st(s, dp);
856 927 }
857 928 } else {
858 929 /* load/store multiple */
... ... @@ -871,12 +942,12 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
871 942 for (i = 0; i < n; i++) {
872 943 if (insn & (1 << 20)) {
873 944 /* load */
874   - gen_vfp_ld(dp);
  945 + gen_vfp_ld(s, dp);
875 946 gen_mov_vreg_F0(dp, rd + i);
876 947 } else {
877 948 /* store */
878 949 gen_mov_F0_vreg(dp, rd + i);
879   - gen_vfp_st(dp);
  950 + gen_vfp_st(s, dp);
880 951 }
881 952 gen_op_addl_T1_im(offset);
882 953 }
... ... @@ -939,11 +1010,68 @@ static inline void gen_jmp (DisasContext *s, uint32_t dest)
939 1010 }
940 1011 }
941 1012  
  1013 +static inline void gen_mulxy(int x, int y)
  1014 +{
  1015 + if (x & 2)
  1016 + gen_op_sarl_T0_im(16);
  1017 + else
  1018 + gen_op_sxth_T0();
  1019 +    if (y & 4)
  1020 + gen_op_sarl_T1_im(16);
  1021 + else
  1022 + gen_op_sxth_T1();
  1023 + gen_op_mul_T0_T1();
  1024 +}
  1025 +
  1026 +/* Return the mask of PSR bits set by a MSR instruction. */
  1027 +static uint32_t msr_mask(DisasContext *s, int flags) {
  1028 + uint32_t mask;
  1029 +
  1030 + mask = 0;
  1031 + if (flags & (1 << 0))
  1032 + mask |= 0xff;
  1033 + if (flags & (1 << 1))
  1034 + mask |= 0xff00;
  1035 + if (flags & (1 << 2))
  1036 + mask |= 0xff0000;
  1037 + if (flags & (1 << 3))
  1038 + mask |= 0xff000000;
  1039 + /* Mask out undefined bits and state bits. */
  1040 + mask &= 0xf89f03df;
  1041 + /* Mask out privileged bits. */
  1042 + if (IS_USER(s))
  1043 + mask &= 0xf80f0200;
  1044 + return mask;
  1045 +}
  1046 +
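As a worked example of the masking above: an MSR naming the flags and control fields (field mask 0b1001, i.e. "msr cpsr_fc, rN") starts from 0xff0000ff, the undefined/state filter reduces that to 0xf80000df, and in user mode only the flag bits survive. A standalone recomputation, for illustration only; msr_mask_demo is a hypothetical name, not the generator function itself.

#include <stdint.h>
#include <stdio.h>

/* Recompute the mask msr_mask() would produce.  'flags' is the 4-bit field
   mask from the MSR encoding and 'user' mirrors IS_USER(s). */
static uint32_t msr_mask_demo(int flags, int user)
{
    uint32_t mask = 0;

    if (flags & (1 << 0)) mask |= 0xff;        /* control field */
    if (flags & (1 << 1)) mask |= 0xff00;      /* extension field */
    if (flags & (1 << 2)) mask |= 0xff0000;    /* status field */
    if (flags & (1 << 3)) mask |= 0xff000000;  /* flags field */
    mask &= 0xf89f03df;                        /* drop undefined/state bits */
    if (user)
        mask &= 0xf80f0200;                    /* user mode: flag/GE/E bits only */
    return mask;
}

int main(void)
{
    /* msr cpsr_fc, rN : field mask 0b1001 */
    printf("privileged: %08x\n", (unsigned)msr_mask_demo(0x9, 0));
    printf("user mode:  %08x\n", (unsigned)msr_mask_demo(0x9, 1));
    return 0;
}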
  1047 +/* Returns nonzero if access to the PSR is not permitted. */
  1048 +static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
  1049 +{
  1050 + if (spsr) {
  1051 + /* ??? This is also undefined in system mode. */
  1052 + if (IS_USER(s))
  1053 + return 1;
  1054 + gen_op_movl_spsr_T0(mask);
  1055 + } else {
  1056 + gen_op_movl_cpsr_T0(mask);
  1057 + }
  1058 + gen_lookup_tb(s);
  1059 + return 0;
  1060 +}
  1061 +
  1062 +static void gen_exception_return(DisasContext *s)
  1063 +{
  1064 + gen_op_movl_reg_TN[0][15]();
  1065 + gen_op_movl_T0_spsr();
  1066 + gen_op_movl_cpsr_T0(0xffffffff);
  1067 + s->is_jmp = DISAS_UPDATE;
  1068 +}
  1069 +
942 1070 static void disas_arm_insn(CPUState * env, DisasContext *s)
943 1071 {
944 1072 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
945 1073  
946   - insn = ldl(s->pc);
  1074 + insn = ldl_code(s->pc);
947 1075 s->pc += 4;
948 1076  
949 1077 cond = insn >> 28;
... ... @@ -971,6 +1099,15 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
971 1099 /* Coprocessor double register transfer. */
972 1100 } else if ((insn & 0x0f000010) == 0x0e000010) {
973 1101 /* Additional coprocessor register transfer. */
  1102 + } else if ((insn & 0x0ff10010) == 0x01000000) {
  1103 + /* cps (privileged) */
  1104 + } else if ((insn & 0x0ffffdff) == 0x01010000) {
  1105 + /* setend */
  1106 + if (insn & (1 << 9)) {
  1107 + /* BE8 mode not implemented. */
  1108 + goto illegal_op;
  1109 + }
  1110 + return;
974 1111 }
975 1112 goto illegal_op;
976 1113 }
... ... @@ -984,7 +1121,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
984 1121 //s->is_jmp = DISAS_JUMP_NEXT;
985 1122 }
986 1123 if ((insn & 0x0f900000) == 0x03000000) {
987   - if ((insn & 0x0ff0f000) != 0x0360f000)
  1124 + if ((insn & 0x0fb0f000) != 0x0320f000)
988 1125 goto illegal_op;
989 1126 /* CPSR = immediate */
990 1127 val = insn & 0xff;
... ... @@ -992,8 +1129,9 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
992 1129 if (shift)
993 1130 val = (val >> shift) | (val << (32 - shift));
994 1131 gen_op_movl_T0_im(val);
995   - if (insn & (1 << 19))
996   - gen_op_movl_psr_T0();
  1132 + if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf),
  1133 + (insn & (1 << 22)) != 0))
  1134 + goto illegal_op;
997 1135 } else if ((insn & 0x0f900000) == 0x01000000
998 1136 && (insn & 0x00000090) != 0x00000090) {
999 1137 /* miscellaneous instructions */
... ... @@ -1002,19 +1140,22 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1002 1140 rm = insn & 0xf;
1003 1141 switch (sh) {
1004 1142 case 0x0: /* move program status register */
1005   - if (op1 & 2) {
1006   - /* SPSR not accessible in user mode */
1007   - goto illegal_op;
1008   - }
1009 1143 if (op1 & 1) {
1010   - /* CPSR = reg */
  1144 + /* PSR = reg */
1011 1145 gen_movl_T0_reg(s, rm);
1012   - if (insn & (1 << 19))
1013   - gen_op_movl_psr_T0();
  1146 + if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf),
  1147 + (op1 & 2) != 0))
  1148 + goto illegal_op;
1014 1149 } else {
1015 1150 /* reg = CPSR */
1016 1151 rd = (insn >> 12) & 0xf;
1017   - gen_op_movl_T0_psr();
  1152 + if (op1 & 2) {
  1153 + if (IS_USER(s))
  1154 + goto illegal_op;
  1155 + gen_op_movl_T0_spsr();
  1156 + } else {
  1157 + gen_op_movl_T0_cpsr();
  1158 + }
1018 1159 gen_movl_reg_T0(s, rd);
1019 1160 }
1020 1161 break;
... ... @@ -1033,6 +1174,16 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1033 1174 goto illegal_op;
1034 1175 }
1035 1176 break;
  1177 + case 0x2:
  1178 + if (op1 == 1) {
  1179 + ARCH(5J); /* bxj */
  1180 + /* Trivial implementation equivalent to bx. */
  1181 + gen_movl_T0_reg(s, rm);
  1182 + gen_bx(s);
  1183 + } else {
  1184 + goto illegal_op;
  1185 + }
  1186 + break;
1036 1187 case 0x3:
1037 1188 if (op1 != 1)
1038 1189 goto illegal_op;
... ... @@ -1071,7 +1222,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1071 1222 if (sh & 4)
1072 1223 gen_op_sarl_T1_im(16);
1073 1224 else
1074   - gen_op_sxl_T1();
  1225 + gen_op_sxth_T1();
1075 1226 gen_op_imulw_T0_T1();
1076 1227 if ((sh & 2) == 0) {
1077 1228 gen_movl_T1_reg(s, rn);
... ... @@ -1081,22 +1232,14 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1081 1232 } else {
1082 1233 /* 16 * 16 */
1083 1234 gen_movl_T0_reg(s, rm);
1084   - if (sh & 2)
1085   - gen_op_sarl_T0_im(16);
1086   - else
1087   - gen_op_sxl_T0();
1088 1235 gen_movl_T1_reg(s, rs);
1089   - if (sh & 4)
1090   - gen_op_sarl_T1_im(16);
1091   - else
1092   - gen_op_sxl_T1();
  1236 + gen_mulxy(sh & 2, sh & 4);
1093 1237 if (op1 == 2) {
1094   - gen_op_imull_T0_T1();
  1238 + gen_op_signbit_T1_T0();
1095 1239 gen_op_addq_T0_T1(rn, rd);
1096 1240 gen_movl_reg_T0(s, rn);
1097 1241 gen_movl_reg_T1(s, rd);
1098 1242 } else {
1099   - gen_op_mul_T0_T1();
1100 1243 if (op1 == 0) {
1101 1244 gen_movl_T1_reg(s, rn);
1102 1245 gen_op_addl_T0_T1_setq();
... ... @@ -1176,11 +1319,19 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1176 1319 gen_op_logic_T0_cc();
1177 1320 break;
1178 1321 case 0x02:
1179   - if (set_cc)
  1322 + if (set_cc && rd == 15) {
  1323 + /* SUBS r15, ... is used for exception return. */
  1324 + if (IS_USER(s))
  1325 + goto illegal_op;
1180 1326 gen_op_subl_T0_T1_cc();
1181   - else
1182   - gen_op_subl_T0_T1();
1183   - gen_movl_reg_T0(s, rd);
  1327 + gen_exception_return(s);
  1328 + } else {
  1329 + if (set_cc)
  1330 + gen_op_subl_T0_T1_cc();
  1331 + else
  1332 + gen_op_subl_T0_T1();
  1333 + gen_movl_reg_T0(s, rd);
  1334 + }
1184 1335 break;
1185 1336 case 0x03:
1186 1337 if (set_cc)
... ... @@ -1246,9 +1397,17 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1246 1397 gen_op_logic_T0_cc();
1247 1398 break;
1248 1399 case 0x0d:
1249   - gen_movl_reg_T1(s, rd);
1250   - if (logic_cc)
1251   - gen_op_logic_T1_cc();
  1400 + if (logic_cc && rd == 15) {
  1401 + /* MOVS r15, ... is used for exception return. */
  1402 + if (IS_USER(s))
  1403 + goto illegal_op;
  1404 + gen_op_movl_T0_T1();
  1405 + gen_exception_return(s);
  1406 + } else {
  1407 + gen_movl_reg_T1(s, rd);
  1408 + if (logic_cc)
  1409 + gen_op_logic_T1_cc();
  1410 + }
1252 1411 break;
1253 1412 case 0x0e:
1254 1413 gen_op_bicl_T0_T1();
... ... @@ -1301,6 +1460,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1301 1460 if (insn & (1 << 21)) /* mult accumulate */
1302 1461 gen_op_addq_T0_T1(rn, rd);
1303 1462 if (!(insn & (1 << 23))) { /* double accumulate */
  1463 + ARCH(6);
1304 1464 gen_op_addq_lo_T0_T1(rn);
1305 1465 gen_op_addq_lo_T0_T1(rd);
1306 1466 }
... ... @@ -1322,9 +1482,9 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1322 1482 gen_movl_T0_reg(s, rm);
1323 1483 gen_movl_T1_reg(s, rn);
1324 1484 if (insn & (1 << 22)) {
1325   - gen_op_swpb_T0_T1();
  1485 + gen_ldst(swpb, s);
1326 1486 } else {
1327   - gen_op_swpl_T0_T1();
  1487 + gen_ldst(swpl, s);
1328 1488 }
1329 1489 gen_movl_reg_T0(s, rd);
1330 1490 }
... ... @@ -1340,14 +1500,14 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1340 1500 /* load */
1341 1501 switch(sh) {
1342 1502 case 1:
1343   - gen_op_lduw_T0_T1();
  1503 + gen_ldst(lduw, s);
1344 1504 break;
1345 1505 case 2:
1346   - gen_op_ldsb_T0_T1();
  1506 + gen_ldst(ldsb, s);
1347 1507 break;
1348 1508 default:
1349 1509 case 3:
1350   - gen_op_ldsw_T0_T1();
  1510 + gen_ldst(ldsw, s);
1351 1511 break;
1352 1512 }
1353 1513 gen_movl_reg_T0(s, rd);
... ... @@ -1356,18 +1516,18 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1356 1516 if (sh & 1) {
1357 1517 /* store */
1358 1518 gen_movl_T0_reg(s, rd);
1359   - gen_op_stl_T0_T1();
  1519 + gen_ldst(stl, s);
1360 1520 gen_op_addl_T1_im(4);
1361 1521 gen_movl_T0_reg(s, rd + 1);
1362   - gen_op_stl_T0_T1();
  1522 + gen_ldst(stl, s);
1363 1523 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1364 1524 gen_op_addl_T1_im(-4);
1365 1525 } else {
1366 1526 /* load */
1367   - gen_op_ldl_T0_T1();
  1527 + gen_ldst(ldl, s);
1368 1528 gen_movl_reg_T0(s, rd);
1369 1529 gen_op_addl_T1_im(4);
1370   - gen_op_ldl_T0_T1();
  1530 + gen_ldst(ldl, s);
1371 1531 gen_movl_reg_T0(s, rd + 1);
1372 1532 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1373 1533 gen_op_addl_T1_im(-4);
... ... @@ -1375,7 +1535,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1375 1535 } else {
1376 1536 /* store */
1377 1537 gen_movl_T0_reg(s, rd);
1378   - gen_op_stw_T0_T1();
  1538 + gen_ldst(stw, s);
1379 1539 }
1380 1540 if (!(insn & (1 << 24))) {
1381 1541 gen_add_datah_offset(s, insn);
... ... @@ -1393,14 +1553,29 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1393 1553 rn = (insn >> 16) & 0xf;
1394 1554 rd = (insn >> 12) & 0xf;
1395 1555 gen_movl_T1_reg(s, rn);
  1556 + i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
1396 1557 if (insn & (1 << 24))
1397 1558 gen_add_data_offset(s, insn);
1398 1559 if (insn & (1 << 20)) {
1399 1560 /* load */
  1561 +#if defined(CONFIG_USER_ONLY)
1400 1562 if (insn & (1 << 22))
1401   - gen_op_ldub_T0_T1();
  1563 + gen_op_ldub_raw();
1402 1564 else
1403   - gen_op_ldl_T0_T1();
  1565 + gen_op_ldl_raw();
  1566 +#else
  1567 + if (insn & (1 << 22)) {
  1568 + if (i)
  1569 + gen_op_ldub_user();
  1570 + else
  1571 + gen_op_ldub_kernel();
  1572 + } else {
  1573 + if (i)
  1574 + gen_op_ldl_user();
  1575 + else
  1576 + gen_op_ldl_kernel();
  1577 + }
  1578 +#endif
1404 1579 if (rd == 15)
1405 1580 gen_bx(s);
1406 1581 else
... ... @@ -1408,10 +1583,24 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1408 1583 } else {
1409 1584 /* store */
1410 1585 gen_movl_T0_reg(s, rd);
  1586 +#if defined(CONFIG_USER_ONLY)
1411 1587 if (insn & (1 << 22))
1412   - gen_op_stb_T0_T1();
  1588 + gen_op_stb_raw();
1413 1589 else
1414   - gen_op_stl_T0_T1();
  1590 + gen_op_stl_raw();
  1591 +#else
  1592 + if (insn & (1 << 22)) {
  1593 + if (i)
  1594 + gen_op_stb_user();
  1595 + else
  1596 + gen_op_stb_kernel();
  1597 + } else {
  1598 + if (i)
  1599 + gen_op_stl_user();
  1600 + else
  1601 + gen_op_stl_kernel();
  1602 + }
  1603 +#endif
1415 1604 }
1416 1605 if (!(insn & (1 << 24))) {
1417 1606 gen_add_data_offset(s, insn);
... ... @@ -1423,11 +1612,17 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1423 1612 case 0x08:
1424 1613 case 0x09:
1425 1614 {
1426   - int j, n;
  1615 + int j, n, user;
1427 1616 /* load/store multiple words */
1428 1617 /* XXX: store correct base if write back */
1429   - if (insn & (1 << 22))
1430   - goto illegal_op; /* only usable in supervisor mode */
  1618 + user = 0;
  1619 + if (insn & (1 << 22)) {
  1620 + if (IS_USER(s))
  1621 + goto illegal_op; /* only usable in supervisor mode */
  1622 +
  1623 + if ((insn & (1 << 15)) == 0)
  1624 + user = 1;
  1625 + }
1431 1626 rn = (insn >> 16) & 0xf;
1432 1627 gen_movl_T1_reg(s, rn);
1433 1628  
... ... @@ -1460,21 +1655,26 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1460 1655 if (insn & (1 << i)) {
1461 1656 if (insn & (1 << 20)) {
1462 1657 /* load */
1463   - gen_op_ldl_T0_T1();
1464   - if (i == 15)
  1658 + gen_ldst(ldl, s);
  1659 + if (i == 15) {
1465 1660 gen_bx(s);
1466   - else
  1661 + } else if (user) {
  1662 + gen_op_movl_user_T0(i);
  1663 + } else {
1467 1664 gen_movl_reg_T0(s, i);
  1665 + }
1468 1666 } else {
1469 1667 /* store */
1470 1668 if (i == 15) {
1471 1669 /* special case: r15 = PC + 12 */
1472 1670 val = (long)s->pc + 8;
1473 1671 gen_op_movl_TN_im[0](val);
  1672 + } else if (user) {
  1673 + gen_op_movl_T0_user(i);
1474 1674 } else {
1475 1675 gen_movl_T0_reg(s, i);
1476 1676 }
1477   - gen_op_stl_T0_T1();
  1677 + gen_ldst(stl, s);
1478 1678 }
1479 1679 j++;
1480 1680 /* no need to add after the last transfer */
... ... @@ -1503,6 +1703,12 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1503 1703 }
1504 1704 gen_movl_reg_T1(s, rn);
1505 1705 }
  1706 + if ((insn & (1 << 22)) && !user) {
  1707 + /* Restore CPSR from SPSR. */
  1708 + gen_op_movl_T0_spsr();
  1709 + gen_op_movl_cpsr_T0(0xffffffff);
  1710 + s->is_jmp = DISAS_UPDATE;
  1711 + }
1506 1712 }
1507 1713 break;
1508 1714 case 0xa:
... ... @@ -1532,6 +1738,10 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
1532 1738 if (disas_vfp_insn (env, s, insn))
1533 1739 goto illegal_op;
1534 1740 break;
  1741 + case 15:
  1742 + if (disas_cp15_insn (s, insn))
  1743 + goto illegal_op;
  1744 + break;
1535 1745 default:
1536 1746 /* unknown coprocessor. */
1537 1747 goto illegal_op;
... ... @@ -1561,9 +1771,9 @@ static void disas_thumb_insn(DisasContext *s)
1561 1771 int32_t offset;
1562 1772 int i;
1563 1773  
1564   - insn = lduw(s->pc);
  1774 + insn = lduw_code(s->pc);
1565 1775 s->pc += 2;
1566   -
  1776 +
1567 1777 switch (insn >> 12) {
1568 1778 case 0: case 1:
1569 1779 rd = insn & 7;
... ... @@ -1628,7 +1838,7 @@ static void disas_thumb_insn(DisasContext *s)
1628 1838 val = s->pc + 2 + ((insn & 0xff) * 4);
1629 1839 val &= ~(uint32_t)2;
1630 1840 gen_op_movl_T1_im(val);
1631   - gen_op_ldl_T0_T1();
  1841 + gen_ldst(ldl, s);
1632 1842 gen_movl_reg_T0(s, rd);
1633 1843 break;
1634 1844 }
... ... @@ -1771,28 +1981,28 @@ static void disas_thumb_insn(DisasContext *s)
1771 1981  
1772 1982 switch (op) {
1773 1983 case 0: /* str */
1774   - gen_op_stl_T0_T1();
  1984 + gen_ldst(stl, s);
1775 1985 break;
1776 1986 case 1: /* strh */
1777   - gen_op_stw_T0_T1();
  1987 + gen_ldst(stw, s);
1778 1988 break;
1779 1989 case 2: /* strb */
1780   - gen_op_stb_T0_T1();
  1990 + gen_ldst(stb, s);
1781 1991 break;
1782 1992 case 3: /* ldrsb */
1783   - gen_op_ldsb_T0_T1();
  1993 + gen_ldst(ldsb, s);
1784 1994 break;
1785 1995 case 4: /* ldr */
1786   - gen_op_ldl_T0_T1();
  1996 + gen_ldst(ldl, s);
1787 1997 break;
1788 1998 case 5: /* ldrh */
1789   - gen_op_lduw_T0_T1();
  1999 + gen_ldst(lduw, s);
1790 2000 break;
1791 2001 case 6: /* ldrb */
1792   - gen_op_ldub_T0_T1();
  2002 + gen_ldst(ldub, s);
1793 2003 break;
1794 2004 case 7: /* ldrsh */
1795   - gen_op_ldsw_T0_T1();
  2005 + gen_ldst(ldsw, s);
1796 2006 break;
1797 2007 }
1798 2008 if (op >= 3) /* load */
... ... @@ -1810,12 +2020,12 @@ static void disas_thumb_insn(DisasContext *s)
1810 2020  
1811 2021 if (insn & (1 << 11)) {
1812 2022 /* load */
1813   - gen_op_ldl_T0_T1();
  2023 + gen_ldst(ldl, s);
1814 2024 gen_movl_reg_T0(s, rd);
1815 2025 } else {
1816 2026 /* store */
1817 2027 gen_movl_T0_reg(s, rd);
1818   - gen_op_stl_T0_T1();
  2028 + gen_ldst(stl, s);
1819 2029 }
1820 2030 break;
1821 2031  
... ... @@ -1830,12 +2040,12 @@ static void disas_thumb_insn(DisasContext *s)
1830 2040  
1831 2041 if (insn & (1 << 11)) {
1832 2042 /* load */
1833   - gen_op_ldub_T0_T1();
  2043 + gen_ldst(ldub, s);
1834 2044 gen_movl_reg_T0(s, rd);
1835 2045 } else {
1836 2046 /* store */
1837 2047 gen_movl_T0_reg(s, rd);
1838   - gen_op_stb_T0_T1();
  2048 + gen_ldst(stb, s);
1839 2049 }
1840 2050 break;
1841 2051  
... ... @@ -1850,12 +2060,12 @@ static void disas_thumb_insn(DisasContext *s)
1850 2060  
1851 2061 if (insn & (1 << 11)) {
1852 2062 /* load */
1853   - gen_op_lduw_T0_T1();
  2063 + gen_ldst(lduw, s);
1854 2064 gen_movl_reg_T0(s, rd);
1855 2065 } else {
1856 2066 /* store */
1857 2067 gen_movl_T0_reg(s, rd);
1858   - gen_op_stw_T0_T1();
  2068 + gen_ldst(stw, s);
1859 2069 }
1860 2070 break;
1861 2071  
... ... @@ -1869,12 +2079,12 @@ static void disas_thumb_insn(DisasContext *s)
1869 2079  
1870 2080 if (insn & (1 << 11)) {
1871 2081 /* load */
1872   - gen_op_ldl_T0_T1();
  2082 + gen_ldst(ldl, s);
1873 2083 gen_movl_reg_T0(s, rd);
1874 2084 } else {
1875 2085 /* store */
1876 2086 gen_movl_T0_reg(s, rd);
1877   - gen_op_stl_T0_T1();
  2087 + gen_ldst(stl, s);
1878 2088 }
1879 2089 break;
1880 2090  
... ... @@ -1929,12 +2139,12 @@ static void disas_thumb_insn(DisasContext *s)
1929 2139 if (insn & (1 << i)) {
1930 2140 if (insn & (1 << 11)) {
1931 2141 /* pop */
1932   - gen_op_ldl_T0_T1();
  2142 + gen_ldst(ldl, s);
1933 2143 gen_movl_reg_T0(s, i);
1934 2144 } else {
1935 2145 /* push */
1936 2146 gen_movl_T0_reg(s, i);
1937   - gen_op_stl_T0_T1();
  2147 + gen_ldst(stl, s);
1938 2148 }
1939 2149 /* advance to the next address. */
1940 2150 gen_op_addl_T1_T2();
... ... @@ -1943,13 +2153,13 @@ static void disas_thumb_insn(DisasContext *s)
1943 2153 if (insn & (1 << 8)) {
1944 2154 if (insn & (1 << 11)) {
1945 2155 /* pop pc */
1946   - gen_op_ldl_T0_T1();
  2156 + gen_ldst(ldl, s);
1947 2157 /* don't set the pc until the rest of the instruction
1948 2158 has completed */
1949 2159 } else {
1950 2160 /* push lr */
1951 2161 gen_movl_T0_reg(s, 14);
1952   - gen_op_stl_T0_T1();
  2162 + gen_ldst(stl, s);
1953 2163 }
1954 2164 gen_op_addl_T1_T2();
1955 2165 }
... ... @@ -1978,19 +2188,20 @@ static void disas_thumb_insn(DisasContext *s)
1978 2188 if (insn & (1 << i)) {
1979 2189 if (insn & (1 << 11)) {
1980 2190 /* load */
1981   - gen_op_ldl_T0_T1();
  2191 + gen_ldst(ldl, s);
1982 2192 gen_movl_reg_T0(s, i);
1983 2193 } else {
1984 2194 /* store */
1985 2195 gen_movl_T0_reg(s, i);
1986   - gen_op_stl_T0_T1();
  2196 + gen_ldst(stl, s);
1987 2197 }
1988 2198 /* advance to the next address */
1989 2199 gen_op_addl_T1_T2();
1990 2200 }
1991 2201 }
1992 2202 /* Base register writeback. */
1993   - gen_movl_reg_T1(s, rn);
  2203 + if ((insn & (1 << rn)) == 0)
  2204 + gen_movl_reg_T1(s, rn);
1994 2205 break;
1995 2206  
1996 2207 case 13:
... ... @@ -2036,7 +2247,7 @@ static void disas_thumb_insn(DisasContext *s)
2036 2247 case 15:
2037 2248 /* branch and link [and switch to arm] */
2038 2249 offset = ((int32_t)insn << 21) >> 10;
2039   - insn = lduw(s->pc);
  2250 + insn = lduw_code(s->pc);
2040 2251 offset |= insn & 0x7ff;
2041 2252  
2042 2253 val = (uint32_t)s->pc + 2;
... ... @@ -2073,6 +2284,7 @@ static inline int gen_intermediate_code_internal(CPUState *env,
2073 2284 uint16_t *gen_opc_end;
2074 2285 int j, lj;
2075 2286 target_ulong pc_start;
  2287 + uint32_t next_page_start;
2076 2288  
2077 2289 /* generate intermediate code */
2078 2290 pc_start = tb->pc;
... ... @@ -2088,6 +2300,10 @@ static inline int gen_intermediate_code_internal(CPUState *env,
2088 2300 dc->singlestep_enabled = env->singlestep_enabled;
2089 2301 dc->condjmp = 0;
2090 2302 dc->thumb = env->thumb;
  2303 +#if !defined(CONFIG_USER_ONLY)
  2304 + dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
  2305 +#endif
  2306 + next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2091 2307 nb_gen_labels = 0;
2092 2308 lj = -1;
2093 2309 do {
... ... @@ -2124,12 +2340,13 @@ static inline int gen_intermediate_code_internal(CPUState *env,
2124 2340 }
2125 2341         /* Translation stops when a conditional branch is encountered.
2126 2342 * Otherwise the subsequent code could get translated several times.
2127   - */
  2343 + * Also stop translation when a page boundary is reached. This
  2344 +     * ensures prefetch aborts occur at the right place. */
2128 2345 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2129 2346 !env->singlestep_enabled &&
2130   - (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32));
2131   - /* It this stage dc->condjmp will only be set when the skipped
2132   - * instruction was a conditional branch, and teh PC has already been
  2347 + dc->pc < next_page_start);
  2348 + /* At this stage dc->condjmp will only be set when the skipped
  2349 + * instruction was a conditional branch, and the PC has already been
2133 2350 * written. */
2134 2351 if (__builtin_expect(env->singlestep_enabled, 0)) {
2135 2352 /* Make sure the pc is updated, and raise a debug exception. */
... ... @@ -2180,8 +2397,15 @@ static inline int gen_intermediate_code_internal(CPUState *env,
2180 2397 }
2181 2398 }
2182 2399 #endif
2183   - if (!search_pc)
  2400 + if (search_pc) {
  2401 + j = gen_opc_ptr - gen_opc_buf;
  2402 + lj++;
  2403 + while (lj <= j)
  2404 + gen_opc_instr_start[lj++] = 0;
  2405 + tb->size = 0;
  2406 + } else {
2184 2407 tb->size = dc->pc - pc_start;
  2408 + }
2185 2409 return 0;
2186 2410 }
2187 2411  
... ... @@ -2195,6 +2419,17 @@ int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2195 2419 return gen_intermediate_code_internal(env, tb, 1);
2196 2420 }
2197 2421  
  2422 +void cpu_reset(CPUARMState *env)
  2423 +{
  2424 +#if defined (CONFIG_USER_ONLY)
  2425 +    env->uncached_cpsr = ARM_CPU_MODE_USR;
  2426 +#else
  2427 +    /* SVC mode with interrupts disabled. */
  2428 +    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
  2429 +#endif
  2430 + env->regs[15] = 0;
  2431 +}
  2432 +
2198 2433 CPUARMState *cpu_arm_init(void)
2199 2434 {
2200 2435 CPUARMState *env;
... ... @@ -2203,6 +2438,8 @@ CPUARMState *cpu_arm_init(void)
2203 2438 if (!env)
2204 2439 return NULL;
2205 2440 cpu_exec_init(env);
  2441 + cpu_reset(env);
  2442 + tlb_flush(env, 1);
2206 2443 return env;
2207 2444 }
2208 2445  
... ... @@ -2211,6 +2448,10 @@ void cpu_arm_close(CPUARMState *env)
2211 2448 free(env);
2212 2449 }
2213 2450  
  2451 +static const char *cpu_mode_names[16] = {
  2452 + "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  2453 + "???", "???", "???", "und", "???", "???", "???", "sys"
  2454 +};
2214 2455 void cpu_dump_state(CPUState *env, FILE *f,
2215 2456 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
2216 2457 int flags)
... ... @@ -2221,6 +2462,7 @@ void cpu_dump_state(CPUState *env, FILE *f,
2221 2462 float s;
2222 2463 } s0, s1;
2223 2464 CPU_DoubleU d;
  2465 + uint32_t psr;
2224 2466  
2225 2467 for(i=0;i<16;i++) {
2226 2468 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
... ... @@ -2229,13 +2471,15 @@ void cpu_dump_state(CPUState *env, FILE *f,
2229 2471 else
2230 2472 cpu_fprintf(f, " ");
2231 2473 }
2232   - cpu_fprintf(f, "PSR=%08x %c%c%c%c %c\n",
2233   - env->cpsr,
2234   - env->cpsr & (1 << 31) ? 'N' : '-',
2235   - env->cpsr & (1 << 30) ? 'Z' : '-',
2236   - env->cpsr & (1 << 29) ? 'C' : '-',
2237   - env->cpsr & (1 << 28) ? 'V' : '-',
2238   - env->thumb ? 'T' : 'A');
  2474 + psr = cpsr_read(env);
  2475 +    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
  2476 + psr,
  2477 + psr & (1 << 31) ? 'N' : '-',
  2478 + psr & (1 << 30) ? 'Z' : '-',
  2479 + psr & (1 << 29) ? 'C' : '-',
  2480 + psr & (1 << 28) ? 'V' : '-',
  2481 + psr & CPSR_T ? 'T' : 'A',
  2482 + cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
2239 2483  
2240 2484 for (i = 0; i < 16; i++) {
2241 2485 d.d = env->vfp.regs[i];
... ... @@ -2250,27 +2494,3 @@ void cpu_dump_state(CPUState *env, FILE *f,
2250 2494 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.fpscr);
2251 2495 }
2252 2496  
2253   -target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
2254   -{
2255   - return addr;
2256   -}
2257   -
2258   -#if defined(CONFIG_USER_ONLY)
2259   -
2260   -int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
2261   - int is_user, int is_softmmu)
2262   -{
2263   - env->cp15_6 = address;
2264   - if (rw == 2) {
2265   - env->exception_index = EXCP_PREFETCH_ABORT;
2266   - } else {
2267   - env->exception_index = EXCP_DATA_ABORT;
2268   - }
2269   - return 1;
2270   -}
2271   -
2272   -#else
2273   -
2274   -#error not implemented
2275   -
2276   -#endif
... ...
... ... @@ -3359,6 +3359,19 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
3359 3359 tlb_flush(env, 1);
3360 3360 return 0;
3361 3361 }
  3362 +
  3363 +#elif defined(TARGET_ARM)
  3364 +
  3365 +/* ??? Need to implement these. */
  3366 +void cpu_save(QEMUFile *f, void *opaque)
  3367 +{
  3368 +}
  3369 +
  3370 +int cpu_load(QEMUFile *f, void *opaque, int version_id)
  3371 +{
  3372 + return 0;
  3373 +}
  3374 +
3362 3375 #else
3363 3376  
3364 3377 #warning No CPU save/restore functions
... ... @@ -4054,6 +4067,10 @@ void register_machines(void)
4054 4067 #else
4055 4068 qemu_register_machine(&sun4m_machine);
4056 4069 #endif
  4070 +#elif defined(TARGET_ARM)
  4071 + qemu_register_machine(&integratorcp_machine);
  4072 +#else
  4073 +#error unsupported CPU
4057 4074 #endif
4058 4075 }
4059 4076  
... ...
... ... @@ -928,6 +928,9 @@ void do_usb_add(const char *devname);
928 928 void do_usb_del(const char *devname);
929 929 void usb_info(void);
930 930  
  931 +/* integratorcp.c */
  932 +extern QEMUMachine integratorcp_machine;
  933 +
931 934 /* ps2.c */
932 935 void *ps2_kbd_init(void (*update_irq)(void *, int), void *update_arg);
933 936 void *ps2_mouse_init(void (*update_irq)(void *, int), void *update_arg);
... ...