/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H
#include "qemu-common.h"
#include "cpu-common.h"

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "softfloat.h"

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
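
/* Example (a sketch): tswap32() converts a 32 bit value between host and
   target byte order; it is an actual byte swap only when the two differ:

       uint32_t guest_word;                     // value in target byte order
       uint32_t host_val = tswap32(guest_word); // same value, host byte order
*/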

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian ! */
typedef union {
    float64 d;
#if defined(HOST_WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(HOST_WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
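
/* Examples of the naming scheme (a sketch):
 *
 *     ldub_p(p)         load unsigned 8 bit value, raw host access
 *     ldsw_le_p(p)      load signed 16 bit value, little endian, raw access
 *     stfq_be_p(p, v)   store 64 bit float, big endian, raw access
 *     ldl_kernel(p)     load 32 bit value, kernel mode soft MMU access
 */
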
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system-wide setting: bad */
#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */
static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
#define GUEST_BASE guest_base
#else
#define GUEST_BASE 0ul
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(__ret == (abi_ulong)__ret); \
    (abi_ulong)__ret; \
})
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest == (abi_ulong)__guest); \
})
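
/* Usage sketch (user mode emulation): translate a guest address to a host
   pointer for a direct access, and map it back again:

       abi_ulong guest_addr = ...;
       uint8_t *host_ptr = g2h(guest_addr);
       if (h2g_valid(host_ptr)) {
           abi_ulong back = h2g(host_ptr);   // == guest_addr
       }
*/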

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
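/* Example: assuming TARGET_PAGE_BITS == 12 (the value is per target),
   TARGET_PAGE_SIZE is 4096, TARGET_PAGE_MASK is ~0xfff, and
   TARGET_PAGE_ALIGN(0x1234) rounds up to 0x2000. */
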
/* ??? These should be the larger of unsigned long and target_ulong.  */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;
#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED  0x0020

void page_dump(FILE *f);

int walk_memory_regions(void *,
    int (*fn)(void *, unsigned long, unsigned long, unsigned long));

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);

void cpu_exec_init_all(unsigned long tb_size);
CPUState *cpu_copy(CPUState *env);
CPUState *qemu_get_cpu(int cpu);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);

void cpu_dump_statistics (CPUState *env, FILE *f,
                          int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                          int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)));

extern CPUState *first_cpu;
extern CPUState *cpu_single_env;

extern int64_t qemu_icount;
extern int use_icount;

#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10 /* Fast interrupt pending.  */
#define CPU_INTERRUPT_HALT   0x20 /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40 /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80 /* Debug event occurred.  */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending.  */
#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */
#define CPU_INTERRUPT_INIT   0x400 /* INIT pending. */
#define CPU_INTERRUPT_SIPI   0x800 /* SIPI pending. */
#define CPU_INTERRUPT_MCE    0x1000 /* (x86 only) MCE pending. */

void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);
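
/* Usage sketch: a device model raises a hardware interrupt request and
   clears it again once serviced (env is assumed to be a valid CPUState *):

       cpu_interrupt(env, CPU_INTERRUPT_HARD);
       ...
       cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
*/
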
void cpu_exit(CPUState *s);
int qemu_cpu_has_work(CPUState *env);
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);
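
/* Usage sketch (env and pc are assumed valid): insert a GDB breakpoint and
   remove it again through the returned reference:

       CPUBreakpoint *bp;
       if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
           ...
           cpu_breakpoint_remove_by_ref(env, bp);
       }
*/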

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not run timers while single stepping */
void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;
extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
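
/* Usage sketch: enable logging from a user supplied string; the item names
   ("in_asm", "int", ...) are the ones listed in cpu_log_items:

       int mask = cpu_str_to_log_mask("in_asm,int");
       if (mask) {
           cpu_set_log(mask);
       }
*/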

/* IO ports API */
#include "ioport.h"

/* memory API */

extern int phys_ram_fd;
extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;
extern ram_addr_t last_ram_offset;

/* physical memory access */


/* MMIO pages are identified by a combination of an IO device index and
   3 flags.  The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available.  */
#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS  - IO_MEM_SHIFT))
/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros.  */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK   (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY    (1 << 4)
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO        (1 << 5)
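
/* Illustrative check (a sketch, tlb_addr standing for a TLB entry's address
   word): an entry with none of the low flag bits set may take the fast path
   of a direct ram access:

       if (!(tlb_addr & (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO))) {
           ... direct ram access ...
       }
*/
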
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define KQEMU_DIRTY_FLAG     0x04
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);
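
/* Usage sketch: a display device polls and then clears the VGA dirty state
   (addr, start and end are illustrative ram addresses):

       if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
           ... redraw the page at addr ...
       }
       cpu_physical_memory_reset_dirty(start, end, VGA_DIRTY_FLAG);
*/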

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
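
/* Usage sketch: a device marks its MMIO window as coalesced so that writes
   may be batched (fb_base and fb_size are illustrative, not defined here):

       qemu_register_coalesced_mmio(fb_base, fb_size);
       ...
       qemu_unregister_coalesced_mmio(fb_base, fb_size);
*/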

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
     */
    __asm__ __volatile__ (
        "mftb    %0\n\t"
        "cmpwi   %0,0\n\t"
        "beq-    $-8"
        : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ (
        "mftbu   %1\n\t"
        "mftb    %L0\n\t"
        "mftbu   %0\n\t"
        "cmpw    %0,%1\n\t"
        "bne     $-16"
        : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}
#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low,high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}
#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}
#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
	int64_t val;
	asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
	return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}
#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if     defined(_LP64)
        uint64_t        rval;
        asm volatile("rd %%tick,%0" : "=r"(rval));
        return rval;
#else
        union {
                uint64_t i64;
                struct {
                        uint32_t high;
                        uint32_t low;
                }       i32;
        } rval;
        asm volatile("rd %%tick,%1; srlx %1,32,%0"
                : "=r"(rval.i32.high), "=r"(rval.i32.low));
        return rval.i64;
#endif
}

#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */

static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}
extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;
#endif
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc);
#endif /* CPU_ALL_H */