/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK
//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif
#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size; 
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;
#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
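
/* Worked example (illustrative only; the actual widths depend on the
   target): assuming a 32-bit target with TARGET_PAGE_BITS == 12 and
   L2_BITS == 10, L1_BITS is also 10, so a virtual address decomposes
   as bits [31:22] -> index into l1_map, bits [21:12] -> index into the
   second-level PageDesc table, and bits [11:0] -> offset within the
   page; each table level then holds 1024 entries. */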
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif
/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;
/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
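
/* A subpage splits one target page into finer-grained I/O regions:
   each offset within the page (selected with SUBPAGE_IDX) carries its
   own read/write handlers, one per access size, plus the matching
   opaque pointers. */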
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;
    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;
        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;
        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED); 
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
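
/* Return the PageDesc for a target page number, allocating the
   second-level table on demand. In user mode the table is mmap()ed
   directly because qemu_malloc() could recurse into code that itself
   needs a PageDesc. */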
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
        return NULL;
#endif
    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED); 
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}
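
/* Return the PhysPageDesc for a physical page number. When 'alloc' is
   nonzero, missing tables are created and fresh entries start out as
   IO_MEM_UNASSIGNED; when 'alloc' is zero, NULL is returned for pages
   that were never registered. */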
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;
    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__) 
    {
        int flags;
        void *start = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size - 
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    nb_tbs = 0;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
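
/* Note on the list encodings used below: TB pointers stored in the
   per-page lists and in the jump lists carry a tag in their two low
   bits. In page lists the tag selects which of the TB's (at most two)
   pages the link belongs to; in jump lists the value 2 marks the list
   head stored in jmp_first. Masking with ~3 recovers the real
   pointer. */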
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
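
/* Remove a TB definitively: unlink it from the physical hash table
   and the per-page lists, drop it from every CPU's tb_jmp_cache, and
   unchain all jumps into it so the corresponding code is retranslated
   if reached again. */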
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;
    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    tb_invalidated_flag = 1;
    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
    tb_phys_invalidate_count++;
}

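/* Set 'len' consecutive bits starting at bit 'start' in the bitmap
   'tab'. For example, set_bits(tab, 3, 7) sets bits 3 through 9,
   i.e. the half-open bit range [3, 10). */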
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

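/* Build the code bitmap of a page: one bit per byte, set where at
   least one TB's code lies. The self-modifying-code fast path uses it
   to let writes that do not touch translated code proceed without any
   invalidation. */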
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
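
/* Translate and register a new TB for the guest code starting at 'pc'
   in the context described by 'cs_base', 'flags' and 'cflags'. If the
   TB pool or the code buffer is exhausted, everything is flushed
   first, so the second allocation cannot fail. */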
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;
    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
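
/* Fast path for guest stores: consult the page's code bitmap, when
   present, and fall back to a full range invalidation only if the
   written bytes overlap translated code. For example, a 4-byte write
   at address 'a' would reach tb_invalidate_phys_page_fast(a, 4) and
   return without invalidating anything when no TB covers [a, a+4). */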
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_io_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);
#if defined(TARGET_HAS_SMC) || 1
#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;
    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;
    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
1208
    }
bellard authored
1209
1210
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;
        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);
        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
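/* Invalidate any TB containing code at the breakpoint address:
   translate 'pc' to a ram address using the debug page walk, then
   flush the one-byte range so the code is retranslated on its next
   execution. */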
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;
    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    env->watchpoint[i].type = type;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}
/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;
    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
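
/* Example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, while "all" enables every mask
   listed in cpu_log_items. */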
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}
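
/* Illustrative usage (not part of the original source): a display adapter
   can detect which VRAM pages the guest touched since the last refresh by
   testing the dirty bitmap, then clear the flag for the next frame:

       if (cpu_physical_memory_get_dirty(page_addr, VGA_DIRTY_FLAG))
           redraw_page(page_addr);
       cpu_physical_memory_reset_dirty(vram_start, vram_end, VGA_DIRTY_FLAG);

   redraw_page(), page_addr, vram_start and vram_end are hypothetical
   names; cpu_physical_memory_get_dirty() and VGA_DIRTY_FLAG are the
   existing dirty-tracking interfaces used by the VGA code. */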

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
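
/* Note (added for illustration): te->addend is stored so that the fast
   path in softmmu_template.h can turn a guest virtual address into a
   host pointer with a single addition, roughly

       host_ptr = (uint8_t *)(long)(vaddr + te->addend);

   while the TLB_MMIO / TLB_NOTDIRTY bits set above make the page-match
   comparison fail and force such accesses through the slow I/O path. */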

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
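
/* Illustrative usage (not part of the original source): user-mode syscall
   emulation can validate a guest buffer before touching it, e.g.

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0)
           return -TARGET_EFAULT;

   guest_addr and size are hypothetical names; TARGET_EFAULT is the errno
   constant used by the user-mode emulation code. */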

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
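
/* Worked example (added for illustration), assuming 4 KiB target pages:
   registering a region with start_addr = 0x1000100 and orig_size = 0x80,
   then evaluating the macro for the page addr = 0x1000000, yields

       start_addr2  = 0x100   (offset of the region within the page)
       end_addr2    = 0x17f   (inclusive end offset)
       need_subpage = 1

   i.e. the region covers only bytes 0x100..0x17f of that page, so a
   subpage container is needed instead of mapping the page wholesale. */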

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
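
/* Illustrative usage (not part of the original source): board init code
   typically pairs qemu_ram_alloc() with this call, e.g.

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

   and maps device MMIO by passing the value returned by
   cpu_register_io_memory() as phys_offset. ram_size is a hypothetical
   variable; the pattern follows the machine models in hw/. */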

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong vaddr;
    int i;

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
                && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
            break;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
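
/* Illustrative usage (not part of the original source): a device supplies
   one handler per access width and maps the returned token, e.g.

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, iomemtype);

   mydev_read*, mydev_write*, base and s (the opaque device state) are
   hypothetical names; the pattern matches how the devices in hw/ use
   this API. */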
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
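
/* Illustrative usage (not part of the original source): target MMU code
   uses this to set accessed/dirty bits in a guest page table entry
   without marking the RAM page itself as modified, e.g. on i386 roughly

       pte = ldl_phys(pte_addr);
       if (!(pte & PG_ACCESSED_MASK))
           stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);

   pte and pte_addr are hypothetical names; PG_ACCESSED_MASK is the i386
   PTE accessed bit used by that target's helper code. */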

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
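
/* Illustrative usage (not part of the original source): the gdb stub goes
   through this helper so that memory reads honour the guest MMU, e.g.

       uint8_t buf[16];
       if (cpu_memory_rw_debug(env, addr, buf, sizeof(buf), 0) < 0)
           return -1;

   where the error path means the page is not currently mapped. The error
   handling shown here is hypothetical; gdbstub.c is the main caller of
   this function. */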

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif