Commit e2eef1703b536d42b89f8046faf7adeaa0e42ba1

Authored by pbrook
1 parent 2c44375d

Remove dead and bitrotten "qemu-fast" code.

Only build softmmu+MMIO handlers for system emulation.


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4695 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 2 changed files with 16 additions and 130 deletions
@@ -95,11 +95,13 @@ unsigned long code_gen_buffer_size;
95 unsigned long code_gen_buffer_max_size; 95 unsigned long code_gen_buffer_max_size;
96 uint8_t *code_gen_ptr; 96 uint8_t *code_gen_ptr;
97 97
  98 +#if !defined(CONFIG_USER_ONLY)
98 ram_addr_t phys_ram_size; 99 ram_addr_t phys_ram_size;
99 int phys_ram_fd; 100 int phys_ram_fd;
100 uint8_t *phys_ram_base; 101 uint8_t *phys_ram_base;
101 uint8_t *phys_ram_dirty; 102 uint8_t *phys_ram_dirty;
102 static ram_addr_t phys_ram_alloc_offset = 0; 103 static ram_addr_t phys_ram_alloc_offset = 0;
  104 +#endif
103 105
104 CPUState *first_cpu; 106 CPUState *first_cpu;
105 /* current CPU in the current thread. It is only valid inside 107 /* current CPU in the current thread. It is only valid inside
@@ -137,8 +139,6 @@ typedef struct PhysPageDesc {
137 #define L1_SIZE (1 << L1_BITS) 139 #define L1_SIZE (1 << L1_BITS)
138 #define L2_SIZE (1 << L2_BITS) 140 #define L2_SIZE (1 << L2_BITS)
139 141
140 -static void io_mem_init(void);  
141 -  
142 unsigned long qemu_real_host_page_size; 142 unsigned long qemu_real_host_page_size;
143 unsigned long qemu_host_page_bits; 143 unsigned long qemu_host_page_bits;
144 unsigned long qemu_host_page_size; 144 unsigned long qemu_host_page_size;
@@ -148,12 +148,14 @@ unsigned long qemu_host_page_mask;
148 static PageDesc *l1_map[L1_SIZE]; 148 static PageDesc *l1_map[L1_SIZE];
149 PhysPageDesc **l1_phys_map; 149 PhysPageDesc **l1_phys_map;
150 150
  151 +#if !defined(CONFIG_USER_ONLY)
  152 +static void io_mem_init(void);
  153 +
151 /* io memory support */ 154 /* io memory support */
152 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4]; 155 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
153 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; 156 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
154 void *io_mem_opaque[IO_MEM_NB_ENTRIES]; 157 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
155 static int io_mem_nb; 158 static int io_mem_nb;
156 -#if defined(CONFIG_SOFTMMU)  
157 static int io_mem_watch; 159 static int io_mem_watch;
158 #endif 160 #endif
159 161
@@ -408,7 +410,9 @@ void cpu_exec_init_all(unsigned long tb_size)
408 code_gen_alloc(tb_size); 410 code_gen_alloc(tb_size);
409 code_gen_ptr = code_gen_buffer; 411 code_gen_ptr = code_gen_buffer;
410 page_init(); 412 page_init();
  413 +#if !defined(CONFIG_USER_ONLY)
411 io_mem_init(); 414 io_mem_init();
  415 +#endif
412 } 416 }
413 417
414 void cpu_exec_init(CPUState *env) 418 void cpu_exec_init(CPUState *env)
@@ -1536,9 +1540,6 @@ void tlb_flush(CPUState *env, int flush_global)
1536 1540
1537 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); 1541 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1538 1542
1539 -#if !defined(CONFIG_SOFTMMU)  
1540 - munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);  
1541 -#endif  
1542 #ifdef USE_KQEMU 1543 #ifdef USE_KQEMU
1543 if (env->kqemu_enabled) { 1544 if (env->kqemu_enabled) {
1544 kqemu_flush(env, flush_global); 1545 kqemu_flush(env, flush_global);
@@ -1585,10 +1586,6 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
1585 1586
1586 tlb_flush_jmp_cache(env, addr); 1587 tlb_flush_jmp_cache(env, addr);
1587 1588
1588 -#if !defined(CONFIG_SOFTMMU)  
1589 - if (addr < MMAP_AREA_END)  
1590 - munmap((void *)addr, TARGET_PAGE_SIZE);  
1591 -#endif  
1592 #ifdef USE_KQEMU 1589 #ifdef USE_KQEMU
1593 if (env->kqemu_enabled) { 1590 if (env->kqemu_enabled) {
1594 kqemu_flush_page(env, addr); 1591 kqemu_flush_page(env, addr);
@@ -1674,34 +1671,6 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1674 #endif 1671 #endif
1675 #endif 1672 #endif
1676 } 1673 }
1677 -  
1678 -#if !defined(CONFIG_SOFTMMU)  
1679 - /* XXX: this is expensive */  
1680 - {  
1681 - VirtPageDesc *p;  
1682 - int j;  
1683 - target_ulong addr;  
1684 -  
1685 - for(i = 0; i < L1_SIZE; i++) {  
1686 - p = l1_virt_map[i];  
1687 - if (p) {  
1688 - addr = i << (TARGET_PAGE_BITS + L2_BITS);  
1689 - for(j = 0; j < L2_SIZE; j++) {  
1690 - if (p->valid_tag == virt_valid_tag &&  
1691 - p->phys_addr >= start && p->phys_addr < end &&  
1692 - (p->prot & PROT_WRITE)) {  
1693 - if (addr < MMAP_AREA_END) {  
1694 - mprotect((void *)addr, TARGET_PAGE_SIZE,  
1695 - p->prot & ~PROT_WRITE);  
1696 - }  
1697 - }  
1698 - addr += TARGET_PAGE_SIZE;  
1699 - p++;  
1700 - }  
1701 - }  
1702 - }  
1703 - }  
1704 -#endif  
1705 } 1674 }
1706 1675
1707 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) 1676 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
@@ -1795,9 +1764,6 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1795 #endif 1764 #endif
1796 1765
1797 ret = 0; 1766 ret = 0;
1798 -#if !defined(CONFIG_SOFTMMU)  
1799 - if (is_softmmu)  
1800 -#endif  
1801 { 1767 {
1802 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { 1768 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1803 /* IO memory case */ 1769 /* IO memory case */
@@ -1857,92 +1823,9 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1857 te->addr_write = -1; 1823 te->addr_write = -1;
1858 } 1824 }
1859 } 1825 }
1860 -#if !defined(CONFIG_SOFTMMU)  
1861 - else {  
1862 - if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {  
1863 - /* IO access: no mapping is done as it will be handled by the  
1864 - soft MMU */  
1865 - if (!(env->hflags & HF_SOFTMMU_MASK))  
1866 - ret = 2;  
1867 - } else {  
1868 - void *map_addr;  
1869 -  
1870 - if (vaddr >= MMAP_AREA_END) {  
1871 - ret = 2;  
1872 - } else {  
1873 - if (prot & PROT_WRITE) {  
1874 - if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||  
1875 -#if defined(TARGET_HAS_SMC) || 1  
1876 - first_tb ||  
1877 -#endif  
1878 - ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&  
1879 - !cpu_physical_memory_is_dirty(pd))) {  
1880 - /* ROM: we do as if code was inside */  
1881 - /* if code is present, we only map as read only and save the  
1882 - original mapping */  
1883 - VirtPageDesc *vp;  
1884 -  
1885 - vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);  
1886 - vp->phys_addr = pd;  
1887 - vp->prot = prot;  
1888 - vp->valid_tag = virt_valid_tag;  
1889 - prot &= ~PAGE_WRITE;  
1890 - }  
1891 - }  
1892 - map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,  
1893 - MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));  
1894 - if (map_addr == MAP_FAILED) {  
1895 - cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",  
1896 - paddr, vaddr);  
1897 - }  
1898 - }  
1899 - }  
1900 - }  
1901 -#endif  
1902 return ret; 1826 return ret;
1903 } 1827 }
1904 1828
1905 -/* called from signal handler: invalidate the code and unprotect the  
1906 - page. Return TRUE if the fault was succesfully handled. */  
1907 -int page_unprotect(target_ulong addr, unsigned long pc, void *puc)  
1908 -{  
1909 -#if !defined(CONFIG_SOFTMMU)  
1910 - VirtPageDesc *vp;  
1911 -  
1912 -#if defined(DEBUG_TLB)  
1913 - printf("page_unprotect: addr=0x%08x\n", addr);  
1914 -#endif  
1915 - addr &= TARGET_PAGE_MASK;  
1916 -  
1917 - /* if it is not mapped, no need to worry here */  
1918 - if (addr >= MMAP_AREA_END)  
1919 - return 0;  
1920 - vp = virt_page_find(addr >> TARGET_PAGE_BITS);  
1921 - if (!vp)  
1922 - return 0;  
1923 - /* NOTE: in this case, validate_tag is _not_ tested as it  
1924 - validates only the code TLB */  
1925 - if (vp->valid_tag != virt_valid_tag)  
1926 - return 0;  
1927 - if (!(vp->prot & PAGE_WRITE))  
1928 - return 0;  
1929 -#if defined(DEBUG_TLB)  
1930 - printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",  
1931 - addr, vp->phys_addr, vp->prot);  
1932 -#endif  
1933 - if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)  
1934 - cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",  
1935 - (unsigned long)addr, vp->prot);  
1936 - /* set the dirty bit */  
1937 - phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;  
1938 - /* flush the code inside */  
1939 - tb_invalidate_phys_page(vp->phys_addr, pc, puc);  
1940 - return 1;  
1941 -#else  
1942 - return 0;  
1943 -#endif  
1944 -}  
1945 -  
1946 #else 1829 #else
1947 1830
1948 void tlb_flush(CPUState *env, int flush_global) 1831 void tlb_flush(CPUState *env, int flush_global)
@@ -2130,6 +2013,7 @@ static inline void tlb_set_dirty(CPUState *env,
2130 } 2013 }
2131 #endif /* defined(CONFIG_USER_ONLY) */ 2014 #endif /* defined(CONFIG_USER_ONLY) */
2132 2015
  2016 +#if !defined(CONFIG_USER_ONLY)
2133 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, 2017 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2134 ram_addr_t memory); 2018 ram_addr_t memory);
2135 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 2019 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
@@ -2387,7 +2271,6 @@ static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2387 notdirty_mem_writel, 2271 notdirty_mem_writel,
2388 }; 2272 };
2389 2273
2390 -#if defined(CONFIG_SOFTMMU)  
2391 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks, 2274 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2392 so these check for a hit then pass through to the normal out-of-line 2275 so these check for a hit then pass through to the normal out-of-line
2393 phys routines. */ 2276 phys routines. */
@@ -2463,7 +2346,6 @@ static CPUWriteMemoryFunc *watch_mem_write[3] = {
2463 watch_mem_writew, 2346 watch_mem_writew,
2464 watch_mem_writel, 2347 watch_mem_writel,
2465 }; 2348 };
2466 -#endif  
2467 2349
2468 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr, 2350 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2469 unsigned int len) 2351 unsigned int len)
@@ -2619,10 +2501,8 @@ static void io_mem_init(void)
2619 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL); 2501 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2620 io_mem_nb = 5; 2502 io_mem_nb = 5;
2621 2503
2622 -#if defined(CONFIG_SOFTMMU)  
2623 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read, 2504 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2624 watch_mem_write, NULL); 2505 watch_mem_write, NULL);
2625 -#endif  
2626 /* alloc dirty bits array */ 2506 /* alloc dirty bits array */
2627 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS); 2507 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2628 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS); 2508 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
@@ -2672,6 +2552,8 @@ CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2672 return io_mem_read[io_index >> IO_MEM_SHIFT]; 2552 return io_mem_read[io_index >> IO_MEM_SHIFT];
2673 } 2553 }
2674 2554
  2555 +#endif /* !defined(CONFIG_USER_ONLY) */
  2556 +
2675 /* physical memory access (slow version, mainly for debug) */ 2557 /* physical memory access (slow version, mainly for debug) */
2676 #if defined(CONFIG_USER_ONLY) 2558 #if defined(CONFIG_USER_ONLY)
2677 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 2559 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
target-cris/op_helper.c
@@ -24,6 +24,10 @@
24 #include "mmu.h" 24 #include "mmu.h"
25 #include "helper.h" 25 #include "helper.h"
26 26
  27 +#define D(x)
  28 +
  29 +#if !defined(CONFIG_USER_ONLY)
  30 +
27 #define MMUSUFFIX _mmu 31 #define MMUSUFFIX _mmu
28 32
29 #define SHIFT 0 33 #define SHIFT 0
@@ -38,8 +42,6 @@
38 #define SHIFT 3 42 #define SHIFT 3
39 #include "softmmu_template.h" 43 #include "softmmu_template.h"
40 44
41 -#define D(x)  
42 -  
43 /* Try to fill the TLB and return an exception if error. If retaddr is 45 /* Try to fill the TLB and return an exception if error. If retaddr is
44 NULL, it means that the function was called in C code (i.e. not 46 NULL, it means that the function was called in C code (i.e. not
45 from generated code or from helper.c) */ 47 from generated code or from helper.c) */
@@ -78,6 +80,8 @@ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
78 env = saved_env; 80 env = saved_env;
79 } 81 }
80 82
  83 +#endif
  84 +
81 void helper_raise_exception(uint32_t index) 85 void helper_raise_exception(uint32_t index)
82 { 86 {
83 env->exception_index = index; 87 env->exception_index = index;