Commit 84b7b8e778937f1ec3cbdb8914261a2fe0067ef2
Author: bellard
Parent: 5cf38396

PAGE_EXEC support in TLBs

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1676 c046a42c-6fe2-441c-8c8c-71466251a162
cpu-defs.h
@@ -80,7 +80,8 @@ typedef unsigned long ram_addr_t;
 #define TB_JMP_CACHE_BITS 12
 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
 
-#define CPU_TLB_SIZE 256
+#define CPU_TLB_BITS 8
+#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
 
 typedef struct CPUTLBEntry {
     /* bit 31 to TARGET_PAGE_BITS : virtual address
@@ -89,7 +90,9 @@ typedef struct CPUTLBEntry {
        bit 3 : indicates that the entry is invalid
        bit 2..0 : zero
     */
-    target_ulong address;
+    target_ulong addr_read;
+    target_ulong addr_write;
+    target_ulong addr_code;
    /* addend to virtual address to get physical address */
    target_phys_addr_t addend;
 } CPUTLBEntry;
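Note: a TLB entry previously held a single comparator shared by all access kinds; it now holds one per kind, so a page can, for example, be readable but not executable. A minimal sketch of how a lookup consults the field matching the access kind (tlb_hit is a hypothetical helper name, not part of this commit; the fast paths below open-code the same compare):

    /* sketch: each access kind compares against its own field;
       0 = data load, 1 = data store, 2 = code fetch */
    static inline int tlb_hit(CPUTLBEntry *te, target_ulong addr, int kind)
    {
        target_ulong cmp = (kind == 0) ? te->addr_read :
                           (kind == 1) ? te->addr_write : te->addr_code;
        return cmp == (addr & TARGET_PAGE_MASK);
    }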
@@ -105,8 +108,7 @@ typedef struct CPUTLBEntry {
     target_ulong mem_write_vaddr; /* target virtual addr at which the \
                                      memory was written */             \
     /* 0 = kernel, 1 = user */                                         \
-    CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];                             \
-    CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];                            \
+    CPUTLBEntry tlb_table[2][CPU_TLB_SIZE];                            \
     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];          \
                                                                        \
     /* from this point: preserved by CPU reset */                      \
exec-all.h
@@ -98,9 +98,17 @@ void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
 void tb_invalidate_page_range(target_ulong start, target_ulong end);
 void tlb_flush_page(CPUState *env, target_ulong addr);
 void tlb_flush(CPUState *env, int flush_global);
-int tlb_set_page(CPUState *env, target_ulong vaddr,
-                 target_phys_addr_t paddr, int prot,
-                 int is_user, int is_softmmu);
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+                      target_phys_addr_t paddr, int prot,
+                      int is_user, int is_softmmu);
+static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
+                               target_phys_addr_t paddr, int prot,
+                               int is_user, int is_softmmu)
+{
+    if (prot & PAGE_READ)
+        prot |= PAGE_EXEC;
+    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
+}
 
 #define CODE_GEN_MAX_SIZE 65536
 #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
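Note: the static inline tlb_set_page() wrapper preserves the old behaviour for targets that have not yet been converted to pass PAGE_EXEC: any readable page is also made executable. A converted target would call tlb_set_page_exec() directly with an explicit protection mask, e.g. (illustrative values, not from this commit):

    /* unconverted target: wrapper adds PAGE_EXEC to a readable page */
    tlb_set_page(env, vaddr, paddr, PAGE_READ | PAGE_WRITE, is_user, 1);
    /* converted target: a readable data page that must not be executed */
    tlb_set_page_exec(env, vaddr, paddr, PAGE_READ, is_user, 1);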
@@ -554,15 +562,15 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
 #else
 #error unimplemented CPU
 #endif
-    if (__builtin_expect(env->tlb_read[is_user][index].address !=
+    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                          (addr & TARGET_PAGE_MASK), 0)) {
         ldub_code(addr);
     }
-    pd = env->tlb_read[is_user][index].address & ~TARGET_PAGE_MASK;
+    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
     if (pd > IO_MEM_ROM) {
         cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
     }
-    return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base;
+    return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
 }
 #endif
 
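Note: get_phys_addr_code() now probes addr_code rather than the old shared address field, so a translation-time code fetch only hits entries installed with PAGE_EXEC; on a mismatch, the ldub_code() slow path refills the entry (or raises a fault) before the physical address is computed from the addend.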
exec.c
@@ -1209,10 +1209,12 @@ void tlb_flush(CPUState *env, int flush_global)
     env->current_tb = NULL;
 
     for(i = 0; i < CPU_TLB_SIZE; i++) {
-        env->tlb_read[0][i].address = -1;
-        env->tlb_write[0][i].address = -1;
-        env->tlb_read[1][i].address = -1;
-        env->tlb_write[1][i].address = -1;
+        env->tlb_table[0][i].addr_read = -1;
+        env->tlb_table[0][i].addr_write = -1;
+        env->tlb_table[0][i].addr_code = -1;
+        env->tlb_table[1][i].addr_read = -1;
+        env->tlb_table[1][i].addr_write = -1;
+        env->tlb_table[1][i].addr_code = -1;
     }
 
     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
@@ -1230,9 +1232,16 @@ void tlb_flush(CPUState *env, int flush_global)
 
 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 {
-    if (addr == (tlb_entry->address &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
-        tlb_entry->address = -1;
+    if (addr == (tlb_entry->addr_read &
+                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
+        addr == (tlb_entry->addr_write &
+                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
+        addr == (tlb_entry->addr_code &
+                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        tlb_entry->addr_read = -1;
+        tlb_entry->addr_write = -1;
+        tlb_entry->addr_code = -1;
+    }
 }
 
 void tlb_flush_page(CPUState *env, target_ulong addr)
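Note: since the three comparators now live in one entry, tlb_flush_entry() must match on any of them and, on a hit, invalidate all three together; clearing only the matching field would leave a stale mapping reachable through another access kind. This is also why the per-page flush below shrinks to two calls, one per entry rather than one per array.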
@@ -1249,10 +1258,8 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
 
     addr &= TARGET_PAGE_MASK;
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_flush_entry(&env->tlb_read[0][i], addr);
-    tlb_flush_entry(&env->tlb_write[0][i], addr);
-    tlb_flush_entry(&env->tlb_read[1][i], addr);
-    tlb_flush_entry(&env->tlb_write[1][i], addr);
+    tlb_flush_entry(&env->tlb_table[0][i], addr);
+    tlb_flush_entry(&env->tlb_table[1][i], addr);
 
     for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
         tb = env->tb_jmp_cache[i];
@@ -1295,10 +1302,10 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                          unsigned long start, unsigned long length)
 {
     unsigned long addr;
-    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
-        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
+    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
+        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
         if ((addr - start) < length) {
-            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
+            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
         }
     }
 }
@@ -1340,9 +1347,9 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
     start1 = start + (unsigned long)phys_ram_base;
     for(env = first_cpu; env != NULL; env = env->next_cpu) {
         for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
+            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
         for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
+            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
     }
 
 #if !defined(CONFIG_SOFTMMU)
@@ -1378,11 +1385,11 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
 {
     ram_addr_t ram_addr;
 
-    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
-        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
+    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
+        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
             tlb_entry->addend - (unsigned long)phys_ram_base;
         if (!cpu_physical_memory_is_dirty(ram_addr)) {
-            tlb_entry->address |= IO_MEM_NOTDIRTY;
+            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
         }
     }
 }
@@ -1392,19 +1399,19 @@ void cpu_tlb_update_dirty(CPUState *env)
 {
     int i;
     for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_write[0][i]);
+        tlb_update_dirty(&env->tlb_table[0][i]);
     for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_write[1][i]);
+        tlb_update_dirty(&env->tlb_table[1][i]);
 }
 
 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                   unsigned long start)
 {
     unsigned long addr;
-    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
-        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
+    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
+        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
         if (addr == start) {
-            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
+            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
         }
     }
 }
@@ -1418,17 +1425,17 @@ static inline void tlb_set_dirty(CPUState *env,
 
     addr &= TARGET_PAGE_MASK;
     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_set_dirty1(&env->tlb_write[0][i], addr);
-    tlb_set_dirty1(&env->tlb_write[1][i], addr);
+    tlb_set_dirty1(&env->tlb_table[0][i], addr);
+    tlb_set_dirty1(&env->tlb_table[1][i], addr);
 }
 
 /* add a new TLB entry. At most one entry for a given virtual address
    is permitted. Return 0 if OK or 2 if the page could not be mapped
    (can only happen in non SOFTMMU mode for I/O pages or pages
    conflicting with the host address space). */
-int tlb_set_page(CPUState *env, target_ulong vaddr,
-                 target_phys_addr_t paddr, int prot,
-                 int is_user, int is_softmmu)
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+                      target_phys_addr_t paddr, int prot,
+                      int is_user, int is_softmmu)
 {
     PhysPageDesc *p;
     unsigned long pd;
@@ -1436,6 +1443,7 @@ int tlb_set_page(CPUState *env, target_ulong vaddr,
     target_ulong address;
     target_phys_addr_t addend;
     int ret;
+    CPUTLBEntry *te;
 
     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
     if (!p) {
@@ -1445,7 +1453,7 @@ int tlb_set_page(CPUState *env, target_ulong vaddr,
     }
 #if defined(DEBUG_TLB)
     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
-           vaddr, paddr, prot, is_user, is_softmmu, pd);
+           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
 #endif
 
     ret = 0;
@@ -1465,29 +1473,30 @@ int tlb_set_page(CPUState *env, target_ulong vaddr,
 
     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     addend -= vaddr;
+    te = &env->tlb_table[is_user][index];
+    te->addend = addend;
     if (prot & PAGE_READ) {
-        env->tlb_read[is_user][index].address = address;
-        env->tlb_read[is_user][index].addend = addend;
+        te->addr_read = address;
+    } else {
+        te->addr_read = -1;
+    }
+    if (prot & PAGE_EXEC) {
+        te->addr_code = address;
     } else {
-        env->tlb_read[is_user][index].address = -1;
-        env->tlb_read[is_user][index].addend = -1;
+        te->addr_code = -1;
     }
     if (prot & PAGE_WRITE) {
         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
             /* ROM: access is ignored (same as unassigned) */
-            env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
-            env->tlb_write[is_user][index].addend = addend;
+            te->addr_write = vaddr | IO_MEM_ROM;
         } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                    !cpu_physical_memory_is_dirty(pd)) {
-            env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
-            env->tlb_write[is_user][index].addend = addend;
+            te->addr_write = vaddr | IO_MEM_NOTDIRTY;
         } else {
-            env->tlb_write[is_user][index].address = address;
-            env->tlb_write[is_user][index].addend = addend;
+            te->addr_write = address;
         }
     } else {
-        env->tlb_write[is_user][index].address = -1;
-        env->tlb_write[is_user][index].addend = -1;
+        te->addr_write = -1;
     }
 }
 #if !defined(CONFIG_SOFTMMU)
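Note: the addend is now written once per entry, while each prot bit fills its own comparator: PAGE_READ fills addr_read, PAGE_EXEC fills addr_code, and PAGE_WRITE fills addr_write (with the IO_MEM_ROM and IO_MEM_NOTDIRTY encodings folded into the write comparator as before). A missing permission parks the corresponding field at -1, which can never equal a page-aligned address, so the fast-path compare fails and the access takes the slow path.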
@@ -1586,9 +1595,9 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
 {
 }
 
-int tlb_set_page(CPUState *env, target_ulong vaddr,
-                 target_phys_addr_t paddr, int prot,
-                 int is_user, int is_softmmu)
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+                      target_phys_addr_t paddr, int prot,
+                      int is_user, int is_softmmu)
 {
     return 0;
 }
@@ -2052,6 +2061,41 @@ uint32_t ldl_phys(target_phys_addr_t addr)
     return val;
 }
 
+/* warning: addr must be aligned */
+uint64_t ldq_phys(target_phys_addr_t addr)
+{
+    int io_index;
+    uint8_t *ptr;
+    uint64_t val;
+    unsigned long pd;
+    PhysPageDesc *p;
+
+    p = phys_page_find(addr >> TARGET_PAGE_BITS);
+    if (!p) {
+        pd = IO_MEM_UNASSIGNED;
+    } else {
+        pd = p->phys_offset;
+    }
+
+    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
+        /* I/O case */
+        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+#ifdef TARGET_WORDS_BIGENDIAN
+        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
+        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
+#else
+        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
+#endif
+    } else {
+        /* RAM case */
+        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+            (addr & ~TARGET_PAGE_MASK);
+        val = ldq_p(ptr);
+    }
+    return val;
+}
+
 /* XXX: optimize */
 uint32_t ldub_phys(target_phys_addr_t addr)
 {
@@ -2068,14 +2112,6 @@ uint32_t lduw_phys(target_phys_addr_t addr)
     return tswap16(val);
 }
 
-/* XXX: optimize */
-uint64_t ldq_phys(target_phys_addr_t addr)
-{
-    uint64_t val;
-    cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
-    return tswap64(val);
-}
-
 /* warning: addr must be aligned. The ram page is not masked as dirty
    and the code inside is not invalidated. It is useful if the dirty
    bits are used to track modified PTEs */
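Note: ldq_phys() loses its generic cpu_physical_memory_read()-based fallback and gains a proper implementation modeled on ldl_phys(): for I/O pages the 64-bit access is split into two 32-bit reads through the device's word callback (the [2] slot of io_mem_read), composed in target endian order; for RAM it reads directly through ldq_p().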
softmmu_header.h
@@ -93,6 +93,11 @@
 #define RES_TYPE int
 #endif
 
+#if ACCESS_TYPE == 3
+#define ADDR_READ addr_code
+#else
+#define ADDR_READ addr_read
+#endif
 
 DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                          int is_user);
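Note: ACCESS_TYPE 3 is the code-fetch instantiation of this header, so its generated loads compare against addr_code; every other access type keeps using addr_read. The #if ACCESS_TYPE != 3 guards added further down compile out the store and float helpers for that instantiation, since code fetch is read-only.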
@@ -101,6 +106,8 @@ void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE
 #if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \
     (ACCESS_TYPE <= 1) && defined(ASM_SOFTMMU)
 
+#define CPU_TLB_ENTRY_BITS 4
+
 static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
 {
     int res;
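Note: the entry grew from two fields (address plus addend, 8 bytes on a 32-bit build) to four, which is why the i386 fast-path asm now scales the TLB index by 1 << CPU_TLB_ENTRY_BITS = 16 bytes, and why the addend offsets below change: %edx points at addr_read in the load paths (addend sits 12 bytes further) but at addr_write in the store path (8 bytes further). A minimal compile-time sketch, assuming 32-bit target_ulong and target_phys_addr_t (the stand-in typedefs are illustrative only, not the real configuration headers):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint32_t target_ulong;        /* stand-in for this sketch */
    typedef uint32_t target_phys_addr_t;  /* stand-in for this sketch */

    typedef struct CPUTLBEntry {
        target_ulong addr_read;           /* offset 0  */
        target_ulong addr_write;          /* offset 4  */
        target_ulong addr_code;           /* offset 8  */
        target_phys_addr_t addend;        /* offset 12 */
    } CPUTLBEntry;

    /* "addl 12(%%edx)" in the load asm and "addl 8(%%edx)" in the
       store asm both reach addend from their respective base field */
    _Static_assert(sizeof(CPUTLBEntry) == 1 << 4, "CPU_TLB_ENTRY_BITS");
    _Static_assert(offsetof(CPUTLBEntry, addend)
                   - offsetof(CPUTLBEntry, addr_write) == 8, "store offset");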
@@ -120,7 +127,7 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
                   "movl %%eax, %0\n"
                   "jmp 2f\n"
                   "1:\n"
-                  "addl 4(%%edx), %%eax\n"
+                  "addl 12(%%edx), %%eax\n"
 #if DATA_SIZE == 1
                   "movzbl (%%eax), %0\n"
 #elif DATA_SIZE == 2
@@ -133,10 +140,10 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
                   "2:\n"
                   : "=r" (res)
                   : "r" (ptr),
-                  "i" ((CPU_TLB_SIZE - 1) << 3),
-                  "i" (TARGET_PAGE_BITS - 3),
+                  "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
+                  "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                   "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
-                  "m" (*(uint32_t *)offsetof(CPUState, tlb_read[CPU_MEM_INDEX][0].address)),
+                  "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)),
                   "i" (CPU_MEM_INDEX),
                   "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
                   : "%eax", "%ecx", "%edx", "memory", "cc");
@@ -169,7 +176,7 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
 #endif
                   "jmp 2f\n"
                   "1:\n"
-                  "addl 4(%%edx), %%eax\n"
+                  "addl 12(%%edx), %%eax\n"
 #if DATA_SIZE == 1
                   "movsbl (%%eax), %0\n"
 #elif DATA_SIZE == 2
@@ -180,10 +187,10 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
                   "2:\n"
                   : "=r" (res)
                   : "r" (ptr),
-                  "i" ((CPU_TLB_SIZE - 1) << 3),
-                  "i" (TARGET_PAGE_BITS - 3),
+                  "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
+                  "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                   "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
-                  "m" (*(uint32_t *)offsetof(CPUState, tlb_read[CPU_MEM_INDEX][0].address)),
+                  "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)),
                   "i" (CPU_MEM_INDEX),
                   "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
                   : "%eax", "%ecx", "%edx", "memory", "cc");
@@ -216,7 +223,7 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
                   "popl %%eax\n"
                   "jmp 2f\n"
                   "1:\n"
-                  "addl 4(%%edx), %%eax\n"
+                  "addl 8(%%edx), %%eax\n"
 #if DATA_SIZE == 1
                   "movb %b1, (%%eax)\n"
 #elif DATA_SIZE == 2
@@ -232,10 +239,10 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
                   /* NOTE: 'q' would be needed as constraint, but we could not use it
                      with T1 ! */
                   "r" (v),
-                  "i" ((CPU_TLB_SIZE - 1) << 3),
-                  "i" (TARGET_PAGE_BITS - 3),
+                  "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
+                  "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                   "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
-                  "m" (*(uint32_t *)offsetof(CPUState, tlb_write[CPU_MEM_INDEX][0].address)),
+                  "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_write)),
                   "i" (CPU_MEM_INDEX),
                   "m" (*(uint8_t *)&glue(glue(__st, SUFFIX), MMUSUFFIX))
                   : "%eax", "%ecx", "%edx", "memory", "cc");
@@ -256,11 +263,11 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
     addr = ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     is_user = CPU_MEM_INDEX;
-    if (__builtin_expect(env->tlb_read[is_user][index].address !=
+    if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ !=
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
         res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
     } else {
-        physaddr = addr + env->tlb_read[is_user][index].addend;
+        physaddr = addr + env->tlb_table[is_user][index].addend;
         res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
     }
     return res;
@@ -277,17 +284,19 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
     addr = ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     is_user = CPU_MEM_INDEX;
-    if (__builtin_expect(env->tlb_read[is_user][index].address !=
+    if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ !=
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
         res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
     } else {
-        physaddr = addr + env->tlb_read[is_user][index].addend;
+        physaddr = addr + env->tlb_table[is_user][index].addend;
         res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
     }
     return res;
 }
 #endif
 
+#if ACCESS_TYPE != 3
+
 /* generic store macro */
 
 static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
@@ -300,16 +309,20 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
     addr = ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     is_user = CPU_MEM_INDEX;
-    if (__builtin_expect(env->tlb_write[is_user][index].address !=
+    if (__builtin_expect(env->tlb_table[is_user][index].addr_write !=
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
         glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user);
     } else {
-        physaddr = addr + env->tlb_write[is_user][index].addend;
+        physaddr = addr + env->tlb_table[is_user][index].addend;
         glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
     }
 }
 
-#endif
+#endif /* ACCESS_TYPE != 3 */
+
+#endif /* !asm */
+
+#if ACCESS_TYPE != 3
 
 #if DATA_SIZE == 8
 static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr)
@@ -355,6 +368,8 @@ static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
 }
 #endif /* DATA_SIZE == 4 */
 
+#endif /* ACCESS_TYPE != 3 */
+
 #undef RES_TYPE
 #undef DATA_TYPE
 #undef DATA_STYPE
@@ -363,3 +378,4 @@ static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
 #undef DATA_SIZE
 #undef CPU_MEM_INDEX
 #undef MMUSUFFIX
+#undef ADDR_READ
softmmu_template.h
@@ -41,8 +41,10 @@
 
 #ifdef SOFTMMU_CODE_ACCESS
 #define READ_ACCESS_TYPE 2
+#define ADDR_READ addr_code
 #else
 #define READ_ACCESS_TYPE 0
+#define ADDR_READ addr_read
 #endif
 
 static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
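Note: this template is instantiated both normally (READ_ACCESS_TYPE 0, loads compare addr_read) and with SOFTMMU_CODE_ACCESS defined (READ_ACCESS_TYPE 2, loads compare addr_code), so the slow-path refill checks execute permission for code fetches instead of read permission.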
@@ -83,9 +85,9 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
     /* XXX: could done more in memory macro in a non portable way */
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
-    tlb_addr = env->tlb_read[is_user][index].address;
+    tlb_addr = env->tlb_table[is_user][index].ADDR_READ;
     if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        physaddr = addr + env->tlb_read[is_user][index].addend;
+        physaddr = addr + env->tlb_table[is_user][index].addend;
         if (tlb_addr & ~TARGET_PAGE_MASK) {
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
@@ -122,9 +124,9 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
 
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
-    tlb_addr = env->tlb_read[is_user][index].address;
+    tlb_addr = env->tlb_table[is_user][index].ADDR_READ;
     if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        physaddr = addr + env->tlb_read[is_user][index].addend;
+        physaddr = addr + env->tlb_table[is_user][index].addend;
         if (tlb_addr & ~TARGET_PAGE_MASK) {
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
@@ -199,9 +201,9 @@ void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
 
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
-    tlb_addr = env->tlb_write[is_user][index].address;
+    tlb_addr = env->tlb_table[is_user][index].addr_write;
     if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        physaddr = addr + env->tlb_write[is_user][index].addend;
+        physaddr = addr + env->tlb_table[is_user][index].addend;
         if (tlb_addr & ~TARGET_PAGE_MASK) {
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
@@ -237,9 +239,9 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
 
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
-    tlb_addr = env->tlb_write[is_user][index].address;
+    tlb_addr = env->tlb_table[is_user][index].addr_write;
     if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        physaddr = addr + env->tlb_write[is_user][index].addend;
+        physaddr = addr + env->tlb_table[is_user][index].addend;
         if (tlb_addr & ~TARGET_PAGE_MASK) {
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
@@ -276,3 +278,4 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
 #undef SUFFIX
 #undef USUFFIX
 #undef DATA_SIZE
+#undef ADDR_READ