Commit 29e179bc3f5e804ab58b975e65c91cb9cd287846
1 parent 274a9e70
[sh4] memory mapped TLB entries
The SH4 MMU's memory-mapped TLB feature is implemented. SH-Linux seems to write to the memory-mapped TLB to invalidate a TLB entry, but does not read it, so only the memory-write path is implemented; the memory-read path is left as future work. (Shin-ichiro KAWASAKI) git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5067 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 3 changed files with 223 additions and 11 deletions
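Before the diff itself, a minimal guest-side sketch (not part of the patch) of the operation this commit models: an OS such as SH-Linux invalidating a UTLB entry by storing to the memory-mapped UTLB address array. The 0xF6000000 base, the associative bit, and the VPN/D/V/ASID layout follow the SH-4 architecture and match the decoding done by the new cpu_sh4_write_mmaped_utlb_addr() helper; the helper names below are illustrative, and the store is only meaningful when executed on the emulated CPU.

#include <stdint.h>

#define UTLB_ADDR_ARRAY  0xF6000000u   /* falls in the MM_UTLB_ADDR region handled below */
#define UTLB_A_BIT       0x00000080u   /* "associative write" select bit in the address */

/* Data word layout, matching cpu_sh4_write_mmaped_utlb_addr():
   VPN in bits 31:10, D in bit 9, V in bit 8, ASID in bits 7:0. */
static inline uint32_t utlb_addr_word(uint32_t vpn, int d, int v, uint8_t asid)
{
    return (vpn << 10) | ((uint32_t)(d & 1) << 9) | ((uint32_t)(v & 1) << 8) | asid;
}

/* Associative write with V = 0: the MMU searches the UTLB for a
   (VPN, ASID) match and invalidates the matching entry. */
static inline void utlb_invalidate(uint32_t vpn, uint8_t asid)
{
    volatile uint32_t *p =
        (volatile uint32_t *)(uintptr_t)(UTLB_ADDR_ARRAY | UTLB_A_BIT);
    *p = utlb_addr_word(vpn, 0, 0, asid);
}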
hw/sh7750.c
| ... | ... | @@ -30,6 +30,7 @@ |
| 30 | 30 | #include "sh7750_regs.h" |
| 31 | 31 | #include "sh7750_regnames.h" |
| 32 | 32 | #include "sh_intc.h" |
| 33 | +#include "cpu.h" | |
| 33 | 34 | |
| 34 | 35 | #define NB_DEVICES 4 |
| 35 | 36 | |
| ... | ... | @@ -532,10 +533,113 @@ static struct intc_group groups_pci[] = { |
| 532 | 533 | #define SH_CPU_SH7750_ALL (SH_CPU_SH7750 | SH_CPU_SH7750S | SH_CPU_SH7750R) |
| 533 | 534 | #define SH_CPU_SH7751_ALL (SH_CPU_SH7751 | SH_CPU_SH7751R) |
| 534 | 535 | |
| 536 | +/********************************************************************** | |
| 537 | + Memory mapped cache and TLB | |
| 538 | +**********************************************************************/ | |
| 539 | + | |
| 540 | +#define MM_REGION_MASK 0x07000000 | |
| 541 | +#define MM_ICACHE_ADDR (0) | |
| 542 | +#define MM_ICACHE_DATA (1) | |
| 543 | +#define MM_ITLB_ADDR (2) | |
| 544 | +#define MM_ITLB_DATA (3) | |
| 545 | +#define MM_OCACHE_ADDR (4) | |
| 546 | +#define MM_OCACHE_DATA (5) | |
| 547 | +#define MM_UTLB_ADDR (6) | |
| 548 | +#define MM_UTLB_DATA (7) | |
| 549 | +#define MM_REGION_TYPE(addr) ((addr & MM_REGION_MASK) >> 24) | |
| 550 | + | |
| 551 | +static uint32_t invalid_read(void *opaque, target_phys_addr_t addr) | |
| 552 | +{ | |
| 553 | + assert(0); | |
| 554 | + | |
| 555 | + return 0; | |
| 556 | +} | |
| 557 | + | |
| 558 | +static uint32_t sh7750_mmct_readl(void *opaque, target_phys_addr_t addr) | |
| 559 | +{ | |
| 560 | + uint32_t ret = 0; | |
| 561 | + | |
| 562 | + switch (MM_REGION_TYPE(addr)) { | |
| 563 | + case MM_ICACHE_ADDR: | |
| 564 | + case MM_ICACHE_DATA: | |
| 565 | + /* do nothing */ | |
| 566 | + break; | |
| 567 | + case MM_ITLB_ADDR: | |
| 568 | + case MM_ITLB_DATA: | |
| 569 | + /* XXXXX */ | |
| 570 | + assert(0); | |
| 571 | + break; | |
| 572 | + case MM_OCACHE_ADDR: | |
| 573 | + case MM_OCACHE_DATA: | |
| 574 | + /* do nothing */ | |
| 575 | + break; | |
| 576 | + case MM_UTLB_ADDR: | |
| 577 | + case MM_UTLB_DATA: | |
| 578 | + /* XXXXX */ | |
| 579 | + assert(0); | |
| 580 | + break; | |
| 581 | + default: | |
| 582 | + assert(0); | |
| 583 | + } | |
| 584 | + | |
| 585 | + return ret; | |
| 586 | +} | |
| 587 | + | |
| 588 | +static void invalid_write(void *opaque, target_phys_addr_t addr, | |
| 589 | + uint32_t mem_value) | |
| 590 | +{ | |
| 591 | + assert(0); | |
| 592 | +} | |
| 593 | + | |
| 594 | +static void sh7750_mmct_writel(void *opaque, target_phys_addr_t addr, | |
| 595 | + uint32_t mem_value) | |
| 596 | +{ | |
| 597 | + SH7750State *s = opaque; | |
| 598 | + | |
| 599 | + switch (MM_REGION_TYPE(addr)) { | |
| 600 | + case MM_ICACHE_ADDR: | |
| 601 | + case MM_ICACHE_DATA: | |
| 602 | + /* do nothing */ | |
| 603 | + break; | |
| 604 | + case MM_ITLB_ADDR: | |
| 605 | + case MM_ITLB_DATA: | |
| 606 | + /* XXXXX */ | |
| 607 | + assert(0); | |
| 608 | + break; | |
| 609 | + case MM_OCACHE_ADDR: | |
| 610 | + case MM_OCACHE_DATA: | |
| 611 | + /* do nothing */ | |
| 612 | + break; | |
| 613 | + case MM_UTLB_ADDR: | |
| 614 | + cpu_sh4_write_mmaped_utlb_addr(s->cpu, addr, mem_value); | |
| 615 | + break; | |
| 616 | + case MM_UTLB_DATA: | |
| 617 | + /* XXXXX */ | |
| 618 | + assert(0); | |
| 619 | + break; | |
| 620 | + default: | |
| 621 | + assert(0); | |
| 622 | + break; | |
| 623 | + } | |
| 624 | +} | |
| 625 | + | |
| 626 | +static CPUReadMemoryFunc *sh7750_mmct_read[] = { | |
| 627 | + invalid_read, | |
| 628 | + invalid_read, | |
| 629 | + sh7750_mmct_readl | |
| 630 | +}; | |
| 631 | + | |
| 632 | +static CPUWriteMemoryFunc *sh7750_mmct_write[] = { | |
| 633 | + invalid_write, | |
| 634 | + invalid_write, | |
| 635 | + sh7750_mmct_writel | |
| 636 | +}; | |
| 637 | + | |
| 535 | 638 | SH7750State *sh7750_init(CPUSH4State * cpu) |
| 536 | 639 | { |
| 537 | 640 | SH7750State *s; |
| 538 | 641 | int sh7750_io_memory; |
| 642 | + int sh7750_mm_cache_and_tlb; /* memory mapped cache and tlb */ | |
| 539 | 643 | int cpu_model = SH_CPU_SH7751R; /* for now */ |
| 540 | 644 | |
| 541 | 645 | s = qemu_mallocz(sizeof(SH7750State)); |
| ... | ... | @@ -546,6 +650,12 @@ SH7750State *sh7750_init(CPUSH4State * cpu) |
| 546 | 650 | sh7750_mem_write, s); |
| 547 | 651 | cpu_register_physical_memory(0x1c000000, 0x04000000, sh7750_io_memory); |
| 548 | 652 | |
| 653 | + sh7750_mm_cache_and_tlb = cpu_register_io_memory(0, | |
| 654 | + sh7750_mmct_read, | |
| 655 | + sh7750_mmct_write, s); | |
| 656 | + cpu_register_physical_memory(0xf0000000, 0x08000000, | |
| 657 | + sh7750_mm_cache_and_tlb); | |
| 658 | + | |
| 549 | 659 | sh_intc_init(&s->intc, NR_SOURCES, |
| 550 | 660 | _INTC_ARRAY(mask_registers), |
| 551 | 661 | _INTC_ARRAY(prio_registers)); |
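The hunk above maps the whole 0xF0000000-0xF7FFFFFF window to a single I/O region and demultiplexes accesses with MM_REGION_TYPE(), i.e. bits 26:24 of the physical address. A small standalone sketch of that decode (the loop and printout are illustrative; the mask and the region numbering are the ones #defined above):

#include <stdio.h>
#include <stdint.h>

#define MM_REGION_MASK 0x07000000
#define MM_REGION_TYPE(addr) (((addr) & MM_REGION_MASK) >> 24)

static const char *mm_region_names[8] = {
    "MM_ICACHE_ADDR", "MM_ICACHE_DATA", "MM_ITLB_ADDR", "MM_ITLB_DATA",
    "MM_OCACHE_ADDR", "MM_OCACHE_DATA", "MM_UTLB_ADDR", "MM_UTLB_DATA"
};

int main(void)
{
    /* Walk the eight 16 MiB sub-regions of the 0xF0000000 window. */
    for (uint32_t addr = 0xF0000000u; addr <= 0xF7000000u; addr += 0x01000000u)
        printf("%08x -> region %u (%s)\n", (unsigned)addr,
               (unsigned)MM_REGION_TYPE(addr),
               mm_region_names[MM_REGION_TYPE(addr)]);
    return 0;
}

Only the MM_UTLB_ADDR write path is wired to the CPU so far; the cache regions are ignored and the remaining regions still assert().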
target-sh4/cpu.h
| ... | ... | @@ -124,6 +124,8 @@ CPUSH4State *cpu_sh4_init(const char *cpu_model); |
| 124 | 124 | int cpu_sh4_exec(CPUSH4State * s); |
| 125 | 125 | int cpu_sh4_signal_handler(int host_signum, void *pinfo, |
| 126 | 126 | void *puc); |
| 127 | +void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, target_phys_addr_t addr, | |
| 128 | + uint32_t mem_value); | |
| 127 | 129 | |
| 128 | 130 | #include "softfloat.h" |
| 129 | 131 | |
target-sh4/helper.c
| ... | ... | @@ -282,6 +282,29 @@ static int find_tlb_entry(CPUState * env, target_ulong address, |
| 282 | 282 | return match; |
| 283 | 283 | } |
| 284 | 284 | |
| 285 | +static int same_tlb_entry_exists(const tlb_t * haystack, uint8_t nbtlb, | |
| 286 | + const tlb_t * needle) | |
| 287 | +{ | |
| 288 | + int i; | |
| 289 | + for (i = 0; i < nbtlb; i++) | |
| 290 | + if (!memcmp(&haystack[i], needle, sizeof(tlb_t))) | |
| 291 | + return 1; | |
| 292 | + return 0; | |
| 293 | +} | |
| 294 | + | |
| 295 | +static void increment_urc(CPUState * env) | |
| 296 | +{ | |
| 297 | + uint8_t urb, urc; | |
| 298 | + | |
| 299 | + /* Increment URC */ | |
| 300 | + urb = ((env->mmucr) >> 18) & 0x3f; | |
| 301 | + urc = ((env->mmucr) >> 10) & 0x3f; | |
| 302 | + urc++; | |
| 303 | + if (urc == urb || urc == UTLB_SIZE - 1) | |
| 304 | + urc = 0; | |
| 305 | + env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10); | |
| 306 | +} | |
| 307 | + | |
| 285 | 308 | /* Find itlb entry - update itlb from utlb if necessary and asked for |
| 286 | 309 | Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE |
| 287 | 310 | Update the itlb from utlb if update is not 0 |
| ... | ... | @@ -313,15 +336,8 @@ int find_itlb_entry(CPUState * env, target_ulong address, |
| 313 | 336 | Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */ |
| 314 | 337 | int find_utlb_entry(CPUState * env, target_ulong address, int use_asid) |
| 315 | 338 | { |
| 316 | - uint8_t urb, urc; | |
| 317 | - | |
| 318 | - /* Increment URC */ | |
| 319 | - urb = ((env->mmucr) >> 18) & 0x3f; | |
| 320 | - urc = ((env->mmucr) >> 10) & 0x3f; | |
| 321 | - urc++; | |
| 322 | - if (urc == urb || urc == UTLB_SIZE - 1) | |
| 323 | - urc = 0; | |
| 324 | - env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10); | |
| 339 | + /* per utlb access */ | |
| 340 | + increment_urc(env); | |
| 325 | 341 | |
| 326 | 342 | /* Return entry */ |
| 327 | 343 | return find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid); |
| ... | ... | @@ -407,8 +423,21 @@ int get_physical_address(CPUState * env, target_ulong * physical, |
| 407 | 423 | return (rw & PAGE_WRITE) ? MMU_DTLB_MISS_WRITE : |
| 408 | 424 | MMU_DTLB_MISS_READ; |
| 409 | 425 | } |
| 410 | - /* Mask upper 3 bits */ | |
| 411 | - *physical = address & 0x1FFFFFFF; | |
| 426 | + if (address >= 0x80000000 && address < 0xc0000000) { | |
| 427 | + /* Mask upper 3 bits for P1 and P2 areas */ | |
| 428 | + *physical = address & 0x1fffffff; | |
| 429 | + } else if (address >= 0xfc000000) { | |
| 430 | + /* | |
| 431 | + * Mask upper 3 bits for control registers in P4 area, | |
| 432 | + * to unify access to control registers via P0-P3 area. | |
| 433 | + * The addresses for cache store queue, TLB address array | |
| 434 | + * are not masked. | |
| 435 | + */ | |
| 436 | + *physical = address & 0x1fffffff; | |
| 437 | + } else { | |
| 438 | + /* access to cache store queue, or TLB address array. */ | |
| 439 | + *physical = address; | |
| 440 | + } | |
| 412 | 441 | *prot = PAGE_READ | PAGE_WRITE; |
| 413 | 442 | return MMU_OK; |
| 414 | 443 | } |
| ... | ... | @@ -543,4 +572,75 @@ void cpu_load_tlb(CPUState * env) |
| 543 | 572 | entry->tc = (uint8_t)cpu_ptea_tc(env->ptea); |
| 544 | 573 | } |
| 545 | 574 | |
| 575 | +void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, target_phys_addr_t addr, | |
| 576 | + uint32_t mem_value) | |
| 577 | +{ | |
| 578 | + int associate = addr & 0x0000080; | |
| 579 | + uint32_t vpn = (mem_value & 0xfffffc00) >> 10; | |
| 580 | + uint8_t d = (uint8_t)((mem_value & 0x00000200) >> 9); | |
| 581 | + uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8); | |
| 582 | + uint8_t asid = (uint8_t)(mem_value & 0x000000ff); | |
| 583 | + | |
| 584 | + if (associate) { | |
| 585 | + int i; | |
| 586 | + tlb_t * utlb_match_entry = NULL; | |
| 587 | + int needs_tlb_flush = 0; | |
| 588 | + | |
| 589 | + /* search UTLB */ | |
| 590 | + for (i = 0; i < UTLB_SIZE; i++) { | |
| 591 | + tlb_t * entry = &s->utlb[i]; | |
| 592 | + if (!entry->v) | |
| 593 | + continue; | |
| 594 | + | |
| 595 | + if (entry->vpn == vpn && entry->asid == asid) { | |
| 596 | + if (utlb_match_entry) { | |
| 597 | + /* Multiple TLB Exception */ | |
| 598 | + s->exception_index = 0x140; | |
| 599 | + s->tea = addr; | |
| 600 | + break; | |
| 601 | + } | |
| 602 | + if (entry->v && !v) | |
| 603 | + needs_tlb_flush = 1; | |
| 604 | + entry->v = v; | |
| 605 | + entry->d = d; | |
| 606 | + utlb_match_entry = entry; | |
| 607 | + } | |
| 608 | + increment_urc(s); /* per utlb access */ | |
| 609 | + } | |
| 610 | + | |
| 611 | + /* search ITLB */ | |
| 612 | + for (i = 0; i < ITLB_SIZE; i++) { | |
| 613 | + tlb_t * entry = &s->itlb[i]; | |
| 614 | + if (entry->vpn == vpn && entry->asid == asid) { | |
| 615 | + if (entry->v && !v) | |
| 616 | + needs_tlb_flush = 1; | |
| 617 | + if (utlb_match_entry) | |
| 618 | + *entry = *utlb_match_entry; | |
| 619 | + else | |
| 620 | + entry->v = v; | |
| 621 | + break; | |
| 622 | + } | |
| 623 | + } | |
| 624 | + | |
| 625 | + if (needs_tlb_flush) | |
| 626 | + tlb_flush_page(s, vpn << 10); | |
| 627 | + | |
| 628 | + } else { | |
| 629 | + int index = (addr & 0x00003f00) >> 8; | |
| 630 | + tlb_t * entry = &s->utlb[index]; | |
| 631 | + if (entry->v) { | |
| 632 | + /* Overwriting valid entry in utlb. */ | |
| 633 | + target_ulong address = entry->vpn << 10; | |
| 634 | + if (!same_tlb_entry_exists(s->itlb, ITLB_SIZE, entry)) { | |
| 635 | + tlb_flush_page(s, address); | |
| 636 | + } | |
| 637 | + } | |
| 638 | + entry->asid = asid; | |
| 639 | + entry->vpn = vpn; | |
| 640 | + entry->d = d; | |
| 641 | + entry->v = v; | |
| 642 | + increment_urc(s); | |
| 643 | + } | |
| 644 | +} | |
| 645 | + | |
| 546 | 646 | #endif |
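For reference, the address and data decode performed by cpu_sh4_write_mmaped_utlb_addr() above, pulled out into a standalone sketch: bit 7 of the address selects an associative write (the entry is found by VPN/ASID match), otherwise bits 13:8 pick the UTLB entry directly. The struct and function names are illustrative; only the masks and shifts come from the patch.

#include <stdint.h>

struct utlb_aa_write {
    int      associative; /* address bit 7: search by VPN/ASID instead of by index */
    unsigned index;       /* UTLB entry 0..63, used when associative == 0 */
    uint32_t vpn;         /* data bits 31:10 */
    uint8_t  d;           /* dirty bit, data bit 9 */
    uint8_t  v;           /* valid bit, data bit 8 */
    uint8_t  asid;        /* data bits 7:0 */
};

static struct utlb_aa_write decode_utlb_aa(uint32_t addr, uint32_t value)
{
    struct utlb_aa_write w;

    w.associative = (addr & 0x00000080) != 0;
    w.index       = (addr & 0x00003f00) >> 8;
    w.vpn         = (value & 0xfffffc00) >> 10;
    w.d           = (value & 0x00000200) >> 9;
    w.v           = (value & 0x00000100) >> 8;
    w.asid        =  value & 0x000000ff;
    return w;
}

In the indexed case the patch flushes QEMU's soft TLB for the page being replaced unless an identical entry still exists in the ITLB (same_tlb_entry_exists()); in the associative case a flush happens whenever a previously valid entry is rewritten with V = 0.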