Changeset 36175 in vbox for trunk/src/recompiler/exec.c
- Timestamp: Mar 4, 2011 4:21:09 PM
- File: trunk/src/recompiler/exec.c (1 edited)
trunk/src/recompiler/exec.c
r36171 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19 … … 31 30 #ifndef VBOX 32 31 #ifdef _WIN32 33 #define WIN32_LEAN_AND_MEAN34 32 #include <windows.h> 35 33 #else … … 85 83 #define SMC_BITMAP_USE_THRESHOLD 10 86 84 87 #define MMAP_AREA_START 0x0000000088 #define MMAP_AREA_END 0xa800000089 90 85 #if defined(TARGET_SPARC64) 91 86 #define TARGET_PHYS_ADDR_SPACE_BITS 41 … … 97 92 #elif defined(TARGET_PPC64) 98 93 #define TARGET_PHYS_ADDR_SPACE_BITS 42 99 #elif defined(TARGET_X86_64) && !defined( USE_KQEMU)94 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU) 100 95 #define TARGET_PHYS_ADDR_SPACE_BITS 42 101 #elif defined(TARGET_I386) && !defined( USE_KQEMU)96 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU) 102 97 #define TARGET_PHYS_ADDR_SPACE_BITS 36 103 98 #else … … 121 116 __attribute__((__section__(".gen_code"))) \ 122 117 __attribute__((aligned (32))) 118 #elif defined(_WIN32) 119 /* Maximum alignment for Win32 is 16. */ 120 #define code_gen_section \ 121 __attribute__((aligned (16))) 123 122 #else 124 123 #define code_gen_section \ … … 138 137 #ifndef VBOX 139 138 #if !defined(CONFIG_USER_ONLY) 140 ram_addr_t phys_ram_size;141 139 int phys_ram_fd; 142 uint8_t *phys_ram_base;143 140 uint8_t *phys_ram_dirty; 144 141 static int in_migration; 145 static ram_addr_t phys_ram_alloc_offset = 0; 142 143 typedef struct RAMBlock { 144 uint8_t *host; 145 ram_addr_t offset; 146 ram_addr_t length; 147 struct RAMBlock *next; 148 } RAMBlock; 149 150 static RAMBlock *ram_blocks; 151 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug) 152 then we can no longer assume contiguous ram offsets, and external uses 153 of this variable will break. */ 154 ram_addr_t last_ram_offset; 146 155 #endif 147 156 #else /* VBOX */ 148 RTGCPHYS phys_ram_size;149 157 /* we have memory ranges (the high PC-BIOS mapping) which 150 158 causes some pages to fall outside the dirty map here. */ … … 225 233 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; 226 234 void *io_mem_opaque[IO_MEM_NB_ENTRIES]; 227 char io_mem_used[IO_MEM_NB_ENTRIES];235 static char io_mem_used[IO_MEM_NB_ENTRIES]; 228 236 static int io_mem_watch; 229 237 #endif … … 406 414 size_t len = sizeof(PageDesc) * L2_SIZE; 407 415 /* Don't use qemu_malloc because it may recurse. */ 408 p = mmap( 0, len, PROT_READ | PROT_WRITE,416 p = mmap(NULL, len, PROT_READ | PROT_WRITE, 409 417 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 410 418 *lp = p; … … 431 439 432 440 p = *lp; 433 if (!p) 434 return 0; 441 if (!p) { 442 return NULL; 443 } 435 444 return p + (index & (L2_SIZE - 1)); 436 445 } … … 512 521 513 522 #if defined(CONFIG_USER_ONLY) 514 /* Currently it is not recomm anded to allocate big chunks of data in523 /* Currently it is not recommended to allocate big chunks of data in 515 524 user mode. 
It will change when a dedicated libc will be used */ 516 525 #define USE_STATIC_CODE_GEN_BUFFER 517 526 #endif 518 527 519 /* VBox allocates codegen buffer dynamically */ 520 #ifndef VBOX 528 #if defined(VBOX) && defined(USE_STATIC_CODE_GEN_BUFFER) 529 # error "VBox allocates codegen buffer dynamically" 530 #endif 531 521 532 #ifdef USE_STATIC_CODE_GEN_BUFFER 522 533 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]; 523 #endif524 534 #endif 525 535 … … 531 541 map_exec(code_gen_buffer, code_gen_buffer_size); 532 542 #else 533 # ifdef VBOX543 # ifdef VBOX 534 544 /* We cannot use phys_ram_size here, as it's 0 now, 535 545 * it only gets initialized once RAM registration callback … … 537 547 */ 538 548 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE; 539 # else549 # else /* !VBOX */ 540 550 code_gen_buffer_size = tb_size; 541 551 if (code_gen_buffer_size == 0) { … … 550 560 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE) 551 561 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE; 552 # endif /*VBOX */562 # endif /* !VBOX */ 553 563 /* The code gen buffer location may have constraints depending on 554 564 the host cpu and OS */ 555 # ifdef VBOX565 # ifdef VBOX 556 566 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size); 557 567 … … 561 571 return; 562 572 } 563 # else /* !VBOX */573 # else /* !VBOX */ 564 574 #if defined(__linux__) 565 575 { … … 594 604 } 595 605 } 596 #elif defined(__FreeBSD__) 606 #elif defined(__FreeBSD__) || defined(__DragonFly__) 597 607 { 598 608 int flags; … … 620 630 map_exec(code_gen_buffer, code_gen_buffer_size); 621 631 #endif 622 # endif /* !VBOX */632 # endif /* !VBOX */ 623 633 #endif /* !USE_STATIC_CODE_GEN_BUFFER */ 624 634 #ifndef VBOX … … 656 666 CPUState *env = opaque; 657 667 668 cpu_synchronize_state(env, 0); 669 658 670 qemu_put_be32s(f, &env->halted); 659 671 qemu_put_be32s(f, &env->interrupt_request); … … 669 681 qemu_get_be32s(f, &env->halted); 670 682 qemu_get_be32s(f, &env->interrupt_request); 671 env->interrupt_request &= ~CPU_INTERRUPT_EXIT; 683 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the 684 version_id is increased. 
*/ 685 env->interrupt_request &= ~0x01; 672 686 tlb_flush(env, 1); 687 cpu_synchronize_state(env, 1); 673 688 674 689 return 0; 675 690 } 676 691 #endif 692 693 CPUState *qemu_get_cpu(int cpu) 694 { 695 CPUState *env = first_cpu; 696 697 while (env) { 698 if (env->cpu_index == cpu) 699 break; 700 env = env->next_cpu; 701 } 702 703 return env; 704 } 705 677 706 #endif /* !VBOX */ 678 707 … … 682 711 int cpu_index; 683 712 713 #if defined(CONFIG_USER_ONLY) 714 cpu_list_lock(); 715 #endif 684 716 env->next_cpu = NULL; 685 717 penv = &first_cpu; 686 718 cpu_index = 0; 687 719 while (*penv != NULL) { 688 penv = (CPUState **)&(*penv)->next_cpu;720 penv = &(*penv)->next_cpu; 689 721 cpu_index++; 690 722 } 691 723 env->cpu_index = cpu_index; 724 env->numa_node = 0; 692 725 TAILQ_INIT(&env->breakpoints); 693 726 TAILQ_INIT(&env->watchpoints); 694 727 *penv = env; 695 728 #ifndef VBOX 729 #if defined(CONFIG_USER_ONLY) 730 cpu_list_unlock(); 731 #endif 696 732 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) 697 733 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION, … … 789 825 if (!(address + TARGET_PAGE_SIZE <= tb->pc || 790 826 address >= tb->pc + tb->size)) { 791 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n", 827 printf("ERROR invalidate: address=" TARGET_FMT_lx 828 " PC=%08lx size=%04x\n", 792 829 address, (long)tb->pc, tb->size); 793 830 } … … 811 848 } 812 849 } 813 }814 }815 816 static void tb_jmp_check(TranslationBlock *tb)817 {818 TranslationBlock *tb1;819 unsigned int n1;820 821 /* suppress any remaining jumps to this TB */822 tb1 = tb->jmp_first;823 for(;;) {824 n1 = (long)tb1 & 3;825 tb1 = (TranslationBlock *)((long)tb1 & ~3);826 if (n1 == 2)827 break;828 tb1 = tb1->jmp_next[n1];829 }830 /* check end of list */831 if (tb1 != tb) {832 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);833 850 } 834 851 } … … 958 975 tb_phys_invalidate_count++; 959 976 } 960 961 977 962 978 #ifdef VBOX … … 1681 1697 if (env->singlestep_enabled != enabled) { 1682 1698 env->singlestep_enabled = enabled; 1683 /* must flush all the translated code to avoid inconsistancies */ 1684 /* XXX: only flush what is necessary */ 1685 tb_flush(env); 1699 if (kvm_enabled()) 1700 kvm_update_guest_debug(env, 0); 1701 else { 1702 /* must flush all the translated code to avoid inconsistencies */ 1703 /* XXX: only flush what is necessary */ 1704 tb_flush(env); 1705 } 1686 1706 } 1687 1707 #endif … … 1689 1709 1690 1710 #ifndef VBOX 1711 1691 1712 /* enable or disable low levels log */ 1692 1713 void cpu_set_log(int log_flags) … … 1725 1746 cpu_set_log(loglevel); 1726 1747 } 1748 1727 1749 #endif /* !VBOX */ 1728 1750 1729 /* mask must never be zero, except for A20 change call */ 1730 void cpu_interrupt(CPUState *env, int mask) 1731 { 1732 #if !defined(USE_NPTL) 1733 TranslationBlock *tb; 1734 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED; 1735 #endif 1736 int old_mask; 1737 1738 if (mask & CPU_INTERRUPT_EXIT) { 1739 env->exit_request = 1; 1740 mask &= ~CPU_INTERRUPT_EXIT; 1741 } 1742 1743 old_mask = env->interrupt_request; 1744 #ifdef VBOX 1745 VM_ASSERT_EMT(env->pVM); 1746 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask); 1747 #else /* !VBOX */ 1748 /* FIXME: This is probably not threadsafe. A different thread could 1749 be in the middle of a read-modify-write operation. 
*/ 1750 env->interrupt_request |= mask; 1751 #endif /* !VBOX */ 1751 static void cpu_unlink_tb(CPUState *env) 1752 { 1752 1753 #if defined(USE_NPTL) 1753 1754 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the … … 1756 1757 signals are used primarily to interrupt blocking syscalls. */ 1757 1758 #else 1759 TranslationBlock *tb; 1760 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED; 1761 1762 tb = env->current_tb; 1763 /* if the cpu is currently executing code, we must unlink it and 1764 all the potentially executing TB */ 1765 if (tb && !testandset(&interrupt_lock)) { 1766 env->current_tb = NULL; 1767 tb_reset_jump_recursive(tb); 1768 resetlock(&interrupt_lock); 1769 } 1770 #endif 1771 } 1772 1773 /* mask must never be zero, except for A20 change call */ 1774 void cpu_interrupt(CPUState *env, int mask) 1775 { 1776 int old_mask; 1777 1778 old_mask = env->interrupt_request; 1779 #ifndef VBOX 1780 env->interrupt_request |= mask; 1781 #else /* VBOX */ 1782 VM_ASSERT_EMT(env->pVM); 1783 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask); 1784 #endif /* VBOX */ 1785 1786 #ifndef VBOX 1787 #ifndef CONFIG_USER_ONLY 1788 /* 1789 * If called from iothread context, wake the target cpu in 1790 * case its halted. 1791 */ 1792 if (!qemu_cpu_self(env)) { 1793 qemu_cpu_kick(env); 1794 return; 1795 } 1796 #endif 1797 #endif /* !VBOX */ 1798 1758 1799 if (use_icount) { 1759 1800 env->icount_decr.u16.high = 0xffff; … … 1765 1806 #endif 1766 1807 } else { 1767 tb = env->current_tb; 1768 /* if the cpu is currently executing code, we must unlink it and 1769 all the potentially executing TB */ 1770 if (tb && !testandset(&interrupt_lock)) { 1771 env->current_tb = NULL; 1772 tb_reset_jump_recursive(tb); 1773 resetlock(&interrupt_lock); 1774 } 1775 } 1776 #endif 1808 cpu_unlink_tb(env); 1809 } 1777 1810 } 1778 1811 … … 1788 1821 env->interrupt_request &= ~mask; 1789 1822 #endif /* !VBOX */ 1823 } 1824 1825 void cpu_exit(CPUState *env) 1826 { 1827 env->exit_request = 1; 1828 cpu_unlink_tb(env); 1790 1829 } 1791 1830 … … 1956 1995 } 1957 1996 1958 #ifdef VBOX1959 1997 static CPUTLBEntry s_cputlb_empty_entry = { 1960 1998 .addr_read = -1, … … 1963 2001 .addend = -1, 1964 2002 }; 1965 #endif /* VBOX */1966 2003 1967 2004 /* NOTE: if flush_global is true, also flush global entries (not … … 1979 2016 1980 2017 for(i = 0; i < CPU_TLB_SIZE; i++) { 1981 #ifdef VBOX1982 2018 int mmu_idx; 1983 2019 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1984 2020 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry; 1985 2021 } 1986 #else /* !VBOX */1987 env->tlb_table[0][i].addr_read = -1;1988 env->tlb_table[0][i].addr_write = -1;1989 env->tlb_table[0][i].addr_code = -1;1990 env->tlb_table[1][i].addr_read = -1;1991 env->tlb_table[1][i].addr_write = -1;1992 env->tlb_table[1][i].addr_code = -1;1993 #if (NB_MMU_MODES >= 3)1994 env->tlb_table[2][i].addr_read = -1;1995 env->tlb_table[2][i].addr_write = -1;1996 env->tlb_table[2][i].addr_code = -1;1997 #if (NB_MMU_MODES == 4)1998 env->tlb_table[3][i].addr_read = -1;1999 env->tlb_table[3][i].addr_write = -1;2000 env->tlb_table[3][i].addr_code = -1;2001 #endif2002 #endif2003 #endif /* !VBOX */2004 2022 } 2005 2023 2006 2024 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); 2007 2025 2026 #ifdef CONFIG_KQEMU 2027 if (env->kqemu_enabled) { 2028 kqemu_flush(env, flush_global); 2029 } 2030 #endif 2008 2031 #ifdef VBOX 2009 2032 /* inform raw mode about TLB flush */ 2010 2033 remR3FlushTLB(env, flush_global); 2011 #endif2012 #ifdef 
USE_KQEMU2013 if (env->kqemu_enabled) {2014 kqemu_flush(env, flush_global);2015 }2016 2034 #endif 2017 2035 tlb_flush_count++; … … 2026 2044 addr == (tlb_entry->addr_code & 2027 2045 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 2028 tlb_entry->addr_read = -1; 2029 tlb_entry->addr_write = -1; 2030 tlb_entry->addr_code = -1; 2046 *tlb_entry = s_cputlb_empty_entry; 2031 2047 } 2032 2048 } … … 2035 2051 { 2036 2052 int i; 2053 int mmu_idx; 2037 2054 2038 2055 #if defined(DEBUG_TLB) … … 2045 2062 addr &= TARGET_PAGE_MASK; 2046 2063 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 2047 tlb_flush_entry(&env->tlb_table[0][i], addr); 2048 tlb_flush_entry(&env->tlb_table[1][i], addr); 2049 #if (NB_MMU_MODES >= 3) 2050 tlb_flush_entry(&env->tlb_table[2][i], addr); 2051 #if (NB_MMU_MODES == 4) 2052 tlb_flush_entry(&env->tlb_table[3][i], addr); 2053 #endif 2054 #endif 2064 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) 2065 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); 2055 2066 2056 2067 tlb_flush_jmp_cache(env, addr); 2057 2068 2058 #ifdef USE_KQEMU2069 #ifdef CONFIG_KQEMU 2059 2070 if (env->kqemu_enabled) { 2060 2071 kqemu_flush_page(env, addr); … … 2104 2115 } 2105 2116 2117 /* Note: start and end must be within the same ram block. */ 2106 2118 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, 2107 2119 int dirty_flags) … … 2119 2131 return; 2120 2132 len = length >> TARGET_PAGE_BITS; 2121 #ifdef USE_KQEMU2133 #ifdef CONFIG_KQEMU 2122 2134 /* XXX: should not depend on cpu context */ 2123 2135 env = first_cpu; … … 2144 2156 start1 = start; 2145 2157 #elif !defined(VBOX) 2146 start1 = start + (unsigned long)phys_ram_base; 2158 start1 = (unsigned long)qemu_get_ram_ptr(start); 2159 /* Chek that we don't span multiple blocks - this breaks the 2160 address comparisons below. */ 2161 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1 2162 != (end - 1) - start) { 2163 abort(); 2164 } 2147 2165 #else 2148 2166 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. 
*/ 2149 2167 #endif 2168 2150 2169 for(env = first_cpu; env != NULL; env = env->next_cpu) { 2151 for(i = 0; i < CPU_TLB_SIZE; i++) 2152 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length); 2153 for(i = 0; i < CPU_TLB_SIZE; i++) 2154 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length); 2155 #if (NB_MMU_MODES >= 3) 2156 for(i = 0; i < CPU_TLB_SIZE; i++) 2157 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length); 2158 #if (NB_MMU_MODES == 4) 2159 for(i = 0; i < CPU_TLB_SIZE; i++) 2160 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length); 2161 #endif 2162 #endif 2170 int mmu_idx; 2171 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 2172 for(i = 0; i < CPU_TLB_SIZE; i++) 2173 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], 2174 start1, length); 2175 } 2163 2176 } 2164 2177 } … … 2168 2181 { 2169 2182 in_migration = enable; 2183 if (kvm_enabled()) { 2184 return kvm_set_migration_log(enable); 2185 } 2170 2186 return 0; 2171 2187 } … … 2177 2193 #endif /* !VBOX */ 2178 2194 2179 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr) 2180 { 2195 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, 2196 target_phys_addr_t end_addr) 2197 { 2198 int ret = 0; 2199 2181 2200 if (kvm_enabled()) 2182 kvm_physical_sync_dirty_bitmap(start_addr, end_addr); 2201 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr); 2202 return ret; 2183 2203 } 2184 2204 … … 2190 2210 { 2191 2211 ram_addr_t ram_addr; 2212 void *p; 2192 2213 2193 2214 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { 2194 /* RAM case */2195 2215 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 2196 2216 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; 2197 2217 #elif !defined(VBOX) 2198 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + 2199 tlb_entry->addend - (unsigned long)phys_ram_base; 2218 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK) 2219 + tlb_entry->addend); 2220 ram_addr = qemu_ram_addr_from_host(p); 2200 2221 #else 2201 2222 Assert(phys_addend != -1); … … 2212 2233 { 2213 2234 int i; 2235 int mmu_idx; 2236 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 2237 for(i = 0; i < CPU_TLB_SIZE; i++) 2214 2238 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB) 2215 for(i = 0; i < CPU_TLB_SIZE; i++) 2216 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]); 2217 for(i = 0; i < CPU_TLB_SIZE; i++) 2218 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]); 2219 # if (NB_MMU_MODES >= 3) 2220 for(i = 0; i < CPU_TLB_SIZE; i++) 2221 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]); 2222 # if (NB_MMU_MODES == 4) 2223 for(i = 0; i < CPU_TLB_SIZE; i++) 2224 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]); 2225 # endif 2226 # endif 2227 #else /* VBOX */ 2228 for(i = 0; i < CPU_TLB_SIZE; i++) 2229 tlb_update_dirty(&env->tlb_table[0][i]); 2230 for(i = 0; i < CPU_TLB_SIZE; i++) 2231 tlb_update_dirty(&env->tlb_table[1][i]); 2232 #if (NB_MMU_MODES >= 3) 2233 for(i = 0; i < CPU_TLB_SIZE; i++) 2234 tlb_update_dirty(&env->tlb_table[2][i]); 2235 #if (NB_MMU_MODES == 4) 2236 for(i = 0; i < CPU_TLB_SIZE; i++) 2237 tlb_update_dirty(&env->tlb_table[3][i]); 2238 #endif 2239 #endif 2240 #endif /* VBOX */ 2239 tlb_update_dirty(&env->tlb_table[mmu_idx][i], env->phys_addends[mmu_idx][i]); 2240 #else 2241 tlb_update_dirty(&env->tlb_table[mmu_idx][i]); 2242 #endif 2243 } 2241 2244 } 2242 2245 … … 2252 2255 { 2253 2256 int i; 2257 int 
mmu_idx; 2254 2258 2255 2259 vaddr &= TARGET_PAGE_MASK; 2256 2260 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 2257 tlb_set_dirty1(&env->tlb_table[0][i], vaddr); 2258 tlb_set_dirty1(&env->tlb_table[1][i], vaddr); 2259 #if (NB_MMU_MODES >= 3) 2260 tlb_set_dirty1(&env->tlb_table[2][i], vaddr); 2261 #if (NB_MMU_MODES == 4) 2262 tlb_set_dirty1(&env->tlb_table[3][i], vaddr); 2263 #endif 2264 #endif 2261 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) 2262 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr); 2265 2263 } 2266 2264 … … 2307 2305 addend = pd & TARGET_PAGE_MASK; 2308 2306 #elif !defined(VBOX) 2309 addend = (unsigned long) phys_ram_base +(pd & TARGET_PAGE_MASK);2307 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK); 2310 2308 #else 2311 2309 /** @todo this is racing the phys_page_find call above since it may register 2312 2310 * a new chunk of memory... */ 2313 addend = (unsigned long)remR3TlbGCPhys2Ptr(env, 2314 pd & TARGET_PAGE_MASK, 2315 !!(prot & PAGE_WRITE)); 2311 addend = (unsigned long)remR3TlbGCPhys2Ptr(env, pd & TARGET_PAGE_MASK, !!(prot & PAGE_WRITE)); 2316 2312 #endif 2317 2313 … … 2324 2320 iotlb |= IO_MEM_ROM; 2325 2321 } else { 2326 /* IO handlers are currently passed a ph sical address.2322 /* IO handlers are currently passed a physical address. 2327 2323 It would be nice to pass an offset from the base address 2328 2324 of that region. This would avoid having to special case RAM, … … 2345 2341 2346 2342 code_address = address; 2347 2348 2343 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB) 2344 2349 2345 if (addend & 0x3) 2350 2346 { … … 2369 2365 addend &= ~(target_ulong)0x3; 2370 2366 } 2371 #endif 2372 2367 2368 #endif 2373 2369 /* Make accesses to pages with watchpoints go via the 2374 2370 watchpoint trap routines. */ … … 2448 2444 2449 2445 #ifndef VBOX 2450 /* dump memory mappings */ 2451 void page_dump(FILE *f) 2446 2447 /* 2448 * Walks guest process memory "regions" one by one 2449 * and calls callback function 'fn' for each region. 2450 */ 2451 int walk_memory_regions(void *priv, 2452 int (*fn)(void *, unsigned long, unsigned long, unsigned long)) 2452 2453 { 2453 2454 unsigned long start, end; 2455 PageDesc *p = NULL; 2454 2456 int i, j, prot, prot1; 2455 PageDesc *p; 2456 2457 fprintf(f, "%-8s %-8s %-8s %s\n", 2458 "start", "end", "size", "prot"); 2459 start = -1; 2460 end = -1; 2457 int rc = 0; 2458 2459 start = end = -1; 2461 2460 prot = 0; 2462 for(i = 0; i <= L1_SIZE; i++) { 2463 if (i < L1_SIZE) 2464 p = l1_map[i]; 2465 else 2466 p = NULL; 2467 for(j = 0;j < L2_SIZE; j++) { 2468 if (!p) 2469 prot1 = 0; 2470 else 2471 prot1 = p[j].flags; 2461 2462 for (i = 0; i <= L1_SIZE; i++) { 2463 p = (i < L1_SIZE) ? l1_map[i] : NULL; 2464 for (j = 0; j < L2_SIZE; j++) { 2465 prot1 = (p == NULL) ? 0 : p[j].flags; 2466 /* 2467 * "region" is one continuous chunk of memory 2468 * that has same protection flags set. 2469 */ 2472 2470 if (prot1 != prot) { 2473 2471 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS); 2474 2472 if (start != -1) { 2475 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n", 2476 start, end, end - start, 2477 prot & PAGE_READ ? 'r' : '-', 2478 prot & PAGE_WRITE ? 'w' : '-', 2479 prot & PAGE_EXEC ? 
'x' : '-'); 2473 rc = (*fn)(priv, start, end, prot); 2474 /* callback can stop iteration by returning != 0 */ 2475 if (rc != 0) 2476 return (rc); 2480 2477 } 2481 2478 if (prot1 != 0) … … 2485 2482 prot = prot1; 2486 2483 } 2487 if ( !p)2484 if (p == NULL) 2488 2485 break; 2489 2486 } 2490 2487 } 2491 } 2488 return (rc); 2489 } 2490 2491 static int dump_region(void *priv, unsigned long start, 2492 unsigned long end, unsigned long prot) 2493 { 2494 FILE *f = (FILE *)priv; 2495 2496 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n", 2497 start, end, end - start, 2498 ((prot & PAGE_READ) ? 'r' : '-'), 2499 ((prot & PAGE_WRITE) ? 'w' : '-'), 2500 ((prot & PAGE_EXEC) ? 'x' : '-')); 2501 2502 return (0); 2503 } 2504 2505 /* dump memory mappings */ 2506 void page_dump(FILE *f) 2507 { 2508 (void) fprintf(f, "%-8s %-8s %-8s %s\n", 2509 "start", "end", "size", "prot"); 2510 walk_memory_regions(f, dump_region); 2511 } 2512 2492 2513 #endif /* !VBOX */ 2493 2514 … … 2503 2524 2504 2525 /* modify the flags of a page and invalidate the code if 2505 necessary. The flag PAGE_WRITE_ORG is position ned automatically2526 necessary. The flag PAGE_WRITE_ORG is positioned automatically 2506 2527 depending on PAGE_WRITE */ 2507 2528 void page_set_flags(target_ulong start, target_ulong end, int flags) … … 2573 2594 2574 2595 /* called from signal handler: invalidate the code and unprotect the 2575 page. Return TRUE if the fault was succes fully handled. */2596 page. Return TRUE if the fault was successfully handled. */ 2576 2597 int page_unprotect(target_ulong address, unsigned long pc, void *puc) 2577 2598 { … … 2657 2678 io memory page. The address used when calling the IO function is 2658 2679 the offset from the start of the region, plus region_offset. Both 2659 start_ region and regon_offset are rounded down to a page boundary2680 start_addr and region_offset are rounded down to a page boundary 2660 2681 before calculating this offset. This should not be a problem unless 2661 2682 the low bits of start_addr and region_offset differ. 
*/ … … 2671 2692 void *subpage; 2672 2693 2673 #ifdef USE_KQEMU2694 #ifdef CONFIG_KQEMU 2674 2695 /* XXX: should not depend on cpu context */ 2675 2696 env = first_cpu; … … 2773 2794 } 2774 2795 2796 #ifdef CONFIG_KQEMU 2775 2797 /* XXX: better than nothing */ 2798 static ram_addr_t kqemu_ram_alloc(ram_addr_t size) 2799 { 2800 ram_addr_t addr; 2801 if ((last_ram_offset + size) > kqemu_phys_ram_size) { 2802 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n", 2803 (uint64_t)size, (uint64_t)kqemu_phys_ram_size); 2804 abort(); 2805 } 2806 addr = last_ram_offset; 2807 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size); 2808 return addr; 2809 } 2810 #endif 2811 2776 2812 ram_addr_t qemu_ram_alloc(ram_addr_t size) 2777 2813 { 2778 ram_addr_t addr; 2779 if ((phys_ram_alloc_offset + size) > phys_ram_size) { 2780 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n", 2781 (uint64_t)size, (uint64_t)phys_ram_size); 2814 RAMBlock *new_block; 2815 2816 #ifdef CONFIG_KQEMU 2817 if (kqemu_phys_ram_base) { 2818 return kqemu_ram_alloc(size); 2819 } 2820 #endif 2821 2822 size = TARGET_PAGE_ALIGN(size); 2823 new_block = qemu_malloc(sizeof(*new_block)); 2824 2825 new_block->host = qemu_vmalloc(size); 2826 new_block->offset = last_ram_offset; 2827 new_block->length = size; 2828 2829 new_block->next = ram_blocks; 2830 ram_blocks = new_block; 2831 2832 phys_ram_dirty = qemu_realloc(phys_ram_dirty, 2833 (last_ram_offset + size) >> TARGET_PAGE_BITS); 2834 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS), 2835 0xff, size >> TARGET_PAGE_BITS); 2836 2837 last_ram_offset += size; 2838 2839 if (kvm_enabled()) 2840 kvm_setup_guest_memory(new_block->host, size); 2841 2842 return new_block->offset; 2843 } 2844 2845 void qemu_ram_free(ram_addr_t addr) 2846 { 2847 /* TODO: implement this. */ 2848 } 2849 2850 /* Return a host pointer to ram allocated with qemu_ram_alloc. 2851 With the exception of the softmmu code in this file, this should 2852 only be used for local memory (e.g. video ram) that the device owns, 2853 and knows it isn't going to access beyond the end of the block. 2854 2855 It should not be used for general purpose DMA. 2856 Use cpu_physical_memory_map/cpu_physical_memory_rw instead. 2857 */ 2858 void *qemu_get_ram_ptr(ram_addr_t addr) 2859 { 2860 RAMBlock *prev; 2861 RAMBlock **prevp; 2862 RAMBlock *block; 2863 2864 #ifdef CONFIG_KQEMU 2865 if (kqemu_phys_ram_base) { 2866 return kqemu_phys_ram_base + addr; 2867 } 2868 #endif 2869 2870 prev = NULL; 2871 prevp = &ram_blocks; 2872 block = ram_blocks; 2873 while (block && (block->offset > addr 2874 || block->offset + block->length <= addr)) { 2875 if (prev) 2876 prevp = &prev->next; 2877 prev = block; 2878 block = block->next; 2879 } 2880 if (!block) { 2881 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); 2782 2882 abort(); 2783 2883 } 2784 addr = phys_ram_alloc_offset; 2785 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size); 2786 return addr; 2787 } 2788 2789 void qemu_ram_free(ram_addr_t addr) 2790 { 2791 } 2884 /* Move this entry to to start of the list. */ 2885 if (prev) { 2886 prev->next = block->next; 2887 block->next = *prevp; 2888 *prevp = block; 2889 } 2890 return block->host + (addr - block->offset); 2891 } 2892 2893 /* Some of the softmmu routines need to translate from a host pointer 2894 (typically a TLB entry) back to a ram offset. 
*/ 2895 ram_addr_t qemu_ram_addr_from_host(void *ptr) 2896 { 2897 RAMBlock *prev; 2898 RAMBlock **prevp; 2899 RAMBlock *block; 2900 uint8_t *host = ptr; 2901 2902 #ifdef CONFIG_KQEMU 2903 if (kqemu_phys_ram_base) { 2904 return host - kqemu_phys_ram_base; 2905 } 2906 #endif 2907 2908 prev = NULL; 2909 prevp = &ram_blocks; 2910 block = ram_blocks; 2911 while (block && (block->host > host 2912 || block->host + block->length <= host)) { 2913 if (prev) 2914 prevp = &prev->next; 2915 prev = block; 2916 block = block->next; 2917 } 2918 if (!block) { 2919 fprintf(stderr, "Bad ram pointer %p\n", ptr); 2920 abort(); 2921 } 2922 return block->offset + (host - block->host); 2923 } 2924 2792 2925 #endif /* !VBOX */ 2793 2926 … … 2891 3024 remR3PhysWriteU8(ram_addr, val); 2892 3025 #else 2893 stb_p( phys_ram_base + ram_addr, val);2894 #endif 2895 #ifdef USE_KQEMU3026 stb_p(qemu_get_ram_ptr(ram_addr), val); 3027 #endif 3028 #ifdef CONFIG_KQEMU 2896 3029 if (cpu_single_env->kqemu_enabled && 2897 3030 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) … … 2933 3066 remR3PhysWriteU16(ram_addr, val); 2934 3067 #else 2935 stw_p( phys_ram_base + ram_addr, val);2936 #endif 2937 #ifdef USE_KQEMU3068 stw_p(qemu_get_ram_ptr(ram_addr), val); 3069 #endif 3070 #ifdef CONFIG_KQEMU 2938 3071 if (cpu_single_env->kqemu_enabled && 2939 3072 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) … … 2975 3108 remR3PhysWriteU32(ram_addr, val); 2976 3109 #else 2977 stl_p( phys_ram_base + ram_addr, val);2978 #endif 2979 #ifdef USE_KQEMU3110 stl_p(qemu_get_ram_ptr(ram_addr), val); 3111 #endif 3112 #ifdef CONFIG_KQEMU 2980 3113 if (cpu_single_env->kqemu_enabled && 2981 3114 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) … … 3213 3346 eidx = SUBPAGE_IDX(end); 3214 3347 #if defined(DEBUG_SUBPAGE) 3215 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem % d\n", __func__,3348 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__, 3216 3349 mmio, start, end, idx, eidx, memory); 3217 3350 #endif … … 3244 3377 3245 3378 mmio->base = base; 3246 subpage_memory = cpu_register_io_memory( 0,subpage_read, subpage_write, mmio);3379 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio); 3247 3380 #if defined(DEBUG_SUBPAGE) 3248 3381 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__, … … 3269 3402 } 3270 3403 3271 static void io_mem_init(void)3272 {3273 int i;3274 3275 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);3276 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);3277 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);3278 for (i=0; i<5; i++)3279 io_mem_used[i] = 1;3280 3281 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,3282 watch_mem_write, NULL);3283 3284 #ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */3285 /* alloc dirty bits array */3286 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);3287 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);3288 #endif /* !VBOX */3289 }3290 3291 3404 /* mem_read and mem_write are arrays of functions containing the 3292 3405 function to access byte (index 0), word (index 1) and dword (index 3293 2). Functions can be omitted with a NULL function pointer. The 3294 registered functions may be modified dynamically later. 3406 2). Functions can be omitted with a NULL function pointer. 
3295 3407 If io_index is non zero, the corresponding io zone is 3296 3408 modified. If it is zero, a new io zone is allocated. The return 3297 3409 value can be used with cpu_register_physical_memory(). (-1) is 3298 3410 returned if error. */ 3299 int cpu_register_io_memory(int io_index,3300 CPUReadMemoryFunc **mem_read,3301 CPUWriteMemoryFunc **mem_write,3302 void *opaque)3411 static int cpu_register_io_memory_fixed(int io_index, 3412 CPUReadMemoryFunc **mem_read, 3413 CPUWriteMemoryFunc **mem_write, 3414 void *opaque) 3303 3415 { 3304 3416 int i, subwidth = 0; … … 3309 3421 return io_index; 3310 3422 } else { 3423 io_index >>= IO_MEM_SHIFT; 3311 3424 if (io_index >= IO_MEM_NB_ENTRIES) 3312 3425 return -1; … … 3323 3436 } 3324 3437 3438 int cpu_register_io_memory(CPUReadMemoryFunc **mem_read, 3439 CPUWriteMemoryFunc **mem_write, 3440 void *opaque) 3441 { 3442 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque); 3443 } 3444 3325 3445 void cpu_unregister_io_memory(int io_table_address) 3326 3446 { … … 3336 3456 } 3337 3457 3338 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index) 3339 { 3340 return io_mem_write[io_index >> IO_MEM_SHIFT]; 3341 } 3342 3343 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index) 3344 { 3345 return io_mem_read[io_index >> IO_MEM_SHIFT]; 3458 static void io_mem_init(void) 3459 { 3460 int i; 3461 3462 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL); 3463 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL); 3464 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL); 3465 for (i=0; i<5; i++) 3466 io_mem_used[i] = 1; 3467 3468 io_mem_watch = cpu_register_io_memory(watch_mem_read, 3469 watch_mem_write, NULL); 3470 #ifdef CONFIG_KQEMU 3471 if (kqemu_phys_ram_base) { 3472 /* alloc dirty bits array */ 3473 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS); 3474 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS); 3475 } 3476 #endif 3346 3477 } 3347 3478 … … 3456 3587 remR3PhysWrite(addr1, buf, l); NOREF(ptr); 3457 3588 #else 3458 ptr = phys_ram_base + addr1;3589 ptr = qemu_get_ram_ptr(addr1); 3459 3590 memcpy(ptr, buf, l); 3460 3591 #endif … … 3511 3642 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr); 3512 3643 #else 3513 ptr = phys_ram_base +(pd & TARGET_PAGE_MASK) +3644 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 3514 3645 (addr & ~TARGET_PAGE_MASK); 3515 3646 memcpy(buf, ptr, l); … … 3555 3686 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3556 3687 /* ROM/RAM case */ 3557 ptr = phys_ram_base + addr1;3688 ptr = qemu_get_ram_ptr(addr1); 3558 3689 memcpy(ptr, buf, l); 3559 3690 } … … 3596 3727 3597 3728 LIST_REMOVE(client, link); 3729 qemu_free(client); 3598 3730 } 3599 3731 … … 3605 3737 client = LIST_FIRST(&map_client_list); 3606 3738 client->callback(client->opaque); 3607 LIST_REMOVE(client, link);3739 cpu_unregister_map_client(client); 3608 3740 } 3609 3741 } … … 3655 3787 } else { 3656 3788 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3657 ptr = phys_ram_base + addr1;3789 ptr = qemu_get_ram_ptr(addr1); 3658 3790 } 3659 3791 if (!done) { … … 3680 3812 if (buffer != bounce.buffer) { 3681 3813 if (is_write) { 3682 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;3814 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer); 3683 3815 while (access_len) { 3684 3816 unsigned l; … … 3735 3867 /* RAM case */ 3736 3868 
#ifndef VBOX 3737 ptr = phys_ram_base +(pd & TARGET_PAGE_MASK) +3869 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 3738 3870 (addr & ~TARGET_PAGE_MASK); 3739 3871 val = ldl_p(ptr); … … 3777 3909 /* RAM case */ 3778 3910 #ifndef VBOX 3779 ptr = phys_ram_base +(pd & TARGET_PAGE_MASK) +3911 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 3780 3912 (addr & ~TARGET_PAGE_MASK); 3781 3913 val = ldq_p(ptr); … … 3828 3960 #ifndef VBOX 3829 3961 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3830 ptr = phys_ram_base + addr1;3962 ptr = qemu_get_ram_ptr(addr1); 3831 3963 stl_p(ptr, val); 3832 3964 #else … … 3875 4007 } else { 3876 4008 #ifndef VBOX 3877 ptr = phys_ram_base +(pd & TARGET_PAGE_MASK) +4009 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 3878 4010 (addr & ~TARGET_PAGE_MASK); 3879 4011 stq_p(ptr, val); … … 3909 4041 /* RAM case */ 3910 4042 #ifndef VBOX 3911 ptr = phys_ram_base + addr1;4043 ptr = qemu_get_ram_ptr(addr1); 3912 4044 stl_p(ptr, val); 3913 4045 #else … … 3950 4082 #endif 3951 4083 3952 /* virtual memory access for debug */ 4084 #ifndef VBOX 4085 /* virtual memory access for debug (includes writing to ROM) */ 3953 4086 int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 3954 4087 uint8_t *buf, int len, int is_write) … … 3967 4100 if (l > len) 3968 4101 l = len; 3969 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), 3970 buf, l, is_write); 4102 phys_addr += (addr & ~TARGET_PAGE_MASK); 4103 #if !defined(CONFIG_USER_ONLY) 4104 if (is_write) 4105 cpu_physical_memory_write_rom(phys_addr, buf, l); 4106 else 4107 #endif 4108 cpu_physical_memory_rw(phys_addr, buf, l, is_write); 3971 4109 len -= l; 3972 4110 buf += l; … … 3975 4113 return 0; 3976 4114 } 4115 #endif /* !VBOX */ 3977 4116 3978 4117 /* in deterministic execution mode, instructions doing device I/Os
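
Below is a minimal, self-contained sketch of the RAMBlock list allocation and lookup that this changeset introduces in place of the flat phys_ram_base/phys_ram_alloc_offset scheme (qemu_ram_alloc, qemu_get_ram_ptr). The struct fields and lookup logic follow the diff; the standalone main() harness, the malloc-based allocation, the simplified ram_addr_t typedef and the fixed sizes are illustrative assumptions, not part of the change. The real qemu_get_ram_ptr() additionally moves the matching block to the front of the list as a most-recently-used optimization, which is omitted here.

/* Sketch of the RAMBlock list introduced above (types simplified). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef uint64_t ram_addr_t;

typedef struct RAMBlock {
    uint8_t *host;          /* host virtual address of the block */
    ram_addr_t offset;      /* guest ram offset where the block starts */
    ram_addr_t length;      /* block size in bytes */
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
static ram_addr_t last_ram_offset;

/* Allocate a new block and prepend it to the list, as qemu_ram_alloc() does. */
static ram_addr_t ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block = malloc(sizeof(*new_block));
    new_block->host = malloc(size);
    new_block->offset = last_ram_offset;
    new_block->length = size;
    new_block->next = ram_blocks;
    ram_blocks = new_block;
    last_ram_offset += size;
    return new_block->offset;
}

/* Translate a ram offset to a host pointer by walking the block list,
   the core of qemu_get_ram_ptr(). */
static uint8_t *get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    for (block = ram_blocks; block; block = block->next) {
        if (addr >= block->offset && addr < block->offset + block->length)
            return block->host + (addr - block->offset);
    }
    fprintf(stderr, "Bad ram offset %llx\n", (unsigned long long)addr);
    abort();
}

int main(void)
{
    ram_addr_t a = ram_alloc(4096);
    ram_addr_t b = ram_alloc(8192);
    memset(get_ram_ptr(b), 0xab, 8192);
    printf("block a at %p, block b at %p\n",
           (void *)get_ram_ptr(a), (void *)get_ram_ptr(b));
    return 0;
}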
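
The page_dump() rewrite in this changeset factors the region scan into walk_memory_regions() plus a dump_region() callback. The following rough standalone sketch shows that callback pattern; the flat page_flags array, the page count and the main() driver are invented for illustration, and only the walk/callback shape mirrors the diff.

/* Sketch of the callback-based region walk that replaces the inline
   page_dump() loop (simplified single-level page table). */
#include <stdio.h>

#define NUM_PAGES   64
#define PAGE_SIZE   4096
#define PAGE_READ   1
#define PAGE_WRITE  2
#define PAGE_EXEC   4

static int page_flags[NUM_PAGES];   /* stand-in for the PageDesc flags */

/* Call 'fn' once for each run of pages sharing the same protection.
   A non-zero return from the callback stops the walk, as in the diff. */
static int walk_memory_regions(void *priv,
        int (*fn)(void *, unsigned long, unsigned long, unsigned long))
{
    unsigned long start = (unsigned long)-1;
    int prot = 0, rc = 0, i;

    for (i = 0; i <= NUM_PAGES; i++) {
        int prot1 = (i < NUM_PAGES) ? page_flags[i] : 0;
        if (prot1 != prot) {
            unsigned long end = (unsigned long)i * PAGE_SIZE;
            if (start != (unsigned long)-1) {
                rc = fn(priv, start, end, prot);
                if (rc != 0)
                    return rc;
            }
            start = prot1 ? end : (unsigned long)-1;
            prot = prot1;
        }
    }
    return rc;
}

static int dump_region(void *priv, unsigned long start,
                       unsigned long end, unsigned long prot)
{
    fprintf((FILE *)priv, "%08lx-%08lx %08lx %c%c%c\n",
            start, end, end - start,
            (prot & PAGE_READ)  ? 'r' : '-',
            (prot & PAGE_WRITE) ? 'w' : '-',
            (prot & PAGE_EXEC)  ? 'x' : '-');
    return 0;
}

int main(void)
{
    int i;
    for (i = 8; i < 16; i++)
        page_flags[i] = PAGE_READ | PAGE_EXEC;
    for (i = 32; i < 40; i++)
        page_flags[i] = PAGE_READ | PAGE_WRITE;
    return walk_memory_regions(stdout, dump_region);
}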