Changeset 36170 in vbox for trunk/src/recompiler/exec.c
- Timestamp: Mar 4, 2011 12:49:02 PM (14 years ago)
- File: 1 edited
trunk/src/recompiler/exec.c
r36140 r36170 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 … … 61 61 #endif 62 62 #include "osdep.h" 63 #include "kvm.h" 63 64 #if defined(CONFIG_USER_ONLY) 64 65 #include <qemu.h> … … 149 150 causes some pages to fall outside the dirty map here. */ 150 151 RTGCPHYS phys_ram_dirty_size; 152 uint8_t *phys_ram_dirty; 151 153 #endif /* VBOX */ 152 #if !defined(VBOX)153 uint8_t *phys_ram_base;154 #endif155 uint8_t *phys_ram_dirty;156 154 157 155 CPUState *first_cpu; … … 182 180 /* offset in host memory of the page + io_index in the low bits */ 183 181 ram_addr_t phys_offset; 182 ram_addr_t region_offset; 184 183 } PhysPageDesc; 185 184 … … 226 225 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; 227 226 void *io_mem_opaque[IO_MEM_NB_ENTRIES]; 228 static int io_mem_nb;227 char io_mem_used[IO_MEM_NB_ENTRIES]; 229 228 static int io_mem_watch; 230 229 #endif … … 257 256 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4]; 258 257 void *opaque[TARGET_PAGE_SIZE][2][4]; 258 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4]; 259 259 } subpage_t; 260 260 … … 305 305 { 306 306 SYSTEM_INFO system_info; 307 DWORD old_protect;308 307 309 308 GetSystemInfo(&system_info); … … 314 313 #endif 315 314 #endif /* !VBOX */ 316 317 315 if (qemu_host_page_size == 0) 318 316 qemu_host_page_size = qemu_real_host_page_size; … … 406 404 /* allocate if not found */ 407 405 #if defined(CONFIG_USER_ONLY) 408 unsigned long addr;409 406 size_t len = sizeof(PageDesc) * L2_SIZE; 410 407 /* Don't use qemu_malloc because it may recurse. 
*/ … … 412 409 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 413 410 *lp = p; 414 addr = h2g(p);415 if (addr == (target_ulong)addr) {411 if (h2g_valid(p)) { 412 unsigned long addr = h2g(p); 416 413 page_set_flags(addr & TARGET_PAGE_MASK, 417 414 TARGET_PAGE_ALIGN(addr + len), … … 486 483 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE); 487 484 *lp = pd; 488 for (i = 0; i < L2_SIZE; i++) 485 for (i = 0; i < L2_SIZE; i++) { 489 486 pd[i].phys_offset = IO_MEM_UNASSIGNED; 487 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS; 488 } 490 489 } 491 490 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1)); … … 545 544 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE; 546 545 #else 547 /* XXX: needs a djustments */546 /* XXX: needs ajustments */ 548 547 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4); 549 548 #endif 550 551 549 } 552 550 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE) 553 551 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE; 554 552 #endif /* VBOX */ 555 556 553 /* The code gen buffer location may have constraints depending on 557 554 the host cpu and OS */ … … 564 561 return; 565 562 } 566 #else //!VBOX563 #else /* !VBOX */ 567 564 #if defined(__linux__) 568 565 { … … 582 579 if (code_gen_buffer_size > (512 * 1024 * 1024)) 583 580 code_gen_buffer_size = (512 * 1024 * 1024); 581 #elif defined(__arm__) 582 /* Map the buffer below 32M, so we can use direct calls and branches */ 583 flags |= MAP_FIXED; 584 start = (void *) 0x01000000UL; 585 if (code_gen_buffer_size > 16 * 1024 * 1024) 586 code_gen_buffer_size = 16 * 1024 * 1024; 584 587 #endif 585 588 code_gen_buffer = mmap(start, code_gen_buffer_size, … … 615 618 #else 616 619 code_gen_buffer = qemu_malloc(code_gen_buffer_size); 617 if (!code_gen_buffer) {618 fprintf(stderr, "Could not allocate dynamic translator buffer\n");619 exit(1);620 }621 620 map_exec(code_gen_buffer, code_gen_buffer_size); 622 621 #endif 623 map_exec(code_gen_prologue, sizeof(code_gen_prologue));624 622 #endif /* !VBOX */ 625 623 #endif /* !USE_STATIC_CODE_GEN_BUFFER */ … … 629 627 map_exec(code_gen_prologue, _1K); 630 628 #endif 631 632 629 code_gen_buffer_max_size = code_gen_buffer_size - 633 630 code_gen_max_block_size(); … … 677 674 } 678 675 #endif 679 #endif / /!VBOX676 #endif /* !VBOX */ 680 677 681 678 void cpu_exec_init(CPUState *env) … … 692 689 } 693 690 env->cpu_index = cpu_index; 694 env->nb_watchpoints = 0; 691 TAILQ_INIT(&env->breakpoints); 692 TAILQ_INIT(&env->watchpoints); 695 693 *penv = env; 696 694 #ifndef VBOX … … 701 699 cpu_save, cpu_load, env); 702 700 #endif 703 #endif / / !VBOX701 #endif /* !VBOX */ 704 702 } 705 703 … … 780 778 781 779 #ifdef DEBUG_TB_CHECK 780 782 781 static void tb_invalidate_check(target_ulong address) 783 782 { … … 1040 1039 1041 1040 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8); 1042 if (!p->code_bitmap)1043 return;1044 1041 1045 1042 tb = p->first_tb; … … 1109 1106 int is_cpu_write_access) 1110 1107 { 1111 int n, current_tb_modified, current_tb_not_found, current_flags;1108 TranslationBlock *tb, *tb_next, *saved_tb; 1112 1109 CPUState *env = cpu_single_env; 1110 target_ulong tb_start, tb_end; 1113 1111 PageDesc *p; 1114 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb; 1115 target_ulong tb_start, tb_end; 1116 target_ulong current_pc, current_cs_base; 1112 int n; 1113 #ifdef TARGET_HAS_PRECISE_SMC 1114 int current_tb_not_found = is_cpu_write_access; 1115 TranslationBlock *current_tb = NULL; 1116 int current_tb_modified = 0; 1117 target_ulong current_pc = 0; 1118 target_ulong 
current_cs_base = 0; 1119 int current_flags = 0; 1120 #endif /* TARGET_HAS_PRECISE_SMC */ 1117 1121 1118 1122 p = page_find(start >> TARGET_PAGE_BITS); … … 1128 1132 /* we remove all the TBs in the range [start, end[ */ 1129 1133 /* XXX: see if in some cases it could be faster to invalidate all the code */ 1130 current_tb_not_found = is_cpu_write_access;1131 current_tb_modified = 0;1132 current_tb = NULL; /* avoid warning */1133 current_pc = 0; /* avoid warning */1134 current_cs_base = 0; /* avoid warning */1135 current_flags = 0; /* avoid warning */1136 1134 tb = p->first_tb; 1137 1135 while (tb != NULL) { … … 1170 1168 cpu_restore_state(current_tb, env, 1171 1169 env->mem_io_pc, NULL); 1172 #if defined(TARGET_I386) 1173 current_flags = env->hflags; 1174 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); 1175 current_cs_base = (target_ulong)env->segs[R_CS].base; 1176 current_pc = current_cs_base + env->eip; 1177 #else 1178 #error unsupported CPU 1179 #endif 1170 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, 1171 ¤t_flags); 1180 1172 } 1181 1173 #endif /* TARGET_HAS_PRECISE_SMC */ … … 1217 1209 } 1218 1210 1219 1220 1211 /* len must be <= 8 and start must be a multiple of len */ 1221 1212 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len) … … 1225 1216 #if 0 1226 1217 if (1) { 1227 if (loglevel) { 1228 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 1229 cpu_single_env->mem_io_vaddr, len, 1230 cpu_single_env->eip, 1231 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base); 1232 } 1218 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 1219 cpu_single_env->mem_io_vaddr, len, 1220 cpu_single_env->eip, 1221 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base); 1233 1222 } 1234 1223 #endif … … 1247 1236 } 1248 1237 1249 1250 1238 #if !defined(CONFIG_SOFTMMU) 1251 1239 static void tb_invalidate_phys_page(target_phys_addr_t addr, 1252 1240 unsigned long pc, void *puc) 1253 1241 { 1254 int n, current_flags, current_tb_modified; 1255 target_ulong current_pc, current_cs_base; 1242 TranslationBlock *tb; 1256 1243 PageDesc *p; 1257 TranslationBlock *tb, *current_tb;1244 int n; 1258 1245 #ifdef TARGET_HAS_PRECISE_SMC 1246 TranslationBlock *current_tb = NULL; 1259 1247 CPUState *env = cpu_single_env; 1248 int current_tb_modified = 0; 1249 target_ulong current_pc = 0; 1250 target_ulong current_cs_base = 0; 1251 int current_flags = 0; 1260 1252 #endif 1261 1253 … … 1265 1257 return; 1266 1258 tb = p->first_tb; 1267 current_tb_modified = 0;1268 current_tb = NULL;1269 current_pc = 0; /* avoid warning */1270 current_cs_base = 0; /* avoid warning */1271 current_flags = 0; /* avoid warning */1272 1259 #ifdef TARGET_HAS_PRECISE_SMC 1273 1260 if (tb && pc != 0) { … … 1289 1276 current_tb_modified = 1; 1290 1277 cpu_restore_state(current_tb, env, pc, puc); 1291 #if defined(TARGET_I386) 1292 current_flags = env->hflags; 1293 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); 1294 current_cs_base = (target_ulong)env->segs[R_CS].base; 1295 current_pc = current_cs_base + env->eip; 1296 #else 1297 #error unsupported CPU 1298 #endif 1278 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, 1279 ¤t_flags); 1299 1280 } 1300 1281 #endif /* TARGET_HAS_PRECISE_SMC */ … … 1540 1521 1541 1522 /* Add a watchpoint. 
*/ 1542 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type) 1543 { 1544 int i; 1545 1546 for (i = 0; i < env->nb_watchpoints; i++) { 1547 if (addr == env->watchpoint[i].vaddr) 1523 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, 1524 int flags, CPUWatchpoint **watchpoint) 1525 { 1526 target_ulong len_mask = ~(len - 1); 1527 CPUWatchpoint *wp; 1528 1529 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */ 1530 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) { 1531 fprintf(stderr, "qemu: tried to set invalid watchpoint at " 1532 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len); 1533 #ifndef VBOX 1534 return -EINVAL; 1535 #else 1536 return VERR_INVALID_PARAMETER; 1537 #endif 1538 } 1539 wp = qemu_malloc(sizeof(*wp)); 1540 1541 wp->vaddr = addr; 1542 wp->len_mask = len_mask; 1543 wp->flags = flags; 1544 1545 /* keep all GDB-injected watchpoints in front */ 1546 if (flags & BP_GDB) 1547 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry); 1548 else 1549 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry); 1550 1551 tlb_flush_page(env, addr); 1552 1553 if (watchpoint) 1554 *watchpoint = wp; 1555 return 0; 1556 } 1557 1558 /* Remove a specific watchpoint. */ 1559 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len, 1560 int flags) 1561 { 1562 target_ulong len_mask = ~(len - 1); 1563 CPUWatchpoint *wp; 1564 1565 TAILQ_FOREACH(wp, &env->watchpoints, entry) { 1566 if (addr == wp->vaddr && len_mask == wp->len_mask 1567 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) { 1568 cpu_watchpoint_remove_by_ref(env, wp); 1548 1569 return 0; 1549 } 1550 if (env->nb_watchpoints >= MAX_WATCHPOINTS) 1551 return -1; 1552 1553 i = env->nb_watchpoints++; 1554 env->watchpoint[i].vaddr = addr; 1555 env->watchpoint[i].type = type; 1556 tlb_flush_page(env, addr); 1557 /* FIXME: This flush is needed because of the hack to make memory ops 1558 terminate the TB. It can be removed once the proper IO trap and 1559 re-execute bits are in. */ 1560 tb_flush(env); 1561 return i; 1562 } 1563 1564 /* Remove a watchpoint. */ 1565 int cpu_watchpoint_remove(CPUState *env, target_ulong addr) 1566 { 1567 int i; 1568 1569 for (i = 0; i < env->nb_watchpoints; i++) { 1570 if (addr == env->watchpoint[i].vaddr) { 1571 env->nb_watchpoints--; 1572 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints]; 1573 tlb_flush_page(env, addr); 1570 } 1571 } 1572 #ifndef VBOX 1573 return -ENOENT; 1574 #else 1575 return VERR_NOT_FOUND; 1576 #endif 1577 } 1578 1579 /* Remove a specific watchpoint by reference. */ 1580 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint) 1581 { 1582 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry); 1583 1584 tlb_flush_page(env, watchpoint->vaddr); 1585 1586 qemu_free(watchpoint); 1587 } 1588 1589 /* Remove all matching watchpoints. */ 1590 void cpu_watchpoint_remove_all(CPUState *env, int mask) 1591 { 1592 CPUWatchpoint *wp, *next; 1593 1594 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) { 1595 if (wp->flags & mask) 1596 cpu_watchpoint_remove_by_ref(env, wp); 1597 } 1598 } 1599 1600 /* Add a breakpoint. 
*/ 1601 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, 1602 CPUBreakpoint **breakpoint) 1603 { 1604 #if defined(TARGET_HAS_ICE) 1605 CPUBreakpoint *bp; 1606 1607 bp = qemu_malloc(sizeof(*bp)); 1608 1609 bp->pc = pc; 1610 bp->flags = flags; 1611 1612 /* keep all GDB-injected breakpoints in front */ 1613 if (flags & BP_GDB) 1614 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry); 1615 else 1616 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry); 1617 1618 breakpoint_invalidate(env, pc); 1619 1620 if (breakpoint) 1621 *breakpoint = bp; 1622 return 0; 1623 #else 1624 return -ENOSYS; 1625 #endif 1626 } 1627 1628 /* Remove a specific breakpoint. */ 1629 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags) 1630 { 1631 #if defined(TARGET_HAS_ICE) 1632 CPUBreakpoint *bp; 1633 1634 TAILQ_FOREACH(bp, &env->breakpoints, entry) { 1635 if (bp->pc == pc && bp->flags == flags) { 1636 cpu_breakpoint_remove_by_ref(env, bp); 1574 1637 return 0; 1575 1638 } 1576 1639 } 1577 return -1; 1578 } 1579 1580 /* Remove all watchpoints. */ 1581 void cpu_watchpoint_remove_all(CPUState *env) { 1582 int i; 1583 1584 for (i = 0; i < env->nb_watchpoints; i++) { 1585 tlb_flush_page(env, env->watchpoint[i].vaddr); 1586 } 1587 env->nb_watchpoints = 0; 1588 } 1589 1590 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a 1591 breakpoint is reached */ 1592 int cpu_breakpoint_insert(CPUState *env, target_ulong pc) 1640 # ifndef VBOX 1641 return -ENOENT; 1642 # else 1643 return VERR_NOT_FOUND; 1644 # endif 1645 #else 1646 return -ENOSYS; 1647 #endif 1648 } 1649 1650 /* Remove a specific breakpoint by reference. */ 1651 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint) 1593 1652 { 1594 1653 #if defined(TARGET_HAS_ICE) 1595 int i; 1596 1597 for(i = 0; i < env->nb_breakpoints; i++) { 1598 if (env->breakpoints[i] == pc) 1599 return 0; 1600 } 1601 1602 if (env->nb_breakpoints >= MAX_BREAKPOINTS) 1603 return -1; 1604 env->breakpoints[env->nb_breakpoints++] = pc; 1605 1606 breakpoint_invalidate(env, pc); 1607 return 0; 1608 #else 1609 return -1; 1610 #endif 1611 } 1612 1613 /* remove all breakpoints */ 1614 void cpu_breakpoint_remove_all(CPUState *env) { 1654 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry); 1655 1656 breakpoint_invalidate(env, breakpoint->pc); 1657 1658 qemu_free(breakpoint); 1659 #endif 1660 } 1661 1662 /* Remove all matching breakpoints. 
*/ 1663 void cpu_breakpoint_remove_all(CPUState *env, int mask) 1664 { 1615 1665 #if defined(TARGET_HAS_ICE) 1616 int i; 1617 for(i = 0; i < env->nb_breakpoints; i++) { 1618 breakpoint_invalidate(env, env->breakpoints[i]); 1619 } 1620 env->nb_breakpoints = 0; 1621 #endif 1622 } 1623 1624 /* remove a breakpoint */ 1625 int cpu_breakpoint_remove(CPUState *env, target_ulong pc) 1626 { 1627 #if defined(TARGET_HAS_ICE) 1628 int i; 1629 for(i = 0; i < env->nb_breakpoints; i++) { 1630 if (env->breakpoints[i] == pc) 1631 goto found; 1632 } 1633 return -1; 1634 found: 1635 env->nb_breakpoints--; 1636 if (i < env->nb_breakpoints) 1637 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints]; 1638 1639 breakpoint_invalidate(env, pc); 1640 return 0; 1641 #else 1642 return -1; 1666 CPUBreakpoint *bp, *next; 1667 1668 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) { 1669 if (bp->flags & mask) 1670 cpu_breakpoint_remove_by_ref(env, bp); 1671 } 1643 1672 #endif 1644 1673 } … … 1758 1787 1759 1788 #ifndef VBOX 1760 CPULogItem cpu_log_items[] = {1789 const CPULogItem cpu_log_items[] = { 1761 1790 { CPU_LOG_TB_OUT_ASM, "out_asm", 1762 1791 "show generated host assembly code for each compiled TB" }, … … 1780 1809 { CPU_LOG_PCALL, "pcall", 1781 1810 "show protected mode far calls/returns/exceptions" }, 1811 { CPU_LOG_RESET, "cpu_reset", 1812 "show CPU state before CPU resets" }, 1782 1813 #endif 1783 1814 #ifdef DEBUG_IOPORT … … 1845 1876 cpu_dump_state(env, stderr, fprintf, 0); 1846 1877 #endif 1847 if ( logfile) {1848 fprintf(logfile,"qemu: fatal: ");1849 vfprintf(logfile,fmt, ap2);1850 fprintf(logfile,"\n");1878 if (qemu_log_enabled()) { 1879 qemu_log("qemu: fatal: "); 1880 qemu_log_vprintf(fmt, ap2); 1881 qemu_log("\n"); 1851 1882 #ifdef TARGET_I386 1852 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);1853 #else 1854 cpu_dump_state(env, logfile, fprintf, 0);1855 #endif 1856 fflush(logfile);1857 fclose(logfile);1883 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP); 1884 #else 1885 log_cpu_state(env, 0); 1886 #endif 1887 qemu_log_flush(); 1888 qemu_log_close(); 1858 1889 } 1859 1890 va_end(ap2); … … 1867 1898 { 1868 1899 CPUState *new_env = cpu_init(env->cpu_model_str); 1869 /* preserve chaining and index */1870 1900 CPUState *next_cpu = new_env->next_cpu; 1871 1901 int cpu_index = new_env->cpu_index; 1902 #if defined(TARGET_HAS_ICE) 1903 CPUBreakpoint *bp; 1904 CPUWatchpoint *wp; 1905 #endif 1906 1872 1907 memcpy(new_env, env, sizeof(CPUState)); 1908 1909 /* Preserve chaining and index. */ 1873 1910 new_env->next_cpu = next_cpu; 1874 1911 new_env->cpu_index = cpu_index; 1912 1913 /* Clone all break/watchpoints. 1914 Note: Once we support ptrace with hw-debug register access, make sure 1915 BP_CPU break/watchpoints are handled correctly on clone. 
*/ 1916 TAILQ_INIT(&env->breakpoints); 1917 TAILQ_INIT(&env->watchpoints); 1918 #if defined(TARGET_HAS_ICE) 1919 TAILQ_FOREACH(bp, &env->breakpoints, entry) { 1920 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL); 1921 } 1922 TAILQ_FOREACH(wp, &env->watchpoints, entry) { 1923 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1, 1924 wp->flags, NULL); 1925 } 1926 #endif 1927 1875 1928 return new_env; 1876 1929 } … … 2118 2171 return in_migration; 2119 2172 } 2120 #endif 2173 #endif /* !VBOX */ 2174 2175 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr) 2176 { 2177 if (kvm_enabled()) 2178 kvm_physical_sync_dirty_bitmap(start_addr, end_addr); 2179 } 2121 2180 2122 2181 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB) … … 2154 2213 for(i = 0; i < CPU_TLB_SIZE; i++) 2155 2214 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]); 2156 # if (NB_MMU_MODES >= 3)2215 # if (NB_MMU_MODES >= 3) 2157 2216 for(i = 0; i < CPU_TLB_SIZE; i++) 2158 2217 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]); 2159 # if (NB_MMU_MODES == 4)2218 # if (NB_MMU_MODES == 4) 2160 2219 for(i = 0; i < CPU_TLB_SIZE; i++) 2161 2220 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]); 2162 # endif2163 # endif2221 # endif 2222 # endif 2164 2223 #else /* VBOX */ 2165 2224 for(i = 0; i < CPU_TLB_SIZE; i++) … … 2218 2277 int ret; 2219 2278 CPUTLBEntry *te; 2220 int i;2279 CPUWatchpoint *wp; 2221 2280 target_phys_addr_t iotlb; 2222 2281 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB) … … 2267 2326 We can't use the high bits of pd for this because 2268 2327 IO_MEM_ROMD uses these as a ram address. */ 2269 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr; 2328 iotlb = (pd & ~TARGET_PAGE_MASK); 2329 #ifndef VBOX 2330 if (p) { 2331 #else 2332 if ( p->phys_offset 2333 && (pd & ~TARGET_PAGE_MASK) != env->pVM->rem.s.iMMIOMemType 2334 && (pd & ~TARGET_PAGE_MASK) != env->pVM->rem.s.iHandlerMemType) { 2335 #endif 2336 iotlb += p->region_offset; 2337 } else { 2338 iotlb += paddr; 2339 } 2270 2340 } 2271 2341 … … 2299 2369 /* Make accesses to pages with watchpoints go via the 2300 2370 watchpoint trap routines. */ 2301 for (i = 0; i < env->nb_watchpoints; i++) {2302 if (vaddr == ( env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {2371 TAILQ_FOREACH(wp, &env->watchpoints, entry) { 2372 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) { 2303 2373 iotlb = io_mem_watch + paddr; 2304 2374 /* TODO: The memory case can be optimized by not trapping … … 2467 2537 target_ulong addr; 2468 2538 2539 if (start + len < start) 2540 /* we've wrapped around */ 2541 return -1; 2542 2469 2543 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */ 2470 2544 start = start & TARGET_PAGE_MASK; 2471 2545 2472 if( end < start )2473 /* we've wrapped around */2474 return -1;2475 2546 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { 2476 2547 p = page_find(addr >> TARGET_PAGE_BITS); … … 2553 2624 2554 2625 #if !defined(CONFIG_USER_ONLY) 2626 2555 2627 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, 2556 ram_addr_t memory );2628 ram_addr_t memory, ram_addr_t region_offset); 2557 2629 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 2558 ram_addr_t orig_memory );2630 ram_addr_t orig_memory, ram_addr_t region_offset); 2559 2631 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \ 2560 2632 need_subpage) \ … … 2579 2651 /* register physical memory. 
'size' must be a multiple of the target 2580 2652 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an 2581 io memory page */ 2582 void cpu_register_physical_memory(target_phys_addr_t start_addr, 2583 ram_addr_t size, 2584 ram_addr_t phys_offset) 2653 io memory page. The address used when calling the IO function is 2654 the offset from the start of the region, plus region_offset. Both 2655 start_region and regon_offset are rounded down to a page boundary 2656 before calculating this offset. This should not be a problem unless 2657 the low bits of start_addr and region_offset differ. */ 2658 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr, 2659 ram_addr_t size, 2660 ram_addr_t phys_offset, 2661 ram_addr_t region_offset) 2585 2662 { 2586 2663 target_phys_addr_t addr, end_addr; … … 2597 2674 } 2598 2675 #endif 2676 if (kvm_enabled()) 2677 kvm_set_phys_mem(start_addr, size, phys_offset); 2678 2679 if (phys_offset == IO_MEM_UNASSIGNED) { 2680 region_offset = start_addr; 2681 } 2682 region_offset &= TARGET_PAGE_MASK; 2599 2683 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; 2600 2684 end_addr = start_addr + (target_phys_addr_t)size; … … 2611 2695 if (!(orig_memory & IO_MEM_SUBPAGE)) { 2612 2696 subpage = subpage_init((addr & TARGET_PAGE_MASK), 2613 &p->phys_offset, orig_memory); 2697 &p->phys_offset, orig_memory, 2698 p->region_offset); 2614 2699 } else { 2615 2700 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK) 2616 2701 >> IO_MEM_SHIFT]; 2617 2702 } 2618 subpage_register(subpage, start_addr2, end_addr2, phys_offset); 2703 subpage_register(subpage, start_addr2, end_addr2, phys_offset, 2704 region_offset); 2705 p->region_offset = 0; 2619 2706 } else { 2620 2707 p->phys_offset = phys_offset; … … 2626 2713 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1); 2627 2714 p->phys_offset = phys_offset; 2715 p->region_offset = region_offset; 2628 2716 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || 2629 (phys_offset & IO_MEM_ROMD)) 2717 (phys_offset & IO_MEM_ROMD)) { 2630 2718 phys_offset += TARGET_PAGE_SIZE; 2631 else {2719 } else { 2632 2720 target_phys_addr_t start_addr2, end_addr2; 2633 2721 int need_subpage = 0; … … 2638 2726 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) { 2639 2727 subpage = subpage_init((addr & TARGET_PAGE_MASK), 2640 &p->phys_offset, IO_MEM_UNASSIGNED); 2728 &p->phys_offset, IO_MEM_UNASSIGNED, 2729 addr & TARGET_PAGE_MASK); 2641 2730 subpage_register(subpage, start_addr2, end_addr2, 2642 phys_offset); 2731 phys_offset, region_offset); 2732 p->region_offset = 0; 2643 2733 } 2644 2734 } 2645 2735 } 2736 region_offset += TARGET_PAGE_SIZE; 2646 2737 } 2647 2738 … … 2666 2757 2667 2758 #ifndef VBOX 2759 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) 2760 { 2761 if (kvm_enabled()) 2762 kvm_coalesce_mmio_region(addr, size); 2763 } 2764 2765 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) 2766 { 2767 if (kvm_enabled()) 2768 kvm_uncoalesce_mmio_region(addr, size); 2769 } 2770 2668 2771 /* XXX: better than nothing */ 2669 2772 ram_addr_t qemu_ram_alloc(ram_addr_t size) … … 2690 2793 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 2691 2794 #endif 2692 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2795 #if defined(TARGET_SPARC) 2693 2796 do_unassigned_access(addr, 0, 0, 0, 1); 2694 2797 #endif … … 2701 2804 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 2702 2805 #endif 2703 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2806 #if 
defined(TARGET_SPARC) 2704 2807 do_unassigned_access(addr, 0, 0, 0, 2); 2705 2808 #endif … … 2712 2815 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 2713 2816 #endif 2714 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2817 #if defined(TARGET_SPARC) 2715 2818 do_unassigned_access(addr, 0, 0, 0, 4); 2716 2819 #endif … … 2723 2826 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 2724 2827 #endif 2725 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2828 #if defined(TARGET_SPARC) 2726 2829 do_unassigned_access(addr, 1, 0, 0, 1); 2727 2830 #endif … … 2733 2836 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 2734 2837 #endif 2735 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2838 #if defined(TARGET_SPARC) 2736 2839 do_unassigned_access(addr, 1, 0, 0, 2); 2737 2840 #endif … … 2743 2846 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 2744 2847 #endif 2745 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2848 #if defined(TARGET_SPARC) 2746 2849 do_unassigned_access(addr, 1, 0, 0, 4); 2747 2850 #endif … … 2828 2931 stw_p(phys_ram_base + ram_addr, val); 2829 2932 #endif 2830 2831 2933 #ifdef USE_KQEMU 2832 2934 if (cpu_single_env->kqemu_enabled && … … 2900 3002 2901 3003 /* Generate a debug exception if a watchpoint has been hit. */ 2902 static void check_watchpoint(int offset, int flags)3004 static void check_watchpoint(int offset, int len_mask, int flags) 2903 3005 { 2904 3006 CPUState *env = cpu_single_env; 3007 target_ulong pc, cs_base; 3008 TranslationBlock *tb; 2905 3009 target_ulong vaddr; 2906 int i; 2907 3010 CPUWatchpoint *wp; 3011 int cpu_flags; 3012 3013 if (env->watchpoint_hit) { 3014 /* We re-entered the check after replacing the TB. Now raise 3015 * the debug interrupt so that is will trigger after the 3016 * current instruction. 
*/ 3017 cpu_interrupt(env, CPU_INTERRUPT_DEBUG); 3018 return; 3019 } 2908 3020 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset; 2909 for (i = 0; i < env->nb_watchpoints; i++) { 2910 if (vaddr == env->watchpoint[i].vaddr 2911 && (env->watchpoint[i].type & flags)) { 2912 env->watchpoint_hit = i + 1; 2913 cpu_interrupt(env, CPU_INTERRUPT_DEBUG); 2914 break; 3021 TAILQ_FOREACH(wp, &env->watchpoints, entry) { 3022 if ((vaddr == (wp->vaddr & len_mask) || 3023 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) { 3024 wp->flags |= BP_WATCHPOINT_HIT; 3025 if (!env->watchpoint_hit) { 3026 env->watchpoint_hit = wp; 3027 tb = tb_find_pc(env->mem_io_pc); 3028 if (!tb) { 3029 cpu_abort(env, "check_watchpoint: could not find TB for " 3030 "pc=%p", (void *)env->mem_io_pc); 3031 } 3032 cpu_restore_state(tb, env, env->mem_io_pc, NULL); 3033 tb_phys_invalidate(tb, -1); 3034 if (wp->flags & BP_STOP_BEFORE_ACCESS) { 3035 env->exception_index = EXCP_DEBUG; 3036 } else { 3037 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); 3038 tb_gen_code(env, pc, cs_base, cpu_flags, 1); 3039 } 3040 cpu_resume_from_signal(env, NULL); 3041 } 3042 } else { 3043 wp->flags &= ~BP_WATCHPOINT_HIT; 2915 3044 } 2916 3045 } … … 2922 3051 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr) 2923 3052 { 2924 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);3053 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ); 2925 3054 return ldub_phys(addr); 2926 3055 } … … 2928 3057 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr) 2929 3058 { 2930 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);3059 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ); 2931 3060 return lduw_phys(addr); 2932 3061 } … … 2934 3063 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr) 2935 3064 { 2936 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);3065 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ); 2937 3066 return ldl_phys(addr); 2938 3067 } … … 2941 3070 uint32_t val) 2942 3071 { 2943 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);3072 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE); 2944 3073 stb_phys(addr, val); 2945 3074 } … … 2948 3077 uint32_t val) 2949 3078 { 2950 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);3079 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE); 2951 3080 stw_phys(addr, val); 2952 3081 } … … 2955 3084 uint32_t val) 2956 3085 { 2957 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);3086 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE); 2958 3087 stl_phys(addr, val); 2959 3088 } … … 2977 3106 unsigned int idx; 2978 3107 2979 idx = SUBPAGE_IDX(addr - mmio->base);3108 idx = SUBPAGE_IDX(addr); 2980 3109 #if defined(DEBUG_SUBPAGE) 2981 3110 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__, 2982 3111 mmio, len, addr, idx); 2983 3112 #endif 2984 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr); 3113 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], 3114 addr + mmio->region_offset[idx][0][len]); 2985 3115 2986 3116 return ret; … … 2992 3122 unsigned int idx; 2993 3123 2994 idx = SUBPAGE_IDX(addr - mmio->base);3124 idx = SUBPAGE_IDX(addr); 2995 3125 #if defined(DEBUG_SUBPAGE) 2996 3126 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__, 2997 3127 mmio, len, addr, idx, value); 2998 3128 #endif 2999 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value); 
3129 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], 3130 addr + mmio->region_offset[idx][1][len], 3131 value); 3000 3132 } 3001 3133 … … 3067 3199 3068 3200 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, 3069 ram_addr_t memory )3201 ram_addr_t memory, ram_addr_t region_offset) 3070 3202 { 3071 3203 int idx, eidx; … … 3086 3218 mmio->mem_read[idx][i] = &io_mem_read[memory][i]; 3087 3219 mmio->opaque[idx][0][i] = io_mem_opaque[memory]; 3220 mmio->region_offset[idx][0][i] = region_offset; 3088 3221 } 3089 3222 if (io_mem_write[memory][i]) { 3090 3223 mmio->mem_write[idx][i] = &io_mem_write[memory][i]; 3091 3224 mmio->opaque[idx][1][i] = io_mem_opaque[memory]; 3225 mmio->region_offset[idx][1][i] = region_offset; 3092 3226 } 3093 3227 } … … 3098 3232 3099 3233 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 3100 ram_addr_t orig_memory )3234 ram_addr_t orig_memory, ram_addr_t region_offset) 3101 3235 { 3102 3236 subpage_t *mmio; … … 3104 3238 3105 3239 mmio = qemu_mallocz(sizeof(subpage_t)); 3106 if (mmio != NULL) { 3107 3108 3240 3241 mmio->base = base; 3242 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio); 3109 3243 #if defined(DEBUG_SUBPAGE) 3110 3111 3112 #endif 3113 3114 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);3115 }3244 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__, 3245 mmio, base, TARGET_PAGE_SIZE, subpage_memory); 3246 #endif 3247 *phys = subpage_memory | IO_MEM_SUBPAGE; 3248 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory, 3249 region_offset); 3116 3250 3117 3251 return mmio; 3118 3252 } 3119 3253 3254 static int get_free_io_mem_idx(void) 3255 { 3256 int i; 3257 3258 for (i = 0; i<IO_MEM_NB_ENTRIES; i++) 3259 if (!io_mem_used[i]) { 3260 io_mem_used[i] = 1; 3261 return i; 3262 } 3263 3264 return -1; 3265 } 3266 3120 3267 static void io_mem_init(void) 3121 3268 { 3269 int i; 3270 3122 3271 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL); 3123 3272 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL); 3124 3273 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL); 3125 io_mem_nb = 5; 3274 for (i=0; i<5; i++) 3275 io_mem_used[i] = 1; 3126 3276 3127 3277 io_mem_watch = cpu_register_io_memory(0, watch_mem_read, … … 3151 3301 3152 3302 if (io_index <= 0) { 3153 i f (io_mem_nb >= IO_MEM_NB_ENTRIES)3154 return -1;3155 io_index = io_mem_nb++;3303 io_index = get_free_io_mem_idx(); 3304 if (io_index == -1) 3305 return io_index; 3156 3306 } else { 3157 3307 if (io_index >= IO_MEM_NB_ENTRIES) … … 3167 3317 io_mem_opaque[io_index] = opaque; 3168 3318 return (io_index << IO_MEM_SHIFT) | subwidth; 3319 } 3320 3321 void cpu_unregister_io_memory(int io_table_address) 3322 { 3323 int i; 3324 int io_index = io_table_address >> IO_MEM_SHIFT; 3325 3326 for (i=0;i < 3; i++) { 3327 io_mem_read[io_index][i] = unassigned_mem_read[i]; 3328 io_mem_write[io_index][i] = unassigned_mem_write[i]; 3329 } 3330 io_mem_opaque[io_index] = NULL; 3331 io_mem_used[io_index] = 0; 3169 3332 } 3170 3333 … … 3248 3411 if (is_write) { 3249 3412 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 3413 target_phys_addr_t addr1 = addr; 3250 3414 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3415 if (p) 3416 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3251 3417 /* XXX: could force cpu_single_env to NULL to avoid 3252 3418 potential 
bugs */ 3253 if (l >= 4 && ((addr & 3) == 0)) {3419 if (l >= 4 && ((addr1 & 3) == 0)) { 3254 3420 /* 32 bit write access */ 3255 3421 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) … … 3258 3424 val = *(const uint32_t *)buf; 3259 3425 #endif 3260 io_mem_write[io_index][2](io_mem_opaque[io_index], addr , val);3426 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val); 3261 3427 l = 4; 3262 } else if (l >= 2 && ((addr & 1) == 0)) {3428 } else if (l >= 2 && ((addr1 & 1) == 0)) { 3263 3429 /* 16 bit write access */ 3264 3430 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) … … 3267 3433 val = *(const uint16_t *)buf; 3268 3434 #endif 3269 io_mem_write[io_index][1](io_mem_opaque[io_index], addr , val);3435 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val); 3270 3436 l = 2; 3271 3437 } else { … … 3276 3442 val = *(const uint8_t *)buf; 3277 3443 #endif 3278 io_mem_write[io_index][0](io_mem_opaque[io_index], addr , val);3444 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val); 3279 3445 l = 1; 3280 3446 } … … 3303 3469 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 3304 3470 !(pd & IO_MEM_ROMD)) { 3471 target_phys_addr_t addr1 = addr; 3305 3472 /* I/O case */ 3306 3473 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3307 if (l >= 4 && ((addr & 3) == 0)) { 3474 if (p) 3475 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3476 if (l >= 4 && ((addr1 & 3) == 0)) { 3308 3477 /* 32 bit read access */ 3309 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr );3478 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1); 3310 3479 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 3311 3480 stl_p(buf, val); … … 3314 3483 #endif 3315 3484 l = 4; 3316 } else if (l >= 2 && ((addr & 1) == 0)) {3485 } else if (l >= 2 && ((addr1 & 1) == 0)) { 3317 3486 /* 16 bit read access */ 3318 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr );3487 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1); 3319 3488 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 3320 3489 stw_p(buf, val); … … 3325 3494 } else { 3326 3495 /* 8 bit read access */ 3327 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr );3496 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1); 3328 3497 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 3329 3498 stb_p(buf, val); … … 3351 3520 3352 3521 #ifndef VBOX 3522 3353 3523 /* used for ROM loading : can write in RAM and ROM */ 3354 3524 void cpu_physical_memory_write_rom(target_phys_addr_t addr, … … 3389 3559 } 3390 3560 } 3561 3562 typedef struct { 3563 void *buffer; 3564 target_phys_addr_t addr; 3565 target_phys_addr_t len; 3566 } BounceBuffer; 3567 3568 static BounceBuffer bounce; 3569 3570 typedef struct MapClient { 3571 void *opaque; 3572 void (*callback)(void *opaque); 3573 LIST_ENTRY(MapClient) link; 3574 } MapClient; 3575 3576 static LIST_HEAD(map_client_list, MapClient) map_client_list 3577 = LIST_HEAD_INITIALIZER(map_client_list); 3578 3579 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)) 3580 { 3581 MapClient *client = qemu_malloc(sizeof(*client)); 3582 3583 client->opaque = opaque; 3584 client->callback = callback; 3585 LIST_INSERT_HEAD(&map_client_list, client, link); 3586 return client; 3587 } 3588 3589 void cpu_unregister_map_client(void *_client) 3590 { 3591 MapClient *client = (MapClient *)_client; 3592 3593 LIST_REMOVE(client, link); 3594 } 3595 3596 static void cpu_notify_map_clients(void) 3597 { 3598 MapClient *client; 3599 3600 while 
(!LIST_EMPTY(&map_client_list)) { 3601 client = LIST_FIRST(&map_client_list); 3602 client->callback(client->opaque); 3603 LIST_REMOVE(client, link); 3604 } 3605 } 3606 3607 /* Map a physical memory region into a host virtual address. 3608 * May map a subset of the requested range, given by and returned in *plen. 3609 * May return NULL if resources needed to perform the mapping are exhausted. 3610 * Use only for reads OR writes - not for read-modify-write operations. 3611 * Use cpu_register_map_client() to know when retrying the map operation is 3612 * likely to succeed. 3613 */ 3614 void *cpu_physical_memory_map(target_phys_addr_t addr, 3615 target_phys_addr_t *plen, 3616 int is_write) 3617 { 3618 target_phys_addr_t len = *plen; 3619 target_phys_addr_t done = 0; 3620 int l; 3621 uint8_t *ret = NULL; 3622 uint8_t *ptr; 3623 target_phys_addr_t page; 3624 unsigned long pd; 3625 PhysPageDesc *p; 3626 unsigned long addr1; 3627 3628 while (len > 0) { 3629 page = addr & TARGET_PAGE_MASK; 3630 l = (page + TARGET_PAGE_SIZE) - addr; 3631 if (l > len) 3632 l = len; 3633 p = phys_page_find(page >> TARGET_PAGE_BITS); 3634 if (!p) { 3635 pd = IO_MEM_UNASSIGNED; 3636 } else { 3637 pd = p->phys_offset; 3638 } 3639 3640 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 3641 if (done || bounce.buffer) { 3642 break; 3643 } 3644 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); 3645 bounce.addr = addr; 3646 bounce.len = l; 3647 if (!is_write) { 3648 cpu_physical_memory_rw(addr, bounce.buffer, l, 0); 3649 } 3650 ptr = bounce.buffer; 3651 } else { 3652 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3653 ptr = phys_ram_base + addr1; 3654 } 3655 if (!done) { 3656 ret = ptr; 3657 } else if (ret + done != ptr) { 3658 break; 3659 } 3660 3661 len -= l; 3662 addr += l; 3663 done += l; 3664 } 3665 *plen = done; 3666 return ret; 3667 } 3668 3669 /* Unmaps a memory region previously mapped by cpu_physical_memory_map(). 3670 * Will also mark the memory as dirty if is_write == 1. access_len gives 3671 * the amount of memory that was actually read or written by the caller. 
3672 */ 3673 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len, 3674 int is_write, target_phys_addr_t access_len) 3675 { 3676 if (buffer != bounce.buffer) { 3677 if (is_write) { 3678 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base; 3679 while (access_len) { 3680 unsigned l; 3681 l = TARGET_PAGE_SIZE; 3682 if (l > access_len) 3683 l = access_len; 3684 if (!cpu_physical_memory_is_dirty(addr1)) { 3685 /* invalidate code */ 3686 tb_invalidate_phys_page_range(addr1, addr1 + l, 0); 3687 /* set dirty bit */ 3688 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= 3689 (0xff & ~CODE_DIRTY_FLAG); 3690 } 3691 addr1 += l; 3692 access_len -= l; 3693 } 3694 } 3695 return; 3696 } 3697 if (is_write) { 3698 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len); 3699 } 3700 qemu_free(bounce.buffer); 3701 bounce.buffer = NULL; 3702 cpu_notify_map_clients(); 3703 } 3704 3391 3705 #endif /* !VBOX */ 3392 3393 3706 3394 3707 /* warning: addr must be aligned */ … … 3412 3725 /* I/O case */ 3413 3726 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3727 if (p) 3728 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3414 3729 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); 3415 3730 } else { … … 3446 3761 /* I/O case */ 3447 3762 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3763 if (p) 3764 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3448 3765 #ifdef TARGET_WORDS_BIGENDIAN 3449 3766 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32; … … 3501 3818 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 3502 3819 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3820 if (p) 3821 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3503 3822 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 3504 3823 } else { … … 3510 3829 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr); 3511 3830 #endif 3831 3512 3832 #ifndef VBOX 3513 3833 if (unlikely(in_migration)) { … … 3540 3860 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 3541 3861 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3862 if (p) 3863 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3542 3864 #ifdef TARGET_WORDS_BIGENDIAN 3543 3865 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32); … … 3575 3897 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 3576 3898 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3899 if (p) 3900 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3577 3901 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 3578 3902 } else {
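The largest functional change in this revision replaces the fixed-size env->watchpoint[]/env->breakpoints[] arrays with TAILQ-linked lists and a richer insert/remove API that takes an address, a power-of-two length, flags, and an optional back-reference. The following is a minimal usage sketch of that new interface, not part of the commit itself; it assumes the CPUState/CPUWatchpoint declarations and the BP_* flags from the accompanying headers are in scope, and the helper function name is hypothetical.

/* Illustrative sketch only -- drives the watchpoint API added by this changeset.
   Assumes an initialized CPUState *env and the BP_* flags from cpu-all.h. */
static int demo_watch_guest_dword(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp = NULL;
    int rc;

    /* Watch a 4-byte, 4-byte-aligned guest location for writes; BP_GDB keeps
       the entry at the head of env->watchpoints like a debugger-injected one. */
    rc = cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
    if (rc != 0)
        return rc;  /* -EINVAL (or VERR_INVALID_PARAMETER on VBox) for bad len/alignment */

    /* ... run the guest: a hit sets BP_WATCHPOINT_HIT in wp->flags, records it in
       env->watchpoint_hit, and the CPU loop reports EXCP_DEBUG ... */

    /* Tear down directly by reference once the watch is no longer needed. */
    cpu_watchpoint_remove_by_ref(env, wp);
    return 0;
}

Removal can also go through cpu_watchpoint_remove(env, addr, len, flags) or cpu_watchpoint_remove_all(env, mask), both of which this changeset introduces alongside the by-reference variant, with the analogous cpu_breakpoint_* calls for breakpoints.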