Changeset 36140 in vbox for trunk/src/recompiler/exec.c
- Timestamp: Mar 3, 2011 1:48:16 PM
- File: trunk/src/recompiler/exec.c (1 edited)
trunk/src/recompiler/exec.c
--- trunk/src/recompiler/exec.c (r36125)
+++ trunk/src/recompiler/exec.c (r36140)
@@ -31 +31 @@
 #ifndef VBOX
 #ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
 #include <windows.h>
 #else
@@ -54 +55 @@
 #include "cpu.h"
 #include "exec-all.h"
+#include "qemu-common.h"
+#include "tcg.h"
+#ifndef VBOX
+#include "hw/hw.h"
+#endif
+#include "osdep.h"
 #if defined(CONFIG_USER_ONLY)
 #include <qemu.h>
@@ -66 +73 @@
 //#define DEBUG_TB_CHECK
 //#define DEBUG_TLB_CHECK
+
+//#define DEBUG_IOPORT
+//#define DEBUG_SUBPAGE
 
 #if !defined(CONFIG_USER_ONLY)
@@ -114 +124 @@
 __attribute__((aligned (32)))
 #endif
+
 uint8_t code_gen_prologue[1024] code_gen_section;
-
 #else /* VBOX */
 extern uint8_t* code_gen_prologue;
 #endif /* VBOX */
-
 static uint8_t *code_gen_buffer;
 static unsigned long code_gen_buffer_size;
@@ -171 +180 @@
 
 typedef struct PhysPageDesc {
-    /* offset in host memory of the page + io_index in the low 12 bits */
+    /* offset in host memory of the page + io_index in the low bits */
     ram_addr_t phys_offset;
 } PhysPageDesc;
@@ -194 +203 @@
 #define L1_SIZE (1 << L1_BITS)
 #define L2_SIZE (1 << L2_BITS)
-
-static void io_mem_init(void);
 
 unsigned long qemu_real_host_page_size;
@@ -252 +259 @@
 } subpage_t;
 
-
 #ifndef VBOX
 #ifdef _WIN32
@@ -279 +285 @@
 }
 #endif
-#else // VBOX
+#else /* VBOX */
 static void map_exec(void *addr, long size)
 {
@@ -285 +291 @@
                    RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
 }
-#endif
+#endif /* VBOX */
 
 static void page_init(void)
@@ -325 +331 @@
     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
 #endif
+
 #ifdef VBOX
     /* We use other means to set reserved bit on our pages */
-#else
+#else /* !VBOX */
 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
     {
@@ -355 +362 @@
     }
 #endif
-#endif
+#endif /* !VBOX */
 
 }
@@ -498 +505 @@
 #endif
 
-#ifdef VBOX
-/*
- * We don't need such huge codegen buffer size, as execute most of the code
- * in raw or hwacc mode
- */
+#ifdef VBOX /* We don't need such huge codegen buffer size, as execute
+               most of the code in raw or hwacc mode. */
 #define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
-#else
+#else  /* !VBOX */
 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
-#endif
+#endif /* !VBOX */
 
 #if defined(CONFIG_USER_ONLY)
 /* Currently it is not recommended to allocate big chunks of data in
    user mode. It will change when a dedicated libc will be used */
 #define USE_STATIC_CODE_GEN_BUFFER
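The PhysPageDesc hunk above relies on phys_offset packing two values into one word: the page-aligned offset of the page in host RAM and, for MMIO pages, an I/O handler index in the low bits. A minimal sketch of that encoding; the helper names pack_phys_offset and unpack_io_index are hypothetical, not the file's own:

    #include <stdint.h>

    typedef uint64_t ram_addr_t;

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK (~((ram_addr_t)(1 << TARGET_PAGE_BITS) - 1))

    /* The page-aligned host offset occupies the high bits; a small
       I/O handler index can share the word in the low TARGET_PAGE_BITS
       bits because those bits are always zero in a page offset. */
    static ram_addr_t pack_phys_offset(ram_addr_t page_offset, unsigned io_index)
    {
        return (page_offset & TARGET_PAGE_MASK) | io_index;
    }

    static unsigned unpack_io_index(ram_addr_t phys_offset)
    {
        return (unsigned)(phys_offset & ~TARGET_PAGE_MASK);
    }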
@@ -829 +833 @@
     }
 }
-#endif // DEBUG_TB_CHECK
+
+#endif
 
 /* invalidate one TB */
@@ -956 +961 @@
 
 #ifdef VBOX
+
 void tb_invalidate_virt(CPUState *env, uint32_t eip)
 {
@@ -998 +1004 @@
 }
 # endif /* VBOX_STRICT */
+
 #endif /* VBOX */
@@ -1032 +1039 @@
     TranslationBlock *tb;
 
-    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
+    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
     if (!p->code_bitmap)
         return;
-    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
 
     tb = p->first_tb;
@@ -1516 +1522 @@
 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
 {
-    target_ulong addr, pd;
+    target_phys_addr_t addr;
+    target_ulong pd;
     ram_addr_t ram_addr;
     PhysPageDesc *p;
@@ -1644 +1651 @@
     if (env->singlestep_enabled != enabled) {
         env->singlestep_enabled = enabled;
-        /* must flush all the translated code to avoid inconsistencies */
+        /* must flush all the translated code to avoid inconsistancies */
         /* XXX: only flush what is necessary */
         tb_flush(env);
@@ -1657 +1664 @@
     loglevel = log_flags;
     if (loglevel && !logfile) {
-        logfile = fopen(logfilename, "w");
+        logfile = fopen(logfilename, log_append ? "a" : "w");
         if (!logfile) {
             perror(logfilename);
@@ -1665 +1672 @@
         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
         {
-            static uint8_t logfile_buf[4096];
+            static char logfile_buf[4096];
             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
         }
@@ -1671 +1678 @@
         setvbuf(logfile, NULL, _IOLBF, 0);
 #endif
+        log_append = 1;
+    }
+    if (!loglevel && logfile) {
+        fclose(logfile);
+        logfile = NULL;
     }
 }
@@ -1677 +1689 @@
 {
     logfilename = strdup(filename);
+    if (logfile) {
+        fclose(logfile);
+        logfile = NULL;
+    }
+    cpu_set_log(loglevel);
 }
 #endif /* !VBOX */
@@ -1747 +1764 @@
       "show target assembly code for each compiled TB" },
     { CPU_LOG_TB_OP, "op",
-      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
+      "show micro ops for each compiled TB" },
+    { CPU_LOG_TB_OP_OPT, "op_opt",
+      "show micro ops "
 #ifdef TARGET_I386
-    { CPU_LOG_TB_OP_OPT, "op_opt",
-      "show micro ops after optimization for each compiled TB" },
-#endif
+      "before eflags optimization and "
+#endif
+      "after liveness analysis" },
     { CPU_LOG_INT, "int",
       "show interrupts/exceptions in short format" },
@@ -1757 +1776 @@
       "show trace before each executed TB (lots of logs)" },
     { CPU_LOG_TB_CPU, "cpu",
-      "show CPU state before bloc translation" },
+      "show CPU state before block translation" },
 #ifdef TARGET_I386
     { CPU_LOG_PCALL, "pcall",
@@ -1779 +1798 @@
 int cpu_str_to_log_mask(const char *str)
 {
-    CPULogItem *item;
+    const CPULogItem *item;
     int mask;
     const char *p, *p1;
@@ -1814 +1833 @@
 {
     va_list ap;
+    va_list ap2;
 
     va_start(ap, fmt);
+    va_copy(ap2, ap);
     fprintf(stderr, "qemu: fatal: ");
     vfprintf(stderr, fmt, ap);
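The va_copy() added above is what makes the logfile dump in the next hunk possible: a va_list may only be traversed once, so a function that wants to vfprintf() the same arguments to two streams has to duplicate the list before the first traversal. A standalone illustration of the pattern (not VBox code):

    #include <stdarg.h>
    #include <stdio.h>

    /* Print the same formatted message to two streams. Each vfprintf()
       consumes its va_list, so the list is duplicated with va_copy()
       before the first traversal. */
    static void report(FILE *a, FILE *b, const char *fmt, ...)
    {
        va_list ap, ap2;

        va_start(ap, fmt);
        va_copy(ap2, ap);
        vfprintf(a, fmt, ap);
        vfprintf(b, fmt, ap2);
        va_end(ap2);
        va_end(ap);
    }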
@@ -1824 +1845 @@
     cpu_dump_state(env, stderr, fprintf, 0);
 #endif
+    if (logfile) {
+        fprintf(logfile, "qemu: fatal: ");
+        vfprintf(logfile, fmt, ap2);
+        fprintf(logfile, "\n");
+#ifdef TARGET_I386
+        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
+#else
+        cpu_dump_state(env, logfile, fprintf, 0);
+#endif
+        fflush(logfile);
+        fclose(logfile);
+    }
+    va_end(ap2);
     va_end(ap);
     abort();
@@ -1841 +1875 @@
     return new_env;
 }
-#endif
+#endif /* !VBOX */
 
 #if !defined(CONFIG_USER_ONLY)
@@ -1865 +1899 @@
 }
 
+#ifdef VBOX
 static CPUTLBEntry s_cputlb_empty_entry = {
     .addr_read  = -1,
@@ -1871 +1906 @@
     .addend     = -1,
 };
+#endif /* VBOX */
 
 /* NOTE: if flush_global is true, also flush global entries (not
@@ -1886 +1922 @@
 
     for(i = 0; i < CPU_TLB_SIZE; i++) {
+#ifdef VBOX
         int mmu_idx;
         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
             env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
         }
+#else /* !VBOX */
+        env->tlb_table[0][i].addr_read = -1;
+        env->tlb_table[0][i].addr_write = -1;
+        env->tlb_table[0][i].addr_code = -1;
+        env->tlb_table[1][i].addr_read = -1;
+        env->tlb_table[1][i].addr_write = -1;
+        env->tlb_table[1][i].addr_code = -1;
+#if (NB_MMU_MODES >= 3)
+        env->tlb_table[2][i].addr_read = -1;
+        env->tlb_table[2][i].addr_write = -1;
+        env->tlb_table[2][i].addr_code = -1;
+#if (NB_MMU_MODES == 4)
+        env->tlb_table[3][i].addr_read = -1;
+        env->tlb_table[3][i].addr_write = -1;
+        env->tlb_table[3][i].addr_code = -1;
+#endif
+#endif
+#endif /* !VBOX */
     }
@@ -1987 +2042 @@
         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
         if ((addr - start) < length) {
-            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
+            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
         }
     }
@@ -2129 +2184 @@
 }
 
-
-/* update the TLB corresponding to virtual page vaddr and phys addr
-   addr so that it is no longer dirty */
-static inline void tlb_set_dirty(CPUState *env,
-                                 unsigned long addr, target_ulong vaddr)
+/* update the TLB corresponding to virtual page vaddr
+   so that it is no longer dirty */
+static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
 {
     int i;
 
-    addr &= TARGET_PAGE_MASK;
+    vaddr &= TARGET_PAGE_MASK;
     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_set_dirty1(&env->tlb_table[0][i], addr);
-    tlb_set_dirty1(&env->tlb_table[1][i], addr);
+    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
+    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
 #if (NB_MMU_MODES >= 3)
     tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
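The IO_MEM_NOTDIRTY to TLB_NOTDIRTY switch above leans on how the softmmu fast path works: a store takes the fast path only when the address tag compares exactly equal to addr_write, so OR-ing any flag bit into addr_write breaks the match and diverts the store to the slow path that maintains the dirty bitmap. A schematic model of that trick; the struct, the bit position, and the function name are illustrative, not the file's own:

    #include <stdint.h>

    typedef uint64_t target_ulong;

    #define TARGET_PAGE_MASK  (~(target_ulong)0xfff)
    #define TLB_NOTDIRTY      ((target_ulong)1 << 4)  /* illustrative bit */

    typedef struct {
        target_ulong addr_write;  /* page tag; low bits double as flags */
    } CPUTLBEntry;

    /* A store may use the fast path only on an exact tag match, so a
       flag bit set in addr_write (e.g. TLB_NOTDIRTY) forces the slow
       path, which can update the dirty flags before storing. */
    static int store_needs_slow_path(const CPUTLBEntry *e, target_ulong vaddr)
    {
        return (vaddr & TARGET_PAGE_MASK) != e->addr_write;
    }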
@@ -2302 +2355 @@
     return ret;
 }
-#if 0
-/* called from signal handler: invalidate the code and unprotect the
-   page. Return TRUE if the fault was successfully handled. */
-int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
-{
-#if !defined(CONFIG_SOFTMMU)
-    VirtPageDesc *vp;
-
-#if defined(DEBUG_TLB)
-    printf("page_unprotect: addr=0x%08x\n", addr);
-#endif
-    addr &= TARGET_PAGE_MASK;
-
-    /* if it is not mapped, no need to worry here */
-    if (addr >= MMAP_AREA_END)
-        return 0;
-    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
-    if (!vp)
-        return 0;
-    /* NOTE: in this case, validate_tag is _not_ tested as it
-       validates only the code TLB */
-    if (vp->valid_tag != virt_valid_tag)
-        return 0;
-    if (!(vp->prot & PAGE_WRITE))
-        return 0;
-#if defined(DEBUG_TLB)
-    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
-           addr, vp->phys_addr, vp->prot);
-#endif
-    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
-        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
-                  (unsigned long)addr, vp->prot);
-    /* set the dirty bit */
-    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
-    /* flush the code inside */
-    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
-    return 1;
-#elif defined(VBOX)
-    addr &= TARGET_PAGE_MASK;
-
-    /* if it is not mapped, no need to worry here */
-    if (addr >= MMAP_AREA_END)
-        return 0;
-    return 1;
-#else
-    return 0;
-#endif
-}
-#endif /* 0 */
 
 #else
@@ -2425 +2429 @@
 
 /* modify the flags of a page and invalidate the code if
-   necessary. The flag PAGE_WRITE_ORG is positioned automatically
+   necessary. The flag PAGE_WRITE_ORG is positionned automatically
    depending on PAGE_WRITE */
 void page_set_flags(target_ulong start, target_ulong end, int flags)
@@ -2432 +2436 @@
     target_ulong addr;
 
+    /* mmap_lock should already be held. */
     start = start & TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);
@@ -2439 +2444 @@
     AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
 #endif
-    spin_lock(&tb_lock);
+
     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
+        /* We may be called for host regions that are outside guest
+           address space. */
+        if (!p)
+            return;
         /* if the write protection is set, then we invalidate the code
@@ -2451 +2459 @@
         p->flags = flags;
     }
-    spin_unlock(&tb_lock);
 }
@@ -2491 +2498 @@
 
 /* called from signal handler: invalidate the code and unprotect the
-   page. Return TRUE if the fault was successfully handled. */
+   page. Return TRUE if the fault was succesfully handled. */
 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
 {
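The page_set_flags()/page_unprotect() hunks above are two halves of one mechanism in user-mode emulation: pages holding translated code are write-protected, so a self-modifying store faults, and the SIGSEGV handler calls page_unprotect() to restore PAGE_WRITE and invalidate the stale translations. A stripped-down sketch of the protect side, assuming POSIX mprotect() and simplified names:

    #include <stdint.h>
    #include <sys/mman.h>

    #define HOST_PAGE_SIZE 4096  /* illustrative; real code queries it */

    /* Write-protect the host page that holds translated guest code so
       a later self-modifying store faults; the fault handler can then
       re-enable writes and throw the stale translations away. */
    static int protect_code_page(void *host_addr)
    {
        uintptr_t page = (uintptr_t)host_addr & ~(uintptr_t)(HOST_PAGE_SIZE - 1);
        return mprotect((void *)page, HOST_PAGE_SIZE, PROT_READ | PROT_EXEC);
    }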
@@ -2570 +2577 @@
 } while (0)
 
-
 /* register physical memory. 'size' must be a multiple of the target
    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
    io memory page */
 void cpu_register_physical_memory(target_phys_addr_t start_addr,
-                                  unsigned long size,
-                                  unsigned long phys_offset)
+                                  ram_addr_t size,
+                                  ram_addr_t phys_offset)
 {
     target_phys_addr_t addr, end_addr;
@@ -2613 +2619 @@ (whitespace-only change)
         } else {
             p->phys_offset = phys_offset;
             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                 (phys_offset & IO_MEM_ROMD))
                 phys_offset += TARGET_PAGE_SIZE;
         }
@@ -2620 +2626 @@ (whitespace-only change)
         p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
         p->phys_offset = phys_offset;
         if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
             (phys_offset & IO_MEM_ROMD))
             phys_offset += TARGET_PAGE_SIZE;
         else {
@@ -2639 +2645 @@
         }
     }
+
     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
@@ -2648 +2655 @@
 
 /* XXX: temporary until new memory mapping API */
-uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
+ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
 {
     PhysPageDesc *p;
@@ -2676 +2683 @@
 {
 }
-#endif
+#endif /* !VBOX */
 
 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
 {
 #ifdef DEBUG_UNASSIGNED
-    printf("Unassigned mem read 0x%08x\n", (int)addr);
+    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
 #endif
 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
@@ -2715 +2721 @@
 {
 #ifdef DEBUG_UNASSIGNED
-    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
+    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
+#endif
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+    do_unassigned_access(addr, 1, 0, 0, 1);
 #endif
 }
@@ -2738 +2747 @@
 #endif
 }
+
 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
     unassigned_mem_readb,
@@ -2750 +2760 @@
 };
 
-static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
-{
-    unsigned long ram_addr;
+static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
+                                uint32_t val)
+{
     int dirty_flags;
-#if defined(VBOX)
-    ram_addr = addr;
-#else
-    ram_addr = addr - (unsigned long)phys_ram_base;
-#endif
 #ifdef VBOX
     if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
@@ -2777 +2782 @@
     }
 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
-    remR3PhysWriteU8(addr, val);
+    remR3PhysWriteU8(ram_addr, val);
 #else
-    stb_p((uint8_t *)(long)addr, val);
+    stb_p(phys_ram_base + ram_addr, val);
 #endif
 #ifdef USE_KQEMU
@@ -2794 +2799 @@
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
+        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }
 
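For orientation before the matching writew/writel hunks below: the notdirty_mem_write* rework changes the handlers to receive the RAM offset directly instead of recomputing it from a host pointer, then store into host RAM and update the page's dirty-flag byte. The essential shape of that slow path, reduced to a model with invented names:

    #include <stdint.h>

    #define TARGET_PAGE_BITS 12
    #define CODE_DIRTY_FLAG  0x01  /* illustrative flag bit */

    extern uint8_t *phys_ram_base;   /* host backing store for guest RAM */
    extern uint8_t  phys_ram_dirty[];

    /* Model of the slow-path byte store: write into host RAM, then set
       every dirty bit for the page except the code-dirty bit, which is
       managed by the translator. */
    static void notdirty_writeb_model(uint64_t ram_addr, uint8_t val)
    {
        phys_ram_base[ram_addr] = val;
        phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= 0xff & ~CODE_DIRTY_FLAG;
    }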
@@ -2799 +2804 @@
-static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
-{
-    unsigned long ram_addr;
+static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
+                                uint32_t val)
+{
     int dirty_flags;
-#if defined(VBOX)
-    ram_addr = addr;
-#else
-    ram_addr = addr - (unsigned long)phys_ram_base;
-#endif
 #ifdef VBOX
     if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
@@ -2824 +2824 @@
     }
 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
-    remR3PhysWriteU16(addr, val);
+    remR3PhysWriteU16(ram_addr, val);
 #else
-    stw_p((uint8_t *)(long)addr, val);
+    stw_p(phys_ram_base + ram_addr, val);
 #endif
@@ -2842 +2842 @@
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
+        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }
 
-static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
-{
-    unsigned long ram_addr;
+static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
+                                uint32_t val)
+{
     int dirty_flags;
-#if defined(VBOX)
-    ram_addr = addr;
-#else
-    ram_addr = addr - (unsigned long)phys_ram_base;
-#endif
 #ifdef VBOX
     if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
@@ -2872 +2867 @@
     }
 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
-    remR3PhysWriteU32(addr, val);
+    remR3PhysWriteU32(ram_addr, val);
 #else
-    stl_p((uint8_t *)(long)addr, val);
+    stl_p(phys_ram_base + ram_addr, val);
 #endif
 #ifdef USE_KQEMU
@@ -2889 +2884 @@
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
+        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }
@@ -2903 +2898 @@
     notdirty_mem_writel,
 };
-
 
 /* Generate a debug exception if a watchpoint has been hit. */
@@ -3184 +3178 @@
     return io_mem_read[io_index >> IO_MEM_SHIFT];
 }
+
 #endif /* !defined(CONFIG_USER_ONLY) */
@@ -3210 +3205 @@
                 /* FIXME - should this return an error rather than just fail? */
                 return;
-            memcpy(p, buf, len);
-            unlock_user(p, addr, len);
+            memcpy(p, buf, l);
+            unlock_user(p, addr, l);
         } else {
             if (!(flags & PAGE_READ))
                 return;
+            /* XXX: this code should not depend on lock_user */
             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                 /* FIXME - should this return an error rather than just fail? */
                 return;
-            memcpy(buf, p, len);
+            memcpy(buf, p, l);
             unlock_user(p, addr, 0);
@@ -3508 +3504 @@
     } else {
 #ifndef VBOX
-        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
-            (addr & ~TARGET_PAGE_MASK);
+        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+        ptr = phys_ram_base + addr1;
         stl_p(ptr, val);
 #else
@@ -3521 +3517 @@
             /* set dirty bit */
             phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                 (0xff & ~CODE_DIRTY_FLAG);
             }
         }
-#endif
+#endif /* !VBOX */
     }
 }
@@ -3561 +3557 @@
     }
 }
-
 
 /* warning: addr must be aligned */
@@ -3632 +3627 @@
 {
     int l;
-    target_ulong page, phys_addr;
+    target_phys_addr_t phys_addr;
+    target_ulong page;
 
     while (len > 0) {
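The final hunk only retypes the locals of the page-walk loop used by the memory access helpers; the loop itself clamps every copy to the end of the current page, since consecutive guest pages can live at unrelated host addresses. A schematic version of that loop shape (not the file's exact code):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define TARGET_PAGE_SIZE 4096

    /* Schematic per-page copy loop: 'l' is clamped so no single memcpy
       crosses a page boundary, because each page may map to a different
       host region and must be resolved separately. */
    static void copy_by_page(uint8_t *dst, const uint8_t *src,
                             uint64_t addr, size_t len)
    {
        while (len > 0) {
            size_t l = TARGET_PAGE_SIZE - (addr & (TARGET_PAGE_SIZE - 1));
            if (l > len)
                l = len;
            memcpy(dst, src, l);
            dst += l; src += l; addr += l; len -= l;
        }
    }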