Changeset 13559 in vbox
- Timestamp: Oct 24, 2008 2:38:42 PM
- Location: trunk/src/recompiler_new
- Files: 5 edited
trunk/src/recompiler_new/VBoxRecompiler.c
r13504 → r13559

Hunk at lines ~265-274: CPUMQueryGuestCtxPtr() now returns the context pointer
directly instead of a status code with an out parameter:

     /* ctx. */
-    rc = CPUMQueryGuestCtxPtr(pVM, &pVM->rem.s.pCtx);
-    if (VBOX_FAILURE(rc))
-    {
-        AssertMsgFailed(("Failed to obtain guest ctx pointer. rc=%Vrc\n", rc));
-        return rc;
-    }
+    pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
     AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));

Hunk at lines ~3174-3192: a signed 64-bit physical read helper,
remR3PhysReadS64(), is added after remR3PhysReadU64() (the context lines at the
top are the body of remR3PhysReadU64(); the view truncates before the end of
the new function):

 {
     uint64_t val;
+    STAM_PROFILE_ADV_START(&gStatMemRead, a);
+    VBOX_CHECK_ADDR(SrcGCPhys);
+    val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
+    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
+    return val;
+}
+
+/**
+ * Read guest RAM and ROM, signed 64-bit.
+ *
+ * @param SrcGCPhys The source address (guest physical).
+ */
+int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
+{
+    int64_t val;
     STAM_PROFILE_ADV_START(&gStatMemRead, a);
     VBOX_CHECK_ADDR(SrcGCPhys);
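The body of the new signed reader is cut off in this view. As an illustration
only (not the changeset's actual code), a signed 64-bit guest-physical read can
be expressed as a reinterpretation of the unsigned reader shown above; the
wrapper name is hypothetical and plain uint64_t stands in for RTGCPHYS to keep
the sketch self-contained:

    #include <stdint.h>

    /* Assumed to match the unsigned reader above; uint64_t stands in for
       the IPRT RTGCPHYS type in this sketch. */
    extern uint64_t remR3PhysReadU64(uint64_t SrcGCPhys);

    /* Illustration only: the signed value is the same bit pattern,
       reinterpreted as two's complement. */
    static int64_t examplePhysReadS64(uint64_t SrcGCPhys)
    {
        return (int64_t)remR3PhysReadU64(SrcGCPhys);
    }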
trunk/src/recompiler_new/cpu-all.h
r13382 → r13559

Hunk at lines ~1190-1198: TLB_INVALID_MASK is dropped here and IO_MEM_SHIFT
shrinks from 4 to 3, with a comment explaining the encoding:

 /* physical memory access */
-#define TLB_INVALID_MASK (1 << 3)
-#define IO_MEM_SHIFT 4
+
+/* MMIO pages are identified by a combination of an IO device index and
+   3 flags. The ROMD code stores the page ram offset in iotlb entry,
+   so only a limited number of ids are avaiable. */
+
+#define IO_MEM_SHIFT 3
 #define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

Hunk at lines ~1201-1210: the IO_MEM_ROMD comment no longer claims the write
callback receives the ram offset:

 #define IO_MEM_RAM_MISSING (5 << IO_MEM_SHIFT) /* used internally, never use directly */
 #endif
-/* acts like a ROM when read and like a device when written. As an
-   exception, the write memory callback gets the ram offset instead of
-   the physical address */
+
+/* Acts like a ROM when read and like a device when written. */
 #define IO_MEM_ROMD (1)
 #define IO_MEM_SUBPAGE (2)
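A rough standalone sketch of the encoding the new comment describes: the low
IO_MEM_SHIFT bits of a page's phys_offset carry per-page flags and the
remaining low bits select the io-memory handler slot. The IO_MEM_SUBWIDTH and
TARGET_PAGE_BITS values below are assumptions for the sketch, not taken from
this hunk:

    #include <assert.h>
    #include <stdint.h>

    #define IO_MEM_SHIFT       3
    #define IO_MEM_ROMD        (1)
    #define IO_MEM_SUBPAGE     (2)
    #define IO_MEM_SUBWIDTH    (4)   /* assumed value of the third flag bit */
    #define TARGET_PAGE_BITS   12    /* assumed 4 KiB target pages */
    #define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

    /* Pack an io-memory slot index and its per-page flags into one word. */
    static uint32_t pack_io_entry(uint32_t io_index, uint32_t flags)
    {
        assert(flags < (1u << IO_MEM_SHIFT));  /* flags must stay below the index bits */
        return (io_index << IO_MEM_SHIFT) | flags;
    }

    /* Recover the handler slot; with IO_MEM_SHIFT == 3 only
       TARGET_PAGE_BITS - 3 bits remain for distinct handler ids. */
    static uint32_t io_index_of(uint32_t phys_offset)
    {
        return (phys_offset >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    }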
trunk/src/recompiler_new/cpu-defs.h
r13370 → r13559

Hunk at lines ~137-147: the VBox-only exclusion of the CPUTLBEntry padding is
removed, so the entry is padded to a power-of-two size in VBox builds as well
(one unchanged line of the array expression is collapsed in this view):

     target_phys_addr_t addend;
 #endif
-#ifndef VBOX
     /* padding to get a power of two size */
     uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
     ...
                   ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
                   sizeof(target_phys_addr_t))];
-#endif
 } CPUTLBEntry;
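A minimal standalone sketch of why the padding matters (the field types and
size constant are illustrative, not the real CPUTLBEntry layout): keeping each
entry at a power-of-two size lets TLB indexing stay a shift-and-mask with no
multiply, and a compile-time check can enforce it:

    #include <stdint.h>

    #define ENTRY_BITS 6   /* assumed log2 of the padded entry size */

    typedef struct ExampleTLBEntry {
        uintptr_t addr_read;
        uintptr_t addr_write;
        uintptr_t addr_code;
        uintptr_t addend;
        /* pad to exactly 1 << ENTRY_BITS bytes */
        uint8_t   dummy[(1 << ENTRY_BITS) - 4 * sizeof(uintptr_t)];
    } ExampleTLBEntry;

    /* Compile-time assertion (C89-compatible) that the size really is
       2^ENTRY_BITS, so &table[idx] can be formed as base + (idx << ENTRY_BITS). */
    typedef char assert_entry_size[(sizeof(ExampleTLBEntry) == (1 << ENTRY_BITS)) ? 1 : -1];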
trunk/src/recompiler_new/exec.c
r13504 → r13559

Changes in this file:

- Around line 2074: TLB entries for not-yet-dirty pages are now tagged with
  TLB_NOTDIRTY instead of IO_MEM_NOTDIRTY in tlb_entry->addr_write.

- Around lines 2239-2287: the function described as "called from signal
  handler: invalidate the code and unprotect the page. Return TRUE if the
  fault was succesfully handled" in this region is disabled by wrapping it in
  #if 0 ... #endif /* 0 */.

- Around line 2391: a new page_check_range(start, len, flags) verifies that
  every page of the range is mapped with PAGE_VALID and the requested
  PAGE_READ/PAGE_WRITE permissions, calling page_unprotect() on pages that
  were made read-only because they contain translated code; it returns 0 on
  success and -1 on failure (including address wrap-around).

- page_unprotect() now brackets its work with mmap_lock()/mmap_unlock() on
  every return path, with a comment noting that, while not technically safe
  inside a signal handler, this is only reached from a synchronous SEGV
  handler.

- page_unprotect_range() (and its "??? This should be redundant now we have
  lock_user" comment) is removed.

- Subpage (smaller-than-a-target-page MMIO) support is introduced in the
  !defined(CONFIG_USER_ONLY) section: forward declarations of
  subpage_register() and subpage_init(), plus a CHECK_SUBPAGE() macro that
  computes the covered sub-range (start_addr2, end_addr2) and whether a
  subpage is needed for a partially covered page.

- cpu_register_physical_memory() is reworked: it optionally forwards the
  registration to kqemu (USE_KQEMU), looks up any existing PhysPageDesc, and
  registers a subpage via subpage_init()/subpage_register() whenever the
  region only partially covers a page or the phys_offset has IO_MEM_SUBWIDTH
  set; otherwise it installs phys_offset directly, still advancing it page by
  page for ROM/ROMD (and, for VBox without the new phys code,
  IO_MEM_RAM_MISSING) ranges.

- qemu_ram_alloc()/qemu_ram_free() are added (#ifndef VBOX): a simple bump
  allocator over phys_ram_alloc_offset that aborts when a request exceeds
  phys_ram_size; qemu_ram_free() is a no-op.

- The unassigned-memory handlers gain proper 16- and 32-bit variants
  (unassigned_mem_readw/readl and unassigned_mem_writew/writel) instead of
  reusing the byte handlers for every width, and on TARGET_SPARC/TARGET_CRIS
  they report the access through do_unassigned_access().

- Watchpoint support: check_watchpoint() records the hit and raises
  CPU_INTERRUPT_DEBUG when the accessed virtual address matches an armed
  watchpoint; watch_mem_readb/w/l and watch_mem_writeb/w/l wrap the normal
  ldub/lduw/ldl_phys and stb/stw/stl_phys accessors with that check, and
  io_mem_init() registers them as io_mem_watch.

- Subpage accessors: subpage_readlen()/subpage_writelen() dispatch through
  per-offset tables in subpage_t, wrapped by subpage_readb/w/l and
  subpage_writeb/w/l; subpage_register() fills the tables from
  io_mem_read/io_mem_write, and subpage_init() allocates a subpage_t,
  registers it with cpu_register_io_memory() and stores the resulting id with
  IO_MEM_SUBPAGE set.

- cpu_register_io_memory(): read/write callbacks may now be NULL for some
  access widths; if any is missing, IO_MEM_SUBWIDTH is or'ed into the returned
  id. The !defined(CONFIG_USER_ONLY) block now ends after these helpers.

- In the "slow version, mainly for debug" physical memory access path, the
  lock_user() calls gain VERIFY_WRITE/VERIFY_READ arguments and a failed
  lock_user() now returns early, with FIXME comments asking whether an error
  should be reported instead.

- The 32-bit "notdirty" store gains migration handling for non-VBox builds
  (when in_migration is set, the code is invalidated and the dirty bits
  updated), and a 64-bit counterpart, stq_phys_notdirty(), is added that
  performs the store as two 32-bit io writes (byte-order dependent) for MMIO
  or via stq_p()/remR3PhysWriteU64() for RAM.

- cpu_io_recompile() is added for deterministic (icount) execution: when an
  I/O instruction is not the last one in its translation block, the block is
  invalidated and regenerated with CF_LAST_IO so the I/O instruction ends the
  TB; MIPS and SH4 delay-slot instructions are rewound to the preceding
  branch, and execution resumes via cpu_resume_from_signal().

- dump_exec_info() (#ifndef VBOX) now also reports the generated code size,
  the TB count limit (code_gen_max_blocks), a separate "Statistics:" section
  and the TCG statistics via tcg_dump_info().
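The CHECK_SUBPAGE() computation can be hard to read in macro form. A small
standalone sketch of the same arithmetic, assuming 4 KiB pages; the names and
the example addresses are illustrative, not the QEMU/VBox ones:

    #include <stdint.h>
    #include <stdio.h>

    #define EX_PAGE_SIZE 4096u
    #define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

    /* For the page visited at 'addr', compute the covered sub-range
       [*start2, *end2] of a registration [start, start + size) and report
       whether a subpage is needed because the page is only partially covered. */
    static int subrange_in_page(uint32_t addr, uint32_t start, uint32_t size,
                                uint32_t *start2, uint32_t *end2)
    {
        int need_subpage = 0;

        if (addr > start) {
            *start2 = 0;
        } else {
            *start2 = start & ~EX_PAGE_MASK;      /* offset within the page */
            if (*start2 > 0)
                need_subpage = 1;
        }

        if ((start + size) - addr >= EX_PAGE_SIZE) {
            *end2 = EX_PAGE_SIZE - 1;             /* region reaches the page end */
        } else {
            *end2 = (start + size - 1) & ~EX_PAGE_MASK;
            if (*end2 < EX_PAGE_SIZE - 1)
                need_subpage = 1;
        }
        return need_subpage;
    }

    int main(void)
    {
        uint32_t s2, e2;
        /* A 0x100-byte MMIO region at 0x1800 covers only bytes 0x800..0x8ff
           of its 4 KiB page, so a subpage registration is required. */
        int need = subrange_in_page(0x1800, 0x1800, 0x100, &s2, &e2);
        printf("need_subpage=%d range=[0x%x,0x%x]\n", need, s2, e2);
        return 0;
    }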
trunk/src/recompiler_new/target-i386/translate.c
r13539 → r13559

Hunk at lines ~827-845: gen_check_external_event() now emits the
external-event check inline in TCG IR and only calls the helper when one of
the bits is pending, replacing the previously disabled unconditional call:

 static void gen_check_external_event()
 {
-    /** @todo: this code is either wrong, or low performing,
-        rewrite flags check in TCG IR */
-    //tcg_gen_helper_0_0(helper_check_external_event);
+    int skip_label;
+
+    skip_label = gen_new_label();
+    tcg_gen_ld32u_tl(cpu_tmp0, cpu_env, offsetof(CPUState, interrupt_request));
+    /* Keep in sync with helper_check_external_event() */
+    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0,
+                    CPU_INTERRUPT_EXTERNAL_EXIT
+                    | CPU_INTERRUPT_EXTERNAL_TIMER
+                    | CPU_INTERRUPT_EXTERNAL_DMA
+                    | CPU_INTERRUPT_EXTERNAL_HARD);
+    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, skip_label);
+
+    tcg_gen_helper_0_0(helper_check_external_event);
+
+    gen_set_label(skip_label);
 }
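Functionally, the ops emitted above boil down to an inline guard so the slow
helper only runs when an external event is actually pending. A rough
standalone C rendering of that guard; the constant values and the callback
shape are placeholders, not the real CPUState layout or helper signature:

    #include <stdint.h>

    #define CPU_INTERRUPT_EXTERNAL_HARD  0x0100u  /* placeholder values */
    #define CPU_INTERRUPT_EXTERNAL_EXIT  0x0200u
    #define CPU_INTERRUPT_EXTERNAL_DMA   0x0400u
    #define CPU_INTERRUPT_EXTERNAL_TIMER 0x0800u

    /* Fast path: test the pending bits inline; only fall back to the helper
       (the tcg_gen_helper_0_0 call above) when one of them is set. */
    static void check_external_event(uint32_t interrupt_request,
                                     void (*helper)(void))
    {
        if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT
                                 | CPU_INTERRUPT_EXTERNAL_TIMER
                                 | CPU_INTERRUPT_EXTERNAL_DMA
                                 | CPU_INTERRUPT_EXTERNAL_HARD))
            helper();
    }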