VirtualBox

Changeset 13559 in vbox


Ignore:
Timestamp:
Oct 24, 2008 2:38:42 PM (16 years ago)
Author:
vboxsync
Message:

made TCG generate VBOX-aware phys mem access code, some more QEMU code merged

Location:
trunk/src/recompiler_new
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/recompiler_new/VBoxRecompiler.c

    r13504 r13559  
    265265
    266266    /* ctx. */
    267     rc = CPUMQueryGuestCtxPtr(pVM, &pVM->rem.s.pCtx);
    268     if (VBOX_FAILURE(rc))
    269     {
    270         AssertMsgFailed(("Failed to obtain guest ctx pointer. rc=%Vrc\n", rc));
    271         return rc;
    272     }
     267    pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
    273268    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
    274269
     
    31793174{
    31803175    uint64_t val;
     3176    STAM_PROFILE_ADV_START(&gStatMemRead, a);
     3177    VBOX_CHECK_ADDR(SrcGCPhys);
     3178    val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
     3179    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
     3180    return val;
     3181}
     3182
     3183/**
     3184 * Read guest RAM and ROM, signed 64-bit.
     3185 *
     3186 * @param   SrcGCPhys       The source address (guest physical).
     3187 */
     3188int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
     3189{
     3190    int64_t val;
    31813191    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    31823192    VBOX_CHECK_ADDR(SrcGCPhys);
  • trunk/src/recompiler_new/cpu-all.h

    r13382 r13559  
    11901190
    11911191/* physical memory access */
    1192 #define TLB_INVALID_MASK   (1 << 3)
    1193 #define IO_MEM_SHIFT       4
     1192
     1193/* MMIO pages are identified by a combination of an IO device index and
     1194   3 flags.  The ROMD code stores the page ram offset in iotlb entry,
     1195   so only a limited number of ids are available.  */
     1196
     1197#define IO_MEM_SHIFT       3
    11941198#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS  - IO_MEM_SHIFT))
    11951199
     
    12011205#define IO_MEM_RAM_MISSING (5 << IO_MEM_SHIFT) /* used internally, never use directly */
    12021206#endif
    1203 /* acts like a ROM when read and like a device when written. As an
    1204    exception, the write memory callback gets the ram offset instead of
    1205    the physical address */
     1207
     1208/* Acts like a ROM when read and like a device when written.  */
    12061209#define IO_MEM_ROMD        (1)
    12071210#define IO_MEM_SUBPAGE     (2)
  • trunk/src/recompiler_new/cpu-defs.h

    r13370 r13559  
    137137    target_phys_addr_t addend;
    138138#endif
    139 #ifndef VBOX
    140139    /* padding to get a power of two size */
    141140    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
     
    143142                   ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
    144143                   sizeof(target_phys_addr_t))];
    145 #endif
    146144} CPUTLBEntry;
    147145
  • trunk/src/recompiler_new/exec.c

    r13504 r13559  
    20722072#endif
    20732073        if (!cpu_physical_memory_is_dirty(ram_addr)) {
    2074             tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
     2074            tlb_entry->addr_write |= TLB_NOTDIRTY;
    20752075        }
    20762076    }
     
    22372237    return ret;
    22382238}
    2239 
     2239#if 0
    22402240/* called from signal handler: invalidate the code and unprotect the
    22412241   page. Return TRUE if the fault was successfully handled. */
     
    22852285#endif
    22862286}
     2287#endif /* 0 */
    22872288
    22882289#else
     
    23882389}
    23892390
     2391int page_check_range(target_ulong start, target_ulong len, int flags)
     2392{
     2393    PageDesc *p;
     2394    target_ulong end;
     2395    target_ulong addr;
     2396
     2397    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
     2398    start = start & TARGET_PAGE_MASK;
     2399
     2400    if( end < start )
     2401        /* we've wrapped around */
     2402        return -1;
     2403    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
     2404        p = page_find(addr >> TARGET_PAGE_BITS);
     2405        if( !p )
     2406            return -1;
     2407        if( !(p->flags & PAGE_VALID) )
     2408            return -1;
     2409
     2410        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
     2411            return -1;
     2412        if (flags & PAGE_WRITE) {
     2413            if (!(p->flags & PAGE_WRITE_ORG))
     2414                return -1;
     2415            /* unprotect the page if it was put read-only because it
     2416               contains translated code */
     2417            if (!(p->flags & PAGE_WRITE)) {
     2418                if (!page_unprotect(addr, 0, NULL))
     2419                    return -1;
     2420            }
     2421            return 0;
     2422        }
     2423    }
     2424    return 0;
     2425}
     2426
    23902427/* called from signal handler: invalidate the code and unprotect the
    23912428   page. Return TRUE if the fault was successfully handled. */
     
    23962433    target_ulong host_start, host_end, addr;
    23972434
     2435    /* Technically this isn't safe inside a signal handler.  However we
     2436       know this only ever happens in a synchronous SEGV handler, so in
     2437       practice it seems to be ok.  */
     2438    mmap_lock();
     2439
    23982440    host_start = address & qemu_host_page_mask;
    23992441    page_index = host_start >> TARGET_PAGE_BITS;
    24002442    p1 = page_find(page_index);
    2401     if (!p1)
     2443    if (!p1) {
     2444        mmap_unlock();
    24022445        return 0;
     2446    }
    24032447    host_end = host_start + qemu_host_page_size;
    24042448    p = p1;
     
    24222466            tb_invalidate_check(address);
    24232467#endif
     2468            mmap_unlock();
    24242469            return 1;
    24252470        }
    24262471    }
     2472    mmap_unlock();
    24272473    return 0;
    2428 }
    2429 
    2430 /* call this function when system calls directly modify a memory area */
    2431 /* ??? This should be redundant now we have lock_user.  */
    2432 void page_unprotect_range(target_ulong data, target_ulong data_size)
    2433 {
    2434     target_ulong start, end, addr;
    2435 
    2436     start = data;
    2437     end = start + data_size;
    2438     start &= TARGET_PAGE_MASK;
    2439     end = TARGET_PAGE_ALIGN(end);
    2440     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
    2441         page_unprotect(addr, 0, NULL);
    2442     }
    24432474}
    24442475
     
    24482479}
    24492480#endif /* defined(CONFIG_USER_ONLY) */
     2481
     2482#if !defined(CONFIG_USER_ONLY)
     2483static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
     2484                             ram_addr_t memory);
     2485static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
     2486                           ram_addr_t orig_memory);
     2487#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
     2488                      need_subpage)                                     \
     2489    do {                                                                \
     2490        if (addr > start_addr)                                          \
     2491            start_addr2 = 0;                                            \
     2492        else {                                                          \
     2493            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
     2494            if (start_addr2 > 0)                                        \
     2495                need_subpage = 1;                                       \
     2496        }                                                               \
     2497                                                                        \
     2498        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
     2499            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
     2500        else {                                                          \
     2501            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
     2502            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
     2503                need_subpage = 1;                                       \
     2504        }                                                               \
     2505    } while (0)
     2506
    24502507
    24512508/* register physical memory. 'size' must be a multiple of the target
     
    24592516    PhysPageDesc *p;
    24602517    CPUState *env;
    2461 
     2518    ram_addr_t orig_size = size;
     2519    void *subpage;
     2520
     2521#ifdef USE_KQEMU
     2522    /* XXX: should not depend on cpu context */
     2523    env = first_cpu;
     2524    if (env->kqemu_enabled) {
     2525        kqemu_set_phys_mem(start_addr, size, phys_offset);
     2526    }
     2527#endif
    24622528    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    2463     end_addr = start_addr + size;
     2529    end_addr = start_addr + (target_phys_addr_t)size;
    24642530    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
    2465         p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
    2466         p->phys_offset = phys_offset;
     2531        p = phys_page_find(addr >> TARGET_PAGE_BITS);
     2532        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
     2533            ram_addr_t orig_memory = p->phys_offset;
     2534            target_phys_addr_t start_addr2, end_addr2;
     2535            int need_subpage = 0;
     2536
     2537            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
     2538                          need_subpage);
     2539            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
     2540                if (!(orig_memory & IO_MEM_SUBPAGE)) {
     2541                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
     2542                                           &p->phys_offset, orig_memory);
     2543                } else {
     2544                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
     2545                                            >> IO_MEM_SHIFT];
     2546                }
     2547                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
     2548            } else {
     2549                p->phys_offset = phys_offset;
    24672550#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
    24682551        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
     
    24732556            || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
    24742557#endif
    2475 
    2476             phys_offset += TARGET_PAGE_SIZE;
    2477     }
    2478 
     2558                    phys_offset += TARGET_PAGE_SIZE;
     2559            }
     2560        } else {
     2561            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
     2562            p->phys_offset = phys_offset;
     2563#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
     2564        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
     2565            (phys_offset & IO_MEM_ROMD))
     2566#else
     2567        if (   (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
     2568            || (phys_offset & IO_MEM_ROMD)
     2569            || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
     2570#endif
     2571                phys_offset += TARGET_PAGE_SIZE;
     2572            else {
     2573                target_phys_addr_t start_addr2, end_addr2;
     2574                int need_subpage = 0;
     2575
     2576                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
     2577                              end_addr2, need_subpage);
     2578
     2579                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
     2580                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
     2581                                           &p->phys_offset, IO_MEM_UNASSIGNED);
     2582                    subpage_register(subpage, start_addr2, end_addr2,
     2583                                     phys_offset);
     2584                }
     2585            }
     2586        }
     2587    }
    24792588    /* since each CPU stores ram addresses in its TLB cache, we must
    24802589       reset the modified entries */
     
    24962605}
    24972606
     2607#ifndef VBOX
     2608/* XXX: better than nothing */
     2609ram_addr_t qemu_ram_alloc(ram_addr_t size)
     2610{
     2611    ram_addr_t addr;
     2612    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
     2613        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
     2614                (uint64_t)size, (uint64_t)phys_ram_size);
     2615        abort();
     2616    }
     2617    addr = phys_ram_alloc_offset;
     2618    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
     2619    return addr;
     2620}
     2621
     2622void qemu_ram_free(ram_addr_t addr)
     2623{
     2624}
     2625#endif
     2626
     2627
    24982628static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
    24992629{
     
    25012631    printf("Unassigned mem read  0x%08x\n", (int)addr);
    25022632#endif
     2633#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
     2634    do_unassigned_access(addr, 0, 0, 0, 1);
     2635#endif
     2636    return 0;
     2637}
     2638
     2639static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
     2640{
     2641#ifdef DEBUG_UNASSIGNED
     2642    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
     2643#endif
     2644#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
     2645    do_unassigned_access(addr, 0, 0, 0, 2);
     2646#endif
     2647    return 0;
     2648}
     2649
     2650static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
     2651{
     2652#ifdef DEBUG_UNASSIGNED
     2653    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
     2654#endif
     2655#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
     2656    do_unassigned_access(addr, 0, 0, 0, 4);
     2657#endif
    25032658    return 0;
    25042659}
     
    25112666}
    25122667
     2668static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
     2669{
     2670#ifdef DEBUG_UNASSIGNED
     2671    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
     2672#endif
     2673#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
     2674    do_unassigned_access(addr, 1, 0, 0, 2);
     2675#endif
     2676}
     2677
     2678static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
     2679{
     2680#ifdef DEBUG_UNASSIGNED
     2681    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
     2682#endif
     2683#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
     2684    do_unassigned_access(addr, 1, 0, 0, 4);
     2685#endif
     2686}
    25132687static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    25142688    unassigned_mem_readb,
    2515     unassigned_mem_readb,
    2516     unassigned_mem_readb,
     2689    unassigned_mem_readw,
     2690    unassigned_mem_readl,
    25172691};
    25182692
    25192693static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    25202694    unassigned_mem_writeb,
    2521     unassigned_mem_writeb,
    2522     unassigned_mem_writeb,
     2695    unassigned_mem_writew,
     2696    unassigned_mem_writel,
    25232697};
    25242698
     
    26702844};
    26712845
     2846
     2847/* Generate a debug exception if a watchpoint has been hit.  */
     2848static void check_watchpoint(int offset, int flags)
     2849{
     2850    CPUState *env = cpu_single_env;
     2851    target_ulong vaddr;
     2852    int i;
     2853
     2854    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
     2855    for (i = 0; i < env->nb_watchpoints; i++) {
     2856        if (vaddr == env->watchpoint[i].vaddr
     2857                && (env->watchpoint[i].type & flags)) {
     2858            env->watchpoint_hit = i + 1;
     2859            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
     2860            break;
     2861        }
     2862    }
     2863}
     2864
     2865/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
     2866   so these check for a hit then pass through to the normal out-of-line
     2867   phys routines.  */
     2868static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
     2869{
     2870    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
     2871    return ldub_phys(addr);
     2872}
     2873
     2874static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
     2875{
     2876    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
     2877    return lduw_phys(addr);
     2878}
     2879
     2880static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
     2881{
     2882    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
     2883    return ldl_phys(addr);
     2884}
     2885
     2886static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
     2887                             uint32_t val)
     2888{
     2889    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
     2890    stb_phys(addr, val);
     2891}
     2892
     2893static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
     2894                             uint32_t val)
     2895{
     2896    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
     2897    stw_phys(addr, val);
     2898}
     2899
     2900static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
     2901                             uint32_t val)
     2902{
     2903    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
     2904    stl_phys(addr, val);
     2905}
     2906
     2907static CPUReadMemoryFunc *watch_mem_read[3] = {
     2908    watch_mem_readb,
     2909    watch_mem_readw,
     2910    watch_mem_readl,
     2911};
     2912
     2913static CPUWriteMemoryFunc *watch_mem_write[3] = {
     2914    watch_mem_writeb,
     2915    watch_mem_writew,
     2916    watch_mem_writel,
     2917};
     2918
     2919static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
     2920                                 unsigned int len)
     2921{
     2922    uint32_t ret;
     2923    unsigned int idx;
     2924
     2925    idx = SUBPAGE_IDX(addr - mmio->base);
     2926#if defined(DEBUG_SUBPAGE)
     2927    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
     2928           mmio, len, addr, idx);
     2929#endif
     2930    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
     2931
     2932    return ret;
     2933}
     2934
     2935static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
     2936                              uint32_t value, unsigned int len)
     2937{
     2938    unsigned int idx;
     2939
     2940    idx = SUBPAGE_IDX(addr - mmio->base);
     2941#if defined(DEBUG_SUBPAGE)
     2942    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
     2943           mmio, len, addr, idx, value);
     2944#endif
     2945    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
     2946}
     2947
     2948static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
     2949{
     2950#if defined(DEBUG_SUBPAGE)
     2951    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
     2952#endif
     2953
     2954    return subpage_readlen(opaque, addr, 0);
     2955}
     2956
     2957static void subpage_writeb (void *opaque, target_phys_addr_t addr,
     2958                            uint32_t value)
     2959{
     2960#if defined(DEBUG_SUBPAGE)
     2961    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
     2962#endif
     2963    subpage_writelen(opaque, addr, value, 0);
     2964}
     2965
     2966static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
     2967{
     2968#if defined(DEBUG_SUBPAGE)
     2969    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
     2970#endif
     2971
     2972    return subpage_readlen(opaque, addr, 1);
     2973}
     2974
     2975static void subpage_writew (void *opaque, target_phys_addr_t addr,
     2976                            uint32_t value)
     2977{
     2978#if defined(DEBUG_SUBPAGE)
     2979    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
     2980#endif
     2981    subpage_writelen(opaque, addr, value, 1);
     2982}
     2983
     2984static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
     2985{
     2986#if defined(DEBUG_SUBPAGE)
     2987    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
     2988#endif
     2989
     2990    return subpage_readlen(opaque, addr, 2);
     2991}
     2992
     2993static void subpage_writel (void *opaque,
     2994                         target_phys_addr_t addr, uint32_t value)
     2995{
     2996#if defined(DEBUG_SUBPAGE)
     2997    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
     2998#endif
     2999    subpage_writelen(opaque, addr, value, 2);
     3000}
     3001
     3002static CPUReadMemoryFunc *subpage_read[] = {
     3003    &subpage_readb,
     3004    &subpage_readw,
     3005    &subpage_readl,
     3006};
     3007
     3008static CPUWriteMemoryFunc *subpage_write[] = {
     3009    &subpage_writeb,
     3010    &subpage_writew,
     3011    &subpage_writel,
     3012};
     3013
     3014static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
     3015                             ram_addr_t memory)
     3016{
     3017    int idx, eidx;
     3018    unsigned int i;
     3019
     3020    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
     3021        return -1;
     3022    idx = SUBPAGE_IDX(start);
     3023    eidx = SUBPAGE_IDX(end);
     3024#if defined(DEBUG_SUBPAGE)
     3025    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
     3026           mmio, start, end, idx, eidx, memory);
     3027#endif
     3028    memory >>= IO_MEM_SHIFT;
     3029    for (; idx <= eidx; idx++) {
     3030        for (i = 0; i < 4; i++) {
     3031            if (io_mem_read[memory][i]) {
     3032                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
     3033                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
     3034            }
     3035            if (io_mem_write[memory][i]) {
     3036                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
     3037                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
     3038            }
     3039        }
     3040    }
     3041
     3042    return 0;
     3043}
     3044
     3045static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
     3046                           ram_addr_t orig_memory)
     3047{
     3048    subpage_t *mmio;
     3049    int subpage_memory;
     3050
     3051    mmio = qemu_mallocz(sizeof(subpage_t));
     3052    if (mmio != NULL) {
     3053        mmio->base = base;
     3054        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
     3055#if defined(DEBUG_SUBPAGE)
     3056        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
     3057               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
     3058#endif
     3059        *phys = subpage_memory | IO_MEM_SUBPAGE;
     3060        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
     3061    }
     3062
     3063    return mmio;
     3064}
     3065
    26723066static void io_mem_init(void)
    26733067{
     
    26813075    io_mem_nb = 5;
    26823076#endif
    2683 
     3077   
     3078    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
     3079                                          watch_mem_write, NULL);
     3080   
    26843081#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
    26853082    /* alloc dirty bits array */
     
    26913088/* mem_read and mem_write are arrays of functions containing the
    26923089   function to access byte (index 0), word (index 1) and dword (index
    2693    2). All functions must be supplied. If io_index is non zero, the
    2694    corresponding io zone is modified. If it is zero, a new io zone is
    2695    allocated. The return value can be used with
    2696    cpu_register_physical_memory(). (-1) is returned if error. */
     3090   2). Functions can be omitted with a NULL function pointer. The
     3091   registered functions may be modified dynamically later.
     3092   If io_index is non zero, the corresponding io zone is
     3093   modified. If it is zero, a new io zone is allocated. The return
     3094   value can be used with cpu_register_physical_memory(). (-1) is
     3095   returned if error. */
    26973096int cpu_register_io_memory(int io_index,
    26983097                           CPUReadMemoryFunc **mem_read,
     
    27003099                           void *opaque)
    27013100{
    2702     int i;
     3101    int i, subwidth = 0;
    27033102
    27043103    if (io_index <= 0) {
     
    27123111
    27133112    for(i = 0;i < 3; i++) {
     3113        if (!mem_read[i] || !mem_write[i])
     3114            subwidth = IO_MEM_SUBWIDTH;
    27143115        io_mem_read[io_index][i] = mem_read[i];
    27153116        io_mem_write[io_index][i] = mem_write[i];
    27163117    }
    27173118    io_mem_opaque[io_index] = opaque;
    2718     return io_index << IO_MEM_SHIFT;
     3119    return (io_index << IO_MEM_SHIFT) | subwidth;
    27193120}
    27203121
     
    27283129    return io_mem_read[io_index >> IO_MEM_SHIFT];
    27293130}
     3131#endif /* !defined(CONFIG_USER_ONLY) */
    27303132
    27313133/* physical memory access (slow version, mainly for debug) */
     
    27493151            if (!(flags & PAGE_WRITE))
    27503152                return;
    2751             p = lock_user(addr, len, 0);
     3153            /* XXX: this code should not depend on lock_user */
     3154            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
     3155                /* FIXME - should this return an error rather than just fail? */
     3156                return;
    27523157            memcpy(p, buf, len);
    27533158            unlock_user(p, addr, len);
     
    27553160            if (!(flags & PAGE_READ))
    27563161                return;
    2757             p = lock_user(addr, len, 1);
     3162            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
     3163                /* FIXME - should this return an error rather than just fail? */
     3164                return;
    27583165            memcpy(buf, p, len);
    27593166            unlock_user(p, addr, 0);
     
    30523459        remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
    30533460#endif
    3054     }
    3055 }
     3461#ifndef VBOX
     3462        if (unlikely(in_migration)) {
     3463            if (!cpu_physical_memory_is_dirty(addr1)) {
     3464                /* invalidate code */
     3465                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
     3466                /* set dirty bit */
     3467                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
     3468                        (0xff & ~CODE_DIRTY_FLAG);
     3469            }
     3470        }
     3471#endif
     3472    }
     3473}
     3474
     3475void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
     3476{
     3477    int io_index;
     3478    uint8_t *ptr;
     3479    unsigned long pd;
     3480    PhysPageDesc *p;
     3481
     3482    p = phys_page_find(addr >> TARGET_PAGE_BITS);
     3483    if (!p) {
     3484        pd = IO_MEM_UNASSIGNED;
     3485    } else {
     3486        pd = p->phys_offset;
     3487    }
     3488
     3489    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
     3490        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
     3491#ifdef TARGET_WORDS_BIGENDIAN
     3492        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
     3493        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
     3494#else
     3495        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
     3496        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
     3497#endif
     3498    } else {
     3499#ifndef VBOX
     3500        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
     3501            (addr & ~TARGET_PAGE_MASK);
     3502        stq_p(ptr, val);
     3503#else
     3504        remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
     3505#endif
     3506    }
     3507}
     3508
    30563509
    30573510/* warning: addr must be aligned */
     
    31433596    }
    31443597    return 0;
     3598}
     3599
     3600/* in deterministic execution mode, instructions doing device I/Os
     3601   must be at the end of the TB */
     3602void cpu_io_recompile(CPUState *env, void *retaddr)
     3603{
     3604    TranslationBlock *tb;
     3605    uint32_t n, cflags;
     3606    target_ulong pc, cs_base;
     3607    uint64_t flags;
     3608
     3609    tb = tb_find_pc((unsigned long)retaddr);
     3610    if (!tb) {
     3611        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
     3612                  retaddr);
     3613    }
     3614    n = env->icount_decr.u16.low + tb->icount;
     3615    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
     3616    /* Calculate how many instructions had been executed before the fault
     3617       occurred.  */
     3618    n = n - env->icount_decr.u16.low;
     3619    /* Generate a new TB ending on the I/O insn.  */
     3620    n++;
     3621    /* On MIPS and SH, delay slot instructions can only be restarted if
     3622       they were already the first instruction in the TB.  If this is not
     3623       the first instruction in a TB then re-execute the preceding
     3624       branch.  */
     3625#if defined(TARGET_MIPS)
     3626    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
     3627        env->active_tc.PC -= 4;
     3628        env->icount_decr.u16.low++;
     3629        env->hflags &= ~MIPS_HFLAG_BMASK;
     3630    }
     3631#elif defined(TARGET_SH4)
     3632    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
     3633            && n > 1) {
     3634        env->pc -= 2;
     3635        env->icount_decr.u16.low++;
     3636        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
     3637    }
     3638#endif
     3639    /* This should never happen.  */
     3640    if (n > CF_COUNT_MASK)
     3641        cpu_abort(env, "TB too big during recompile");
     3642
     3643    cflags = n | CF_LAST_IO;
     3644    pc = tb->pc;
     3645    cs_base = tb->cs_base;
     3646    flags = tb->flags;
     3647    tb_phys_invalidate(tb, -1);
     3648    /* FIXME: In theory this could raise an exception.  In practice
     3649       we have already translated the block once so it's probably ok.  */
     3650    tb_gen_code(env, pc, cs_base, flags, cflags);
     3651    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     3652       the first in the TB) then we end up generating a whole new TB and
     3653       repeating the fault, which is horribly inefficient.
     3654       Better would be to execute just this insn uncached, or generate a
     3655       second new TB.  */
     3656    cpu_resume_from_signal(env, NULL);
    31453657}
    31463658
     
    31723684    }
    31733685    /* XXX: avoid using doubles ? */
    3174     cpu_fprintf(f, "TB count            %d\n", nb_tbs);
     3686    cpu_fprintf(f, "Translation buffer state:\n");
     3687    cpu_fprintf(f, "gen code size       %ld/%ld\n",
     3688                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
     3689    cpu_fprintf(f, "TB count            %d/%d\n",
     3690                nb_tbs, code_gen_max_blocks);
    31753691    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
    31763692                nb_tbs ? target_code_size / nb_tbs : 0,
     
    31873703                direct_jmp2_count,
    31883704                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
     3705    cpu_fprintf(f, "\nStatistics:\n");
    31893706    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    31903707    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    31913708    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
     3709    tcg_dump_info(f, cpu_fprintf);
    31923710}
    31933711#endif /* !VBOX */
  • trunk/src/recompiler_new/target-i386/translate.c

    r13539 r13559  
    827827static void gen_check_external_event()
    828828{
    829     /** @todo: this code is either wrong, or low performing,
    830         rewrite flags check in TCG IR */
    831     //tcg_gen_helper_0_0(helper_check_external_event);
     829    int skip_label;
     830   
     831    skip_label = gen_new_label();
     832    tcg_gen_ld32u_tl(cpu_tmp0, cpu_env, offsetof(CPUState, interrupt_request));
     833    /* Keep in sync with helper_check_external_event() */
     834    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0,
     835                    CPU_INTERRUPT_EXTERNAL_EXIT
     836                    | CPU_INTERRUPT_EXTERNAL_TIMER
     837                    | CPU_INTERRUPT_EXTERNAL_DMA
     838                    | CPU_INTERRUPT_EXTERNAL_HARD);
     839    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, skip_label);
     840
     841    tcg_gen_helper_0_0(helper_check_external_event);
     842
     843   gen_set_label(skip_label);
    832844}
    833845
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette