VirtualBox

Changeset 36175 in vbox for trunk/src/recompiler/exec.c


Timestamp:
Mar 4, 2011 4:21:09 PM
Author:
vboxsync
Message:

rem: Synced up to v0.11.1 (35bfc7324e2e6946c4113ada5db30553a1a7c40b) from git://git.savannah.nongnu.org/qemu.git.

File:
1 edited

  • trunk/src/recompiler/exec.c

    r36171 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    3130#ifndef VBOX
    3231#ifdef _WIN32
    33 #define WIN32_LEAN_AND_MEAN
    3432#include <windows.h>
    3533#else
     
    8583#define SMC_BITMAP_USE_THRESHOLD 10
    8684
    87 #define MMAP_AREA_START        0x00000000
    88 #define MMAP_AREA_END          0xa8000000
    89 
    9085#if defined(TARGET_SPARC64)
    9186#define TARGET_PHYS_ADDR_SPACE_BITS 41
     
    9792#elif defined(TARGET_PPC64)
    9893#define TARGET_PHYS_ADDR_SPACE_BITS 42
    99 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
     94#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
    10095#define TARGET_PHYS_ADDR_SPACE_BITS 42
    101 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
     96#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
    10297#define TARGET_PHYS_ADDR_SPACE_BITS 36
    10398#else
     
    121116    __attribute__((__section__(".gen_code")))           \
    122117    __attribute__((aligned (32)))
     118#elif defined(_WIN32)
     119/* Maximum alignment for Win32 is 16. */
     120#define code_gen_section                                \
     121    __attribute__((aligned (16)))
    123122#else
    124123#define code_gen_section                                \
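The hunk above adds a Win32-specific definition of code_gen_section because GCC on Win32 only honors alignments up to 16 bytes. A minimal sketch of the pattern, with a hypothetical buffer standing in for the generated-code prologue:

#include <stdint.h>

/* Sketch: request the strongest alignment the host toolchain supports.
   The 16-byte cap mirrors the Win32 limit noted in the diff comment. */
#if defined(_WIN32)
# define BUF_ALIGN __attribute__((aligned(16)))
#else
# define BUF_ALIGN __attribute__((aligned(32)))
#endif

static uint8_t scratch_code_buffer[4096] BUF_ALIGN;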
     
    138137#ifndef VBOX
    139138#if !defined(CONFIG_USER_ONLY)
    140 ram_addr_t phys_ram_size;
    141139int phys_ram_fd;
    142 uint8_t *phys_ram_base;
    143140uint8_t *phys_ram_dirty;
    144141static int in_migration;
    145 static ram_addr_t phys_ram_alloc_offset = 0;
     142
     143typedef struct RAMBlock {
     144    uint8_t *host;
     145    ram_addr_t offset;
     146    ram_addr_t length;
     147    struct RAMBlock *next;
     148} RAMBlock;
     149
     150static RAMBlock *ram_blocks;
     151/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
     152   then we can no longer assume contiguous ram offsets, and external uses
     153   of this variable will break.  */
     154ram_addr_t last_ram_offset;
    146155#endif
    147156#else /* VBOX */
    148 RTGCPHYS phys_ram_size;
    149157/* we have memory ranges (the high PC-BIOS mapping) which
    150158   causes some pages to fall outside the dirty map here. */
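The new RAMBlock list replaces the single contiguous phys_ram_base/phys_ram_size pair: each allocation now records its own host pointer, guest offset and length. A self-contained sketch of the offset-to-host lookup this enables (simplified from the qemu_get_ram_ptr implementation further down; the ram_addr_t typedef here is an assumption standing in for QEMU's own):

#include <stddef.h>
#include <stdint.h>

typedef unsigned long ram_addr_t;       /* assumed stand-in for QEMU's typedef */

typedef struct RAMBlock {
    uint8_t *host;                      /* host virtual address of the block */
    ram_addr_t offset;                  /* guest ram offset where it starts */
    ram_addr_t length;                  /* block size in bytes */
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;

/* Walk the list until the block covering addr is found.  The real code
   additionally moves the hit to the list head as an MRU cache. */
static uint8_t *ram_ptr_sketch(ram_addr_t addr)
{
    RAMBlock *block;
    for (block = ram_blocks; block != NULL; block = block->next) {
        if (addr >= block->offset && addr - block->offset < block->length)
            return block->host + (addr - block->offset);
    }
    return NULL;                        /* unknown offset; the real code aborts */
}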
     
    225233CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
    226234void *io_mem_opaque[IO_MEM_NB_ENTRIES];
    227 char io_mem_used[IO_MEM_NB_ENTRIES];
     235static char io_mem_used[IO_MEM_NB_ENTRIES];
    228236static int io_mem_watch;
    229237#endif
     
    406414        size_t len = sizeof(PageDesc) * L2_SIZE;
    407415        /* Don't use qemu_malloc because it may recurse.  */
    408         p = mmap(0, len, PROT_READ | PROT_WRITE,
     416        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
    409417                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    410418        *lp = p;
     
    431439
    432440    p = *lp;
    433     if (!p)
    434         return 0;
     441    if (!p) {
     442        return NULL;
     443    }
    435444    return p + (index & (L2_SIZE - 1));
    436445}
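The page_find_alloc change above keeps allocating L2 page-descriptor arrays with raw anonymous mmap because qemu_malloc could recurse back into the page tables; it now passes NULL instead of 0 for the address hint and returns NULL rather than 0 when an entry is missing. A self-contained sketch of that allocation pattern:

#include <stddef.h>
#include <sys/mman.h>

/* Sketch: allocate zero-filled, page-backed memory without touching malloc. */
static void *alloc_page_table(size_t len)
{
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return NULL;        /* caller must cope with allocation failure */
    /* anonymous mappings are already zeroed; no memset is needed */
    return p;
}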
     
    512521
    513522#if defined(CONFIG_USER_ONLY)
    514 /* Currently it is not recommanded to allocate big chunks of data in
     523/* Currently it is not recommended to allocate big chunks of data in
    515524   user mode. It will change when a dedicated libc will be used */
    516525#define USE_STATIC_CODE_GEN_BUFFER
    517526#endif
    518527
    519 /* VBox allocates codegen buffer dynamically */
    520 #ifndef VBOX
     528#if defined(VBOX) && defined(USE_STATIC_CODE_GEN_BUFFER)
     529# error "VBox allocates codegen buffer dynamically"
     530#endif
     531
    521532#ifdef USE_STATIC_CODE_GEN_BUFFER
    522533static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
    523 #endif
    524534#endif
    525535
     
    531541    map_exec(code_gen_buffer, code_gen_buffer_size);
    532542#else
    533 #ifdef VBOX
     543# ifdef VBOX
    534544    /* We cannot use phys_ram_size here, as it's 0 now,
    535545     * it only gets initialized once RAM registration callback
     
    537547     */
    538548    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    539 #else
     549# else  /* !VBOX */
    540550    code_gen_buffer_size = tb_size;
    541551    if (code_gen_buffer_size == 0) {
     
    550560    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
    551561        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    552 #endif /* VBOX */
     562# endif /* !VBOX */
    553563    /* The code gen buffer location may have constraints depending on
    554564       the host cpu and OS */
    555 #ifdef VBOX
     565# ifdef VBOX
    556566    code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
    557567
     
    561571        return;
    562572    }
    563 #else  /* !VBOX */
     573# else  /* !VBOX */
    564574#if defined(__linux__)
    565575    {
     
    594604        }
    595605    }
    596 #elif defined(__FreeBSD__)
     606#elif defined(__FreeBSD__) || defined(__DragonFly__)
    597607    {
    598608        int flags;
     
    620630    map_exec(code_gen_buffer, code_gen_buffer_size);
    621631#endif
    622 #endif /* !VBOX */
     632# endif /* !VBOX */
    623633#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    624634#ifndef VBOX
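code_gen_alloc maps the translation buffer with PROT_EXEC so the host can jump into the generated code; on x86_64 Linux the surrounding (elided) code also passes MAP_32BIT to keep the buffer within reach of 32-bit branches, and the hunk above extends the BSD branch to DragonFly. A minimal sketch of an executable anonymous mapping, under the assumption that the host permits writable+executable pages (hardened W^X kernels may refuse this):

#include <stddef.h>
#include <sys/mman.h>

/* Sketch: reserve a read/write/execute buffer for generated code. */
static void *alloc_code_buffer(size_t size)
{
    void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}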
     
    656666    CPUState *env = opaque;
    657667
     668    cpu_synchronize_state(env, 0);
     669
    658670    qemu_put_be32s(f, &env->halted);
    659671    qemu_put_be32s(f, &env->interrupt_request);
     
    669681    qemu_get_be32s(f, &env->halted);
    670682    qemu_get_be32s(f, &env->interrupt_request);
    671     env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
     683    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
     684       version_id is increased. */
     685    env->interrupt_request &= ~0x01;
    672686    tlb_flush(env, 1);
     687    cpu_synchronize_state(env, 1);
    673688
    674689    return 0;
    675690}
    676691#endif
     692
     693CPUState *qemu_get_cpu(int cpu)
     694{
     695    CPUState *env = first_cpu;
     696
     697    while (env) {
     698        if (env->cpu_index == cpu)
     699            break;
     700        env = env->next_cpu;
     701    }
     702
     703    return env;
     704}
     705
    677706#endif /* !VBOX */
    678707
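The newly imported qemu_get_cpu walks the singly linked CPU list that cpu_exec_init builds and returns the CPUState whose cpu_index matches, or NULL when it falls off the end. A hedged usage sketch (the iteration relies only on the first_cpu/next_cpu chaining visible above):

/* Sketch: visit every CPU, then fetch one by index via the new helper. */
static void per_cpu_example(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        /* per-CPU work, e.g. flushing its TLB */
    }

    env = qemu_get_cpu(0);      /* NULL when no CPU has cpu_index == 0 */
    (void)env;
}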
     
    682711    int cpu_index;
    683712
     713#if defined(CONFIG_USER_ONLY)
     714    cpu_list_lock();
     715#endif
    684716    env->next_cpu = NULL;
    685717    penv = &first_cpu;
    686718    cpu_index = 0;
    687719    while (*penv != NULL) {
    688         penv = (CPUState **)&(*penv)->next_cpu;
     720        penv = &(*penv)->next_cpu;
    689721        cpu_index++;
    690722    }
    691723    env->cpu_index = cpu_index;
     724    env->numa_node = 0;
    692725    TAILQ_INIT(&env->breakpoints);
    693726    TAILQ_INIT(&env->watchpoints);
    694727    *penv = env;
    695728#ifndef VBOX
     729#if defined(CONFIG_USER_ONLY)
     730    cpu_list_unlock();
     731#endif
    696732#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    697733    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
     
    789825            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
    790826                  address >= tb->pc + tb->size)) {
    791                 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
     827                printf("ERROR invalidate: address=" TARGET_FMT_lx
     828                       " PC=%08lx size=%04x\n",
    792829                       address, (long)tb->pc, tb->size);
    793830            }
     
    811848            }
    812849        }
    813     }
    814 }
    815 
    816 static void tb_jmp_check(TranslationBlock *tb)
    817 {
    818     TranslationBlock *tb1;
    819     unsigned int n1;
    820 
    821     /* suppress any remaining jumps to this TB */
    822     tb1 = tb->jmp_first;
    823     for(;;) {
    824         n1 = (long)tb1 & 3;
    825         tb1 = (TranslationBlock *)((long)tb1 & ~3);
    826         if (n1 == 2)
    827             break;
    828         tb1 = tb1->jmp_next[n1];
    829     }
    830     /* check end of list */
    831     if (tb1 != tb) {
    832         printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    833850    }
    834851}
     
    958975    tb_phys_invalidate_count++;
    959976}
    960 
    961977
    962978#ifdef VBOX
     
    16811697    if (env->singlestep_enabled != enabled) {
    16821698        env->singlestep_enabled = enabled;
    1683         /* must flush all the translated code to avoid inconsistancies */
    1684         /* XXX: only flush what is necessary */
    1685         tb_flush(env);
     1699        if (kvm_enabled())
     1700            kvm_update_guest_debug(env, 0);
     1701        else {
     1702            /* must flush all the translated code to avoid inconsistencies */
     1703            /* XXX: only flush what is necessary */
     1704            tb_flush(env);
     1705        }
    16861706    }
    16871707#endif
     
    16891709
    16901710#ifndef VBOX
     1711
    16911712/* enable or disable low levels log */
    16921713void cpu_set_log(int log_flags)
     
    17251746    cpu_set_log(loglevel);
    17261747}
     1748
    17271749#endif /* !VBOX */
    17281750
    1729 /* mask must never be zero, except for A20 change call */
    1730 void cpu_interrupt(CPUState *env, int mask)
    1731 {
    1732 #if !defined(USE_NPTL)
    1733     TranslationBlock *tb;
    1734     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
    1735 #endif
    1736     int old_mask;
    1737 
    1738     if (mask & CPU_INTERRUPT_EXIT) {
    1739         env->exit_request = 1;
    1740         mask &= ~CPU_INTERRUPT_EXIT;
    1741     }
    1742 
    1743     old_mask = env->interrupt_request;
    1744 #ifdef VBOX
    1745     VM_ASSERT_EMT(env->pVM);
    1746     ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
    1747 #else /* !VBOX */
    1748     /* FIXME: This is probably not threadsafe.  A different thread could
    1749        be in the middle of a read-modify-write operation.  */
    1750     env->interrupt_request |= mask;
    1751 #endif /* !VBOX */
     1751static void cpu_unlink_tb(CPUState *env)
     1752{
    17521753#if defined(USE_NPTL)
    17531754    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
     
    17561757       signals are used primarily to interrupt blocking syscalls.  */
    17571758#else
     1759    TranslationBlock *tb;
     1760    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
     1761
     1762    tb = env->current_tb;
     1763    /* if the cpu is currently executing code, we must unlink it and
     1764       all the potentially executing TB */
     1765    if (tb && !testandset(&interrupt_lock)) {
     1766        env->current_tb = NULL;
     1767        tb_reset_jump_recursive(tb);
     1768        resetlock(&interrupt_lock);
     1769    }
     1770#endif
     1771}
     1772
     1773/* mask must never be zero, except for A20 change call */
     1774void cpu_interrupt(CPUState *env, int mask)
     1775{
     1776    int old_mask;
     1777
     1778    old_mask = env->interrupt_request;
     1779#ifndef VBOX
     1780    env->interrupt_request |= mask;
     1781#else  /* VBOX */
     1782    VM_ASSERT_EMT(env->pVM);
     1783    ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
     1784#endif /* VBOX */
     1785
     1786#ifndef VBOX
     1787#ifndef CONFIG_USER_ONLY
     1788    /*
     1789     * If called from iothread context, wake the target cpu in
     1790     * case its halted.
     1791     */
     1792    if (!qemu_cpu_self(env)) {
     1793        qemu_cpu_kick(env);
     1794        return;
     1795    }
     1796#endif
     1797#endif /* !VBOX */
     1798
    17581799    if (use_icount) {
    17591800        env->icount_decr.u16.high = 0xffff;
     
    17651806#endif
    17661807    } else {
    1767         tb = env->current_tb;
    1768         /* if the cpu is currently executing code, we must unlink it and
    1769            all the potentially executing TB */
    1770         if (tb && !testandset(&interrupt_lock)) {
    1771             env->current_tb = NULL;
    1772             tb_reset_jump_recursive(tb);
    1773             resetlock(&interrupt_lock);
    1774         }
    1775     }
    1776 #endif
     1808        cpu_unlink_tb(env);
     1809    }
    17771810}
    17781811
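This hunk factors the TB-unchaining logic out of cpu_interrupt into cpu_unlink_tb, which still guards the jump-list walk with the testandset/resetlock spinlock: a contended caller simply skips the unlink rather than spinning, since the exit_request path will stop the TB anyway. A sketch of that try-lock shape using the GCC atomic builtins such spinlock macros are commonly built on (an assumption; the diff does not show testandset's definition):

/* Sketch: __sync_lock_test_and_set returns the previous value, so a
   nonzero result means another thread holds the lock and we skip the work. */
static volatile int unlink_lock_sketch;

static void try_unlink_sketch(void)
{
    if (__sync_lock_test_and_set(&unlink_lock_sketch, 1) == 0) {
        /* ... unlink env->current_tb and reset its jump lists ... */
        __sync_lock_release(&unlink_lock_sketch);
    }
    /* else: lock busy; fall back to the exit_request mechanism */
}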
     
    17881821    env->interrupt_request &= ~mask;
    17891822#endif /* !VBOX */
     1823}
     1824
     1825void cpu_exit(CPUState *env)
     1826{
     1827    env->exit_request = 1;
     1828    cpu_unlink_tb(env);
    17901829}
    17911830
     
    19561995}
    19571996
    1958 #ifdef VBOX
    19591997static CPUTLBEntry s_cputlb_empty_entry = {
    19601998    .addr_read  = -1,
     
    19632001    .addend     = -1,
    19642002};
    1965 #endif /* VBOX */
    19662003
    19672004/* NOTE: if flush_global is true, also flush global entries (not
     
    19792016
    19802017    for(i = 0; i < CPU_TLB_SIZE; i++) {
    1981 #ifdef VBOX
    19822018        int mmu_idx;
    19832019        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
    19842020            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
    19852021        }
    1986 #else  /* !VBOX */
    1987         env->tlb_table[0][i].addr_read = -1;
    1988         env->tlb_table[0][i].addr_write = -1;
    1989         env->tlb_table[0][i].addr_code = -1;
    1990         env->tlb_table[1][i].addr_read = -1;
    1991         env->tlb_table[1][i].addr_write = -1;
    1992         env->tlb_table[1][i].addr_code = -1;
    1993 #if (NB_MMU_MODES >= 3)
    1994         env->tlb_table[2][i].addr_read = -1;
    1995         env->tlb_table[2][i].addr_write = -1;
    1996         env->tlb_table[2][i].addr_code = -1;
    1997 #if (NB_MMU_MODES == 4)
    1998         env->tlb_table[3][i].addr_read = -1;
    1999         env->tlb_table[3][i].addr_write = -1;
    2000         env->tlb_table[3][i].addr_code = -1;
    2001 #endif
    2002 #endif
    2003 #endif /* !VBOX */
    20042022    }
    20052023
    20062024    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    20072025
     2026#ifdef CONFIG_KQEMU
     2027    if (env->kqemu_enabled) {
     2028        kqemu_flush(env, flush_global);
     2029    }
     2030#endif
    20082031#ifdef VBOX
    20092032    /* inform raw mode about TLB flush */
    20102033    remR3FlushTLB(env, flush_global);
    2011 #endif
    2012 #ifdef USE_KQEMU
    2013     if (env->kqemu_enabled) {
    2014         kqemu_flush(env, flush_global);
    2015     }
    20162034#endif
    20172035    tlb_flush_count++;
     
    20262044        addr == (tlb_entry->addr_code &
    20272045                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
    2028         tlb_entry->addr_read = -1;
    2029         tlb_entry->addr_write = -1;
    2030         tlb_entry->addr_code = -1;
     2046        *tlb_entry = s_cputlb_empty_entry;
    20312047    }
    20322048}
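With s_cputlb_empty_entry now shared between the VBox and upstream builds, invalidating a TLB entry becomes one whole-struct assignment instead of three separate -1 stores. A self-contained sketch of the C99 designated-initializer pattern (the stand-in struct is simplified from CPUTLBEntry):

#include <stdint.h>

typedef struct TLBEntrySketch {         /* simplified CPUTLBEntry stand-in */
    uintptr_t addr_read, addr_write, addr_code, addend;
} TLBEntrySketch;

/* One canonical "invalid" entry, fields named via designated initializers. */
static const TLBEntrySketch empty_entry = {
    .addr_read  = (uintptr_t)-1,
    .addr_write = (uintptr_t)-1,
    .addr_code  = (uintptr_t)-1,
    .addend     = (uintptr_t)-1,
};

static void flush_entry_sketch(TLBEntrySketch *e)
{
    *e = empty_entry;   /* whole-struct copy replaces field-by-field stores */
}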
     
    20352051{
    20362052    int i;
     2053    int mmu_idx;
    20372054
    20382055#if defined(DEBUG_TLB)
     
    20452062    addr &= TARGET_PAGE_MASK;
    20462063    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    2047     tlb_flush_entry(&env->tlb_table[0][i], addr);
    2048     tlb_flush_entry(&env->tlb_table[1][i], addr);
    2049 #if (NB_MMU_MODES >= 3)
    2050     tlb_flush_entry(&env->tlb_table[2][i], addr);
    2051 #if (NB_MMU_MODES == 4)
    2052     tlb_flush_entry(&env->tlb_table[3][i], addr);
    2053 #endif
    2054 #endif
     2064    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
     2065        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    20552066
    20562067    tlb_flush_jmp_cache(env, addr);
    20572068
    2058 #ifdef USE_KQEMU
     2069#ifdef CONFIG_KQEMU
    20592070    if (env->kqemu_enabled) {
    20602071        kqemu_flush_page(env, addr);
     
    21042115}
    21052116
     2117/* Note: start and end must be within the same ram block.  */
    21062118void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    21072119                                     int dirty_flags)
     
    21192131        return;
    21202132    len = length >> TARGET_PAGE_BITS;
    2121 #ifdef USE_KQEMU
     2133#ifdef CONFIG_KQEMU
    21222134    /* XXX: should not depend on cpu context */
    21232135    env = first_cpu;
     
    21442156    start1 = start;
    21452157#elif !defined(VBOX)
    2146     start1 = start + (unsigned long)phys_ram_base;
     2158    start1 = (unsigned long)qemu_get_ram_ptr(start);
     2159    /* Check that we don't span multiple blocks - this breaks the
     2160       address comparisons below.  */
     2161    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
     2162            != (end - 1) - start) {
     2163        abort();
     2164    }
    21472165#else
    21482166    start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
    21492167#endif
     2168
    21502169    for(env = first_cpu; env != NULL; env = env->next_cpu) {
    2151         for(i = 0; i < CPU_TLB_SIZE; i++)
    2152             tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
    2153         for(i = 0; i < CPU_TLB_SIZE; i++)
    2154             tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    2155 #if (NB_MMU_MODES >= 3)
    2156         for(i = 0; i < CPU_TLB_SIZE; i++)
    2157             tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
    2158 #if (NB_MMU_MODES == 4)
    2159         for(i = 0; i < CPU_TLB_SIZE; i++)
    2160             tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
    2161 #endif
    2162 #endif
     2170        int mmu_idx;
     2171        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
     2172            for(i = 0; i < CPU_TLB_SIZE; i++)
     2173                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
     2174                                      start1, length);
     2175        }
    21632176    }
    21642177}
     
    21682181{
    21692182    in_migration = enable;
     2183    if (kvm_enabled()) {
     2184        return kvm_set_migration_log(enable);
     2185    }
    21702186    return 0;
    21712187}
     
    21772193#endif /* !VBOX */
    21782194
    2179 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
    2180 {
     2195int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
     2196                                   target_phys_addr_t end_addr)
     2197{
     2198    int ret = 0;
     2199
    21812200    if (kvm_enabled())
    2182         kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
     2201        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
     2202    return ret;
    21832203}
    21842204
     
    21902210{
    21912211    ram_addr_t ram_addr;
     2212    void *p;
    21922213
    21932214    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
    2194         /* RAM case */
    21952215#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    21962216        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
    21972217#elif !defined(VBOX)
    2198         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
    2199             tlb_entry->addend - (unsigned long)phys_ram_base;
     2218        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
     2219            + tlb_entry->addend);
     2220        ram_addr = qemu_ram_addr_from_host(p);
    22002221#else
    22012222        Assert(phys_addend != -1);
     
    22122233{
    22132234    int i;
     2235    int mmu_idx;
     2236    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
     2237        for(i = 0; i < CPU_TLB_SIZE; i++)
    22142238#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    2215     for(i = 0; i < CPU_TLB_SIZE; i++)
    2216         tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
    2217     for(i = 0; i < CPU_TLB_SIZE; i++)
    2218         tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
    2219 # if (NB_MMU_MODES >= 3)
    2220     for(i = 0; i < CPU_TLB_SIZE; i++)
    2221         tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
    2222 #  if (NB_MMU_MODES == 4)
    2223     for(i = 0; i < CPU_TLB_SIZE; i++)
    2224         tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
    2225 #  endif
    2226 # endif
    2227 #else /* VBOX */
    2228     for(i = 0; i < CPU_TLB_SIZE; i++)
    2229         tlb_update_dirty(&env->tlb_table[0][i]);
    2230     for(i = 0; i < CPU_TLB_SIZE; i++)
    2231         tlb_update_dirty(&env->tlb_table[1][i]);
    2232 #if (NB_MMU_MODES >= 3)
    2233     for(i = 0; i < CPU_TLB_SIZE; i++)
    2234         tlb_update_dirty(&env->tlb_table[2][i]);
    2235 #if (NB_MMU_MODES == 4)
    2236     for(i = 0; i < CPU_TLB_SIZE; i++)
    2237         tlb_update_dirty(&env->tlb_table[3][i]);
    2238 #endif
    2239 #endif
    2240 #endif /* VBOX */
     2239            tlb_update_dirty(&env->tlb_table[mmu_idx][i], env->phys_addends[mmu_idx][i]);
     2240#else
     2241            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
     2242#endif
     2243    }
    22412244}
    22422245
     
    22522255{
    22532256    int i;
     2257    int mmu_idx;
    22542258
    22552259    vaddr &= TARGET_PAGE_MASK;
    22562260    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    2257     tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    2258     tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
    2259 #if (NB_MMU_MODES >= 3)
    2260     tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
    2261 #if (NB_MMU_MODES == 4)
    2262     tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
    2263 #endif
    2264 #endif
     2261    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
     2262        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    22652263}
    22662264
     
    23072305    addend = pd & TARGET_PAGE_MASK;
    23082306#elif !defined(VBOX)
    2309     addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
     2307    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    23102308#else
    23112309    /** @todo this is racing the phys_page_find call above since it may register
    23122310     *        a new chunk of memory...  */
    2313     addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
    2314                                                pd & TARGET_PAGE_MASK,
    2315                                                !!(prot & PAGE_WRITE));
     2311    addend = (unsigned long)remR3TlbGCPhys2Ptr(env, pd & TARGET_PAGE_MASK, !!(prot & PAGE_WRITE));
    23162312#endif
    23172313
     
    23242320            iotlb |= IO_MEM_ROM;
    23252321    } else {
    2326         /* IO handlers are currently passed a phsical address.
     2322        /* IO handlers are currently passed a physical address.
    23272323           It would be nice to pass an offset from the base address
    23282324           of that region.  This would avoid having to special case RAM,
     
    23452341
    23462342    code_address = address;
    2347 
    23482343#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
     2344
    23492345    if (addend & 0x3)
    23502346    {
     
    23692365        addend &= ~(target_ulong)0x3;
    23702366    }
    2371 #endif
    2372 
     2367
     2368#endif
    23732369    /* Make accesses to pages with watchpoints go via the
    23742370       watchpoint trap routines.  */
     
    24482444
    24492445#ifndef VBOX
    2450 /* dump memory mappings */
    2451 void page_dump(FILE *f)
     2446
     2447/*
     2448 * Walks guest process memory "regions" one by one
     2449 * and calls callback function 'fn' for each region.
     2450 */
     2451int walk_memory_regions(void *priv,
     2452    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
    24522453{
    24532454    unsigned long start, end;
     2455    PageDesc *p = NULL;
    24542456    int i, j, prot, prot1;
    2455     PageDesc *p;
    2456 
    2457     fprintf(f, "%-8s %-8s %-8s %s\n",
    2458             "start", "end", "size", "prot");
    2459     start = -1;
    2460     end = -1;
     2457    int rc = 0;
     2458
     2459    start = end = -1;
    24612460    prot = 0;
    2462     for(i = 0; i <= L1_SIZE; i++) {
    2463         if (i < L1_SIZE)
    2464             p = l1_map[i];
    2465         else
    2466             p = NULL;
    2467         for(j = 0;j < L2_SIZE; j++) {
    2468             if (!p)
    2469                 prot1 = 0;
    2470             else
    2471                 prot1 = p[j].flags;
     2461
     2462    for (i = 0; i <= L1_SIZE; i++) {
     2463        p = (i < L1_SIZE) ? l1_map[i] : NULL;
     2464        for (j = 0; j < L2_SIZE; j++) {
     2465            prot1 = (p == NULL) ? 0 : p[j].flags;
     2466            /*
     2467             * "region" is one continuous chunk of memory
     2468             * that has same protection flags set.
     2469             */
    24722470            if (prot1 != prot) {
    24732471                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
    24742472                if (start != -1) {
    2475                     fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
    2476                             start, end, end - start,
    2477                             prot & PAGE_READ ? 'r' : '-',
    2478                             prot & PAGE_WRITE ? 'w' : '-',
    2479                             prot & PAGE_EXEC ? 'x' : '-');
     2473                    rc = (*fn)(priv, start, end, prot);
     2474                    /* callback can stop iteration by returning != 0 */
     2475                    if (rc != 0)
     2476                        return (rc);
    24802477                }
    24812478                if (prot1 != 0)
     
    24852482                prot = prot1;
    24862483            }
    2487             if (!p)
     2484            if (p == NULL)
    24882485                break;
    24892486        }
    24902487    }
    2491 }
     2488    return (rc);
     2489}
     2490
     2491static int dump_region(void *priv, unsigned long start,
     2492    unsigned long end, unsigned long prot)
     2493{
     2494    FILE *f = (FILE *)priv;
     2495
     2496    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
     2497        start, end, end - start,
     2498        ((prot & PAGE_READ) ? 'r' : '-'),
     2499        ((prot & PAGE_WRITE) ? 'w' : '-'),
     2500        ((prot & PAGE_EXEC) ? 'x' : '-'));
     2501
     2502    return (0);
     2503}
     2504
     2505/* dump memory mappings */
     2506void page_dump(FILE *f)
     2507{
     2508    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
     2509            "start", "end", "size", "prot");
     2510    walk_memory_regions(f, dump_region);
     2511}
     2512
    24922513#endif /* !VBOX */
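page_dump is now just one client of the new walk_memory_regions iterator: any callback matching (priv, start, end, prot) can consume the region list, and a nonzero return stops the walk early. A hedged usage sketch with a hypothetical callback that totals executable bytes:

/* Hypothetical callback: accumulate the number of PAGE_EXEC bytes.
   Returning 0 keeps the iteration going; nonzero would abort it. */
static int count_exec_bytes(void *priv, unsigned long start,
                            unsigned long end, unsigned long prot)
{
    unsigned long *total = priv;

    if (prot & PAGE_EXEC)
        *total += end - start;
    return 0;
}

static unsigned long exec_bytes_example(void)
{
    unsigned long total = 0;

    walk_memory_regions(&total, count_exec_bytes);
    return total;
}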
    24932514
     
    25032524
    25042525/* modify the flags of a page and invalidate the code if
    2505    necessary. The flag PAGE_WRITE_ORG is positionned automatically
     2526   necessary. The flag PAGE_WRITE_ORG is positioned automatically
    25062527   depending on PAGE_WRITE */
    25072528void page_set_flags(target_ulong start, target_ulong end, int flags)
     
    25732594
    25742595/* called from signal handler: invalidate the code and unprotect the
    2575    page. Return TRUE if the fault was succesfully handled. */
     2596   page. Return TRUE if the fault was successfully handled. */
    25762597int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    25772598{
     
    26572678   io memory page.  The address used when calling the IO function is
    26582679   the offset from the start of the region, plus region_offset.  Both
    2659    start_region and regon_offset are rounded down to a page boundary
     2680   start_addr and region_offset are rounded down to a page boundary
    26602681   before calculating this offset.  This should not be a problem unless
    26612682   the low bits of start_addr and region_offset differ.  */
     
    26712692    void *subpage;
    26722693
    2673 #ifdef USE_KQEMU
     2694#ifdef CONFIG_KQEMU
    26742695    /* XXX: should not depend on cpu context */
    26752696    env = first_cpu;
     
    27732794}
    27742795
     2796#ifdef CONFIG_KQEMU
    27752797/* XXX: better than nothing */
     2798static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
     2799{
     2800    ram_addr_t addr;
     2801    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
     2802        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
     2803                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
     2804        abort();
     2805    }
     2806    addr = last_ram_offset;
     2807    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
     2808    return addr;
     2809}
     2810#endif
     2811
    27762812ram_addr_t qemu_ram_alloc(ram_addr_t size)
    27772813{
    2778     ram_addr_t addr;
    2779     if ((phys_ram_alloc_offset + size) > phys_ram_size) {
    2780         fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
    2781                 (uint64_t)size, (uint64_t)phys_ram_size);
     2814    RAMBlock *new_block;
     2815
     2816#ifdef CONFIG_KQEMU
     2817    if (kqemu_phys_ram_base) {
     2818        return kqemu_ram_alloc(size);
     2819    }
     2820#endif
     2821
     2822    size = TARGET_PAGE_ALIGN(size);
     2823    new_block = qemu_malloc(sizeof(*new_block));
     2824
     2825    new_block->host = qemu_vmalloc(size);
     2826    new_block->offset = last_ram_offset;
     2827    new_block->length = size;
     2828
     2829    new_block->next = ram_blocks;
     2830    ram_blocks = new_block;
     2831
     2832    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
     2833        (last_ram_offset + size) >> TARGET_PAGE_BITS);
     2834    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
     2835           0xff, size >> TARGET_PAGE_BITS);
     2836
     2837    last_ram_offset += size;
     2838
     2839    if (kvm_enabled())
     2840        kvm_setup_guest_memory(new_block->host, size);
     2841
     2842    return new_block->offset;
     2843}
     2844
     2845void qemu_ram_free(ram_addr_t addr)
     2846{
     2847    /* TODO: implement this.  */
     2848}
     2849
     2850/* Return a host pointer to ram allocated with qemu_ram_alloc.
     2851   With the exception of the softmmu code in this file, this should
     2852   only be used for local memory (e.g. video ram) that the device owns,
     2853   and knows it isn't going to access beyond the end of the block.
     2854
     2855   It should not be used for general purpose DMA.
     2856   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
     2857 */
     2858void *qemu_get_ram_ptr(ram_addr_t addr)
     2859{
     2860    RAMBlock *prev;
     2861    RAMBlock **prevp;
     2862    RAMBlock *block;
     2863
     2864#ifdef CONFIG_KQEMU
     2865    if (kqemu_phys_ram_base) {
     2866        return kqemu_phys_ram_base + addr;
     2867    }
     2868#endif
     2869
     2870    prev = NULL;
     2871    prevp = &ram_blocks;
     2872    block = ram_blocks;
     2873    while (block && (block->offset > addr
     2874                     || block->offset + block->length <= addr)) {
     2875        if (prev)
     2876          prevp = &prev->next;
     2877        prev = block;
     2878        block = block->next;
     2879    }
     2880    if (!block) {
     2881        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    27822882        abort();
    27832883    }
    2784     addr = phys_ram_alloc_offset;
    2785     phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    2786     return addr;
    2787 }
    2788 
    2789 void qemu_ram_free(ram_addr_t addr)
    2790 {
    2791 }
     2884    /* Move this entry to the start of the list.  */
     2885    if (prev) {
     2886        prev->next = block->next;
     2887        block->next = *prevp;
     2888        *prevp = block;
     2889    }
     2890    return block->host + (addr - block->offset);
     2891}
     2892
     2893/* Some of the softmmu routines need to translate from a host pointer
     2894   (typically a TLB entry) back to a ram offset.  */
     2895ram_addr_t qemu_ram_addr_from_host(void *ptr)
     2896{
     2897    RAMBlock *prev;
     2898    RAMBlock **prevp;
     2899    RAMBlock *block;
     2900    uint8_t *host = ptr;
     2901
     2902#ifdef CONFIG_KQEMU
     2903    if (kqemu_phys_ram_base) {
     2904        return host - kqemu_phys_ram_base;
     2905    }
     2906#endif
     2907
     2908    prev = NULL;
     2909    prevp = &ram_blocks;
     2910    block = ram_blocks;
     2911    while (block && (block->host > host
     2912                     || block->host + block->length <= host)) {
     2913        if (prev)
     2914          prevp = &prev->next;
     2915        prev = block;
     2916        block = block->next;
     2917    }
     2918    if (!block) {
     2919        fprintf(stderr, "Bad ram pointer %p\n", ptr);
     2920        abort();
     2921    }
     2922    return block->offset + (host - block->host);
     2923}
     2924
    27922925#endif /* !VBOX */
    27932926
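Taken together these helpers form the new RAM block API: qemu_ram_alloc hands out a guest offset, qemu_get_ram_ptr translates it to a host pointer (moving the hit block to the head of the list as a most-recently-used cache), and qemu_ram_addr_from_host inverts the mapping for the softmmu code. A hedged round-trip sketch (the size is illustrative only):

/* Sketch: allocate 1 MiB of guest RAM and round-trip an address.  Offsets
   are still contiguous, but the TODO above last_ram_offset warns external
   code not to rely on that once deallocation exists. */
static void ram_roundtrip_example(void)
{
    ram_addr_t offset = qemu_ram_alloc(1024 * 1024);
    uint8_t *host = qemu_get_ram_ptr(offset);   /* device-local use only */

    host[0] = 0xAB;                             /* touch the first byte */

    ram_addr_t back = qemu_ram_addr_from_host(host);
    (void)back;     /* back == offset: the two lookups are inverses */
}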
     
    28913024    remR3PhysWriteU8(ram_addr, val);
    28923025#else
    2893     stb_p(phys_ram_base + ram_addr, val);
    2894 #endif
    2895 #ifdef USE_KQEMU
     3026    stb_p(qemu_get_ram_ptr(ram_addr), val);
     3027#endif
     3028#ifdef CONFIG_KQEMU
    28963029    if (cpu_single_env->kqemu_enabled &&
    28973030        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
     
    29333066    remR3PhysWriteU16(ram_addr, val);
    29343067#else
    2935     stw_p(phys_ram_base + ram_addr, val);
    2936 #endif
    2937 #ifdef USE_KQEMU
     3068    stw_p(qemu_get_ram_ptr(ram_addr), val);
     3069#endif
     3070#ifdef CONFIG_KQEMU
    29383071    if (cpu_single_env->kqemu_enabled &&
    29393072        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
     
    29753108    remR3PhysWriteU32(ram_addr, val);
    29763109#else
    2977     stl_p(phys_ram_base + ram_addr, val);
    2978 #endif
    2979 #ifdef USE_KQEMU
     3110    stl_p(qemu_get_ram_ptr(ram_addr), val);
     3111#endif
     3112#ifdef CONFIG_KQEMU
    29803113    if (cpu_single_env->kqemu_enabled &&
    29813114        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
     
    32133346    eidx = SUBPAGE_IDX(end);
    32143347#if defined(DEBUG_SUBPAGE)
    3215     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
     3348    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
    32163349           mmio, start, end, idx, eidx, memory);
    32173350#endif
     
    32443377
    32453378    mmio->base = base;
    3246     subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
     3379    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
    32473380#if defined(DEBUG_SUBPAGE)
    32483381    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
     
    32693402}
    32703403
    3271 static void io_mem_init(void)
    3272 {
    3273     int i;
    3274 
    3275     cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    3276     cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    3277     cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    3278     for (i=0; i<5; i++)
    3279         io_mem_used[i] = 1;
    3280 
    3281     io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
    3282                                           watch_mem_write, NULL);
    3283 
    3284 #ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
    3285     /* alloc dirty bits array */
    3286     phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    3287     memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
    3288 #endif /* !VBOX */
    3289 }
    3290 
    32913404/* mem_read and mem_write are arrays of functions containing the
    32923405   function to access byte (index 0), word (index 1) and dword (index
    3293    2). Functions can be omitted with a NULL function pointer. The
    3294    registered functions may be modified dynamically later.
     3406   2). Functions can be omitted with a NULL function pointer.
    32953407   If io_index is non zero, the corresponding io zone is
    32963408   modified. If it is zero, a new io zone is allocated. The return
    32973409   value can be used with cpu_register_physical_memory(). (-1) is
    32983410   returned if error. */
    3299 int cpu_register_io_memory(int io_index,
    3300                            CPUReadMemoryFunc **mem_read,
    3301                            CPUWriteMemoryFunc **mem_write,
    3302                            void *opaque)
     3411static int cpu_register_io_memory_fixed(int io_index,
     3412                                        CPUReadMemoryFunc **mem_read,
     3413                                        CPUWriteMemoryFunc **mem_write,
     3414                                        void *opaque)
    33033415{
    33043416    int i, subwidth = 0;
     
    33093421            return io_index;
    33103422    } else {
     3423        io_index >>= IO_MEM_SHIFT;
    33113424        if (io_index >= IO_MEM_NB_ENTRIES)
    33123425            return -1;
     
    33233436}
    33243437
     3438int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
     3439                           CPUWriteMemoryFunc **mem_write,
     3440                           void *opaque)
     3441{
     3442    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
     3443}
     3444
    33253445void cpu_unregister_io_memory(int io_table_address)
    33263446{
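After this change device code no longer chooses its own io_index: cpu_register_io_memory always allocates a fresh slot and the fixed-slot variant is private to exec.c. A hedged registration sketch (the device name and callbacks are hypothetical; the read/write typedefs taking an opaque pointer, an address and, for writes, a 32-bit value match the QEMU 0.11 API used above):

/* Hypothetical device callbacks; real devices supply one per access size. */
static uint32_t mydev_read(void *opaque, target_phys_addr_t addr)
{
    return 0;                   /* ... return register contents ... */
}

static void mydev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* ... latch the register write ... */
}

/* Index 0/1/2 = byte/word/dword handlers; NULL entries are permitted. */
static CPUReadMemoryFunc  *mydev_reads[3]  = { mydev_read,  mydev_read,  mydev_read  };
static CPUWriteMemoryFunc *mydev_writes[3] = { mydev_write, mydev_write, mydev_write };

static void mydev_map(void *dev_state, target_phys_addr_t base)
{
    int io_index = cpu_register_io_memory(mydev_reads, mydev_writes, dev_state);
    cpu_register_physical_memory(base, 0x1000, io_index);
}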
     
    33363456}
    33373457
    3338 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
    3339 {
    3340     return io_mem_write[io_index >> IO_MEM_SHIFT];
    3341 }
    3342 
    3343 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
    3344 {
    3345     return io_mem_read[io_index >> IO_MEM_SHIFT];
     3458static void io_mem_init(void)
     3459{
     3460    int i;
     3461
     3462    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
     3463    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
     3464    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
     3465    for (i=0; i<5; i++)
     3466        io_mem_used[i] = 1;
     3467
     3468    io_mem_watch = cpu_register_io_memory(watch_mem_read,
     3469                                          watch_mem_write, NULL);
     3470#ifdef CONFIG_KQEMU
     3471    if (kqemu_phys_ram_base) {
     3472        /* alloc dirty bits array */
     3473        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
     3474        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
     3475    }
     3476#endif
    33463477}
    33473478
     
    34563587                remR3PhysWrite(addr1, buf, l); NOREF(ptr);
    34573588#else
    3458                 ptr = phys_ram_base + addr1;
     3589                ptr = qemu_get_ram_ptr(addr1);
    34593590                memcpy(ptr, buf, l);
    34603591#endif
     
    35113642                remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
    35123643#else
    3513                 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
     3644                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
    35143645                    (addr & ~TARGET_PAGE_MASK);
    35153646                memcpy(buf, ptr, l);
     
    35553686            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
    35563687            /* ROM/RAM case */
    3557             ptr = phys_ram_base + addr1;
     3688            ptr = qemu_get_ram_ptr(addr1);
    35583689            memcpy(ptr, buf, l);
    35593690        }
     
    35963727
    35973728    LIST_REMOVE(client, link);
     3729    qemu_free(client);
    35983730}
    35993731
     
    36053737        client = LIST_FIRST(&map_client_list);
    36063738        client->callback(client->opaque);
    3607         LIST_REMOVE(client, link);
     3739        cpu_unregister_map_client(client);
    36083740    }
    36093741}
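The bounce-buffer notification loop above now tears clients down through cpu_unregister_map_client, which unlinks and frees them in one place instead of leaking the allocation. A hedged sketch of the registration side, assuming cpu_register_map_client keeps its 0.11 signature of (opaque, callback) returning an opaque handle:

/* Hypothetical retry callback: invoked once bounce-buffer space frees up. */
static void my_dma_retry(void *opaque)
{
    /* re-attempt the cpu_physical_memory_map() that previously failed */
}

static void *my_map_client;

static void my_dma_start(void *dma_state)
{
    /* The handle can be passed to cpu_unregister_map_client() later if
       the retry is no longer wanted. */
    my_map_client = cpu_register_map_client(dma_state, my_dma_retry);
}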
     
    36553787        } else {
    36563788            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
    3657             ptr = phys_ram_base + addr1;
     3789            ptr = qemu_get_ram_ptr(addr1);
    36583790        }
    36593791        if (!done) {
     
    36803812    if (buffer != bounce.buffer) {
    36813813        if (is_write) {
    3682             unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
     3814            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
    36833815            while (access_len) {
    36843816                unsigned l;
     
    37353867        /* RAM case */
    37363868#ifndef VBOX
    3737         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
     3869        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
    37383870            (addr & ~TARGET_PAGE_MASK);
    37393871        val = ldl_p(ptr);
     
    37773909        /* RAM case */
    37783910#ifndef VBOX
    3779         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
     3911        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
    37803912            (addr & ~TARGET_PAGE_MASK);
    37813913        val = ldq_p(ptr);
     
    38283960#ifndef VBOX
    38293961        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
    3830         ptr = phys_ram_base + addr1;
     3962        ptr = qemu_get_ram_ptr(addr1);
    38313963        stl_p(ptr, val);
    38323964#else
     
    38754007    } else {
    38764008#ifndef VBOX
    3877         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
     4009        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
    38784010            (addr & ~TARGET_PAGE_MASK);
    38794011        stq_p(ptr, val);
     
    39094041        /* RAM case */
    39104042#ifndef VBOX
    3911         ptr = phys_ram_base + addr1;
     4043        ptr = qemu_get_ram_ptr(addr1);
    39124044        stl_p(ptr, val);
    39134045#else
     
    39504082#endif
    39514083
    3952 /* virtual memory access for debug */
     4084#ifndef VBOX
     4085/* virtual memory access for debug (includes writing to ROM) */
    39534086int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
    39544087                        uint8_t *buf, int len, int is_write)
     
    39674100        if (l > len)
    39684101            l = len;
    3969         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
    3970                                buf, l, is_write);
     4102        phys_addr += (addr & ~TARGET_PAGE_MASK);
     4103#if !defined(CONFIG_USER_ONLY)
     4104        if (is_write)
     4105            cpu_physical_memory_write_rom(phys_addr, buf, l);
     4106        else
     4107#endif
     4108            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
    39714109        len -= l;
    39724110        buf += l;
     
    39754113    return 0;
    39764114}
     4115#endif /* !VBOX */
    39774116
    39784117/* in deterministic execution mode, instructions doing device I/Os
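Because debug writes now funnel through cpu_physical_memory_write_rom, a debugger can plant breakpoints even in ROM-backed guest pages. A hedged sketch of the classic gdbstub-style use (the helper and its error convention are hypothetical; only cpu_memory_rw_debug comes from the code above):

/* Sketch: save the original byte at vaddr, then plant an x86 INT3 (0xCC). */
static int plant_breakpoint(CPUState *env, target_ulong vaddr, uint8_t *saved)
{
    uint8_t int3 = 0xCC;

    if (cpu_memory_rw_debug(env, vaddr, saved, 1, 0) != 0)
        return -1;                          /* address not mapped */
    return cpu_memory_rw_debug(env, vaddr, &int3, 1, 1);
}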