VirtualBox

Changeset 37675 in vbox for trunk/src


Ignore:
Timestamp:
Jun 29, 2011 7:07:14 AM (13 years ago)
Author:
vboxsync
Message:

rem: Synced with QEMU v0.12.5.

Location:
trunk/src/recompiler
Files:
3 added
3 deleted
47 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/recompiler/Makefile.kmk

    r36768 r37675  
    1919SUB_DEPTH = ../..
    2020include $(KBUILD_PATH)/subheader.kmk
    21 
    22 ifn1of ($(KBUILD_TARGET).$(KBUILD_TARGET_ARCH), darwin.x86 darwin.amd64 linux.amd64)
    23  include $(PATH_SUB_CURRENT)/Makefile-old.kmk
    24 else  # new stuff
    2521
    2622#
     
    6359VBoxRemPrimary_DEFS           += IN_REM_R3 REM_INCLUDE_CPU_H NEED_CPU_H
    6460#VBoxRemPrimary_DEFS           += REM_PHYS_ADDR_IN_TLB
    65 #VBoxRemPrimary_DEFS           += DEBUG_ALL_LOGGING DEBUG_DISAS DEBUG_PCALL DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB  # Enables huge amounts of debug logging.
    66 #VBoxRemPrimary_DEFS           += DEBUG_TMP_LOGGING # log qemu parts to "/tmp/vbox-qemu.log" - does not work with VBoxREM2.
     61#VBoxRemPrimary_DEFS           += DEBUG_ALL_LOGGING DEBUG_DISAS DEBUG_PCALL CONFIG_DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB  # Enables huge amounts of debug logging.
    6762ifdef IEM_VERIFICATION_MODE
    6863 VBoxRemPrimary_DEFS          += IEM_VERIFICATION_MODE
     
    7065VBoxRemPrimary_DEFS.linux      = _GNU_SOURCE
    7166ifdef VBOX_SOLARIS_10
    72  VBoxRemPrimary_DEFS.solaris   = HOST_SOLARIS=10
    73 else
    74  VBoxRemPrimary_DEFS.solaris   = HOST_SOLARIS=11
     67 VBoxRemPrimary_DEFS.solaris   = CONFIG_SOLARIS_VERSION=10
     68else
     69 VBoxRemPrimary_DEFS.solaris   = CONFIG_SOLARIS_VERSION=11
    7570endif
    7671VBoxRemPrimary_DEFS.freebsd   += _BSD
     
    10196        host-utils.c \
    10297        cutils.c \
     98        tcg-runtime.c \
    10399        tcg/tcg.c \
    104100        tcg/tcg-dyngen.c \
    105         tcg/tcg-runtime.c \
    106101        fpu/softfloat-native.c \
    107102        target-i386/op_helper.c \
     
    273268
    274269
    275 endif # new stuff
    276270include $(KBUILD_PATH)/subfooter.kmk
    277271
  • trunk/src/recompiler/Sun/config-host.h

    r36170 r37675  
    3838# endif
    3939#endif
    40 #define QEMU_VERSION "0.8.1"
     40#define QEMU_VERSION "0.12.5"
    4141#define CONFIG_UNAME_RELEASE ""
    4242#define CONFIG_QEMU_SHAREDIR "."
  • trunk/src/recompiler/Sun/kvm.h

    r36211 r37675  
    2323#define kvm_set_phys_mem(a, b, c)       AssertFailed()
    2424#define kvm_arch_get_registers(a)       AssertFailed()
     25#define cpu_synchronize_state(a)        do { } while (0)
    2526
    2627#endif
  • trunk/src/recompiler/VBoxRecompiler.c

    r36811 r37675  
    310310    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
    311311
     312    cpu_reset(&pVM->rem.s.Env);
     313
    312314    /* allocate code buffer for single instruction emulation. */
    313315    pVM->rem.s.Env.cbCodeBuffer = 4096;
     
    315317    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
    316318
    317     /* finally, set the cpu_single_env global. */
     319    /* Finally, set the cpu_single_env global. */
    318320    cpu_single_env = &pVM->rem.s.Env;
    319321
     
    416418#ifdef DEBUG_ALL_LOGGING
    417419    loglevel = ~0;
    418 # ifdef DEBUG_TMP_LOGGING
    419     logfile = fopen("/tmp/vbox-qemu.log", "w");
    420 # endif
    421 #endif
     420#endif
     421//loglevel = CPU_LOG_EXEC | CPU_LOG_INT | CPU_LOG_PCALL | CPU_LOG_TB_CPU; /// DONT COMMIT ME
    422422
    423423    /*
     
    999999                    CPUBreakpoint  *pBP;
    10001000                    RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    1001                     TAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
     1001                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
    10021002                        if (pBP->pc == GCPtrPC)
    10031003                            break;
     
    11821182                    CPUBreakpoint  *pBP;
    11831183                    RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    1184                     TAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
     1184                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
    11851185                        if (pBP->pc == GCPtrPC)
    11861186                            break;
     
    13401340                CPUBreakpoint  *pBP;
    13411341                RTGCPTR         GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    1342                 TAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
     1342                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
    13431343                    if (pBP->pc == GCPtrPC)
    13441344                        break;
     
    15311531    }
    15321532
    1533     if (!TAILQ_EMPTY(&env->breakpoints))
     1533    if (!QTAILQ_EMPTY(&env->breakpoints))
    15341534    {
    15351535        //Log2(("raw mode refused: Breakpoints\n"));
     
    15371537    }
    15381538
    1539     if (!TAILQ_EMPTY(&env->watchpoints))
     1539    if (!QTAILQ_EMPTY(&env->watchpoints))
    15401540    {
    15411541        //Log2(("raw mode refused: Watchpoints\n"));
     
    40024002void disas(FILE *phFile, void *pvCode, unsigned long cb)
    40034003{
    4004 #ifdef DEBUG_TMP_LOGGING
    4005 # define DISAS_PRINTF(x...) fprintf(phFile, x)
    4006 #else
    4007 # define DISAS_PRINTF(x...) RTLogPrintf(x)
    40084004    if (LogIs2Enabled())
    4009 #endif
    40104005    {
    40114006        unsigned        off = 0;
     
    40204015#endif
    40214016
    4022         DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
     4017        RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
    40234018        while (off < cb)
    40244019        {
    40254020            uint32_t cbInstr;
    40264021            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
    4027                 DISAS_PRINTF("%s", szOutput);
     4022                RTLogPrintf("%s", szOutput);
    40284023            else
    40294024            {
    4030                 DISAS_PRINTF("disas error\n");
     4025                RTLogPrintf("disas error\n");
    40314026                cbInstr = 1;
    40324027#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
     
    40374032        }
    40384033    }
    4039 
    4040 #undef  DISAS_PRINTF
    40414034}
    40424035
     
    40524045void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
    40534046{
    4054 #ifdef DEBUG_TMP_LOGGING
    4055 # define DISAS_PRINTF(x...) fprintf(phFile, x)
    4056 #else
    4057 # define DISAS_PRINTF(x...) RTLogPrintf(x)
    40584047    if (LogIs2Enabled())
    4059 #endif
    40604048    {
    40614049        PVM         pVM = cpu_single_env->pVM;
     
    40744062         * Do the disassembling.
    40754063         */
    4076         DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
     4064        RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
    40774065        cs = cpu_single_env->segs[R_CS].selector;
    40784066        eip = uCode - cpu_single_env->segs[R_CS].base;
     
    40894077                                        &cbInstr);
    40904078            if (RT_SUCCESS(rc))
    4091                 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
     4079                RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
    40924080            else
    40934081            {
    4094                 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
     4082                RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
    40954083                cbInstr = 1;
    40964084            }
     
    41044092        }
    41054093    }
    4106 #undef DISAS_PRINTF
    41074094}
    41084095
  • trunk/src/recompiler/bswap.h

    r36175 r37675  
    66#include <inttypes.h>
    77
    8 #ifdef HAVE_MACHINE_BSWAP_H
     8#ifdef CONFIG_MACHINE_BSWAP_H
    99#include <sys/endian.h>
    1010#include <sys/types.h>
     
    1212#else
    1313
    14 #ifdef HAVE_BYTESWAP_H
     14#ifdef CONFIG_BYTESWAP_H
    1515#include <byteswap.h>
    1616#else
     
    4848})
    4949
    50 #endif /* !HAVE_BYTESWAP_H */
     50#endif /* !CONFIG_BYTESWAP_H */
    5151
    5252static inline uint16_t bswap16(uint16_t x)
     
    6565}
    6666
    67 #endif /* ! HAVE_MACHINE_BSWAP_H */
     67#endif /* ! CONFIG_MACHINE_BSWAP_H */
    6868
    6969static inline void bswap16s(uint16_t *s)
     
    8282}
    8383
    84 #if defined(WORDS_BIGENDIAN)
     84#if defined(HOST_WORDS_BIGENDIAN)
    8585#define be_bswap(v, size) (v)
    8686#define le_bswap(v, size) bswap ## size(v)
     
    204204#endif
    205205
    206 #ifdef WORDS_BIGENDIAN
     206#ifdef HOST_WORDS_BIGENDIAN
    207207#define cpu_to_32wu cpu_to_be32wu
    208208#else
  • trunk/src/recompiler/cpu-all.h

    r36175 r37675  
    4545 * memory accesses.
    4646 *
    47  * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
     47 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
    4848 * otherwise little endian.
    4949 *
     
    5555#include "softfloat.h"
    5656
    57 #if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
     57#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
    5858#define BSWAP_NEEDED
    5959#endif
     
    141141typedef union {
    142142    float64 d;
    143 #if defined(WORDS_BIGENDIAN) \
     143#if defined(HOST_WORDS_BIGENDIAN) \
    144144    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    145145    struct {
     
    159159typedef union {
    160160    float128 q;
    161 #if defined(WORDS_BIGENDIAN) \
     161#if defined(HOST_WORDS_BIGENDIAN) \
    162162    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    163163    struct {
     
    240240void        remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val);
    241241
    242 #ifndef REM_PHYS_ADDR_IN_TLB
     242# ifndef REM_PHYS_ADDR_IN_TLB
    243243void       *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable);
    244 #endif
     244# endif
    245245
    246246#endif /* VBOX */
     
    308308}
    309309
    310 #undef VBOX_CHECK_ADDR
     310# undef VBOX_CHECK_ADDR
    311311
    312312/* float access */
     
    368368   kernel handles unaligned load/stores may give better results, but
    369369   it is a system wide setting : bad */
    370 #if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
     370#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
    371371
    372372/* conservative code for little endian unaligned accesses */
     
    546546#endif /* !VBOX */
    547547
    548 #if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
     548#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
    549549
    550550static inline int lduw_be_p(const void *ptr)
     
    772772 * This allows the guest address space to be offset to a convenient location.
    773773 */
    774 //#define GUEST_BASE 0x20000000
    775 #define GUEST_BASE 0
     774#if defined(CONFIG_USE_GUEST_BASE)
     775extern unsigned long guest_base;
     776extern int have_guest_base;
     777#define GUEST_BASE guest_base
     778#else
     779#define GUEST_BASE 0ul
     780#endif
    776781
    777782/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
     
    10421047#define VGA_DIRTY_FLAG       0x01
    10431048#define CODE_DIRTY_FLAG      0x02
    1044 #define KQEMU_DIRTY_FLAG     0x04
    10451049#define MIGRATION_DIRTY_FLAG 0x08
    10461050
     
    12111215}
    12121216
    1213 #elif defined(__mips__)
     1217#elif defined(__mips__) && \
     1218      ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
     1219/*
     1220 * binutils wants to use rdhwr only on mips32r2
     1221 * but as linux kernel emulate it, it's fine
     1222 * to use it.
     1223 *
     1224 */
     1225#define MIPS_RDHWR(rd, value) {                 \
     1226    __asm__ __volatile__ (                      \
     1227                          ".set   push\n\t"     \
     1228                          ".set mips32r2\n\t"   \
     1229                          "rdhwr  %0, "rd"\n\t" \
     1230                          ".set   pop"          \
     1231                          : "=r" (value));      \
     1232}
    12141233
    12151234static inline int64_t cpu_get_real_ticks(void)
    12161235{
    1217 #if __mips_isa_rev >= 2
     1236/* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    12181237    uint32_t count;
    12191238    static uint32_t cyc_per_count = 0;
    12201239
    12211240    if (!cyc_per_count)
    1222         __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));
    1223 
    1224     __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
     1241        MIPS_RDHWR("$3", cyc_per_count);
     1242
     1243    MIPS_RDHWR("$2", count);
    12251244    return (int64_t)(count * cyc_per_count);
    1226 #else
    1227     /* FIXME */
    1228     static int64_t ticks = 0;
    1229     return ticks++;
    1230 #endif
    12311245}
    12321246
     
    12491263}
    12501264
    1251 extern int64_t kqemu_time, kqemu_time_start;
    12521265extern int64_t qemu_time, qemu_time_start;
    12531266extern int64_t tlb_flush_time;
    1254 extern int64_t kqemu_exec_count;
    12551267extern int64_t dev_time;
    1256 extern int64_t kqemu_ret_int_count;
    1257 extern int64_t kqemu_ret_excp_count;
    1258 extern int64_t kqemu_ret_intr_count;
    12591268#endif
    12601269
  • trunk/src/recompiler/cpu-common.h

    r36175 r37675  
    1111
    1212/* address in the RAM (different from a physical address) */
    13 #ifdef CONFIG_KQEMU
    14 /* FIXME: This is wrong.  */
    15 typedef uint32_t ram_addr_t;
    16 #else
    1713typedef unsigned long ram_addr_t;
    18 #endif
    1914
    2015/* memory API */
     
    4237ram_addr_t qemu_ram_addr_from_host(void *ptr);
    4338
    44 int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
    45                            CPUWriteMemoryFunc **mem_write,
     39int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
     40                           CPUWriteMemoryFunc * const *mem_write,
    4641                           void *opaque);
    4742void cpu_unregister_io_memory(int table_address);
  • trunk/src/recompiler/cpu-defs.h

    r36177 r37675  
    4141#endif
    4242#include "osdep.h"
    43 #include "sys-queue.h"
     43#include "qemu-queue.h"
    4444#include "targphys.h"
    4545
     
    125125} CPUTLBEntry;
    126126
    127 #ifdef WORDS_BIGENDIAN
     127#ifdef HOST_WORDS_BIGENDIAN
    128128typedef struct icount_decr_u16 {
    129129    uint16_t high;
     
    143143    target_ulong pc;
    144144    int flags; /* BP_* */
    145     TAILQ_ENTRY(CPUBreakpoint) entry;
     145    QTAILQ_ENTRY(CPUBreakpoint) entry;
    146146} CPUBreakpoint;
    147147
     
    150150    target_ulong len_mask;
    151151    int flags; /* BP_* */
    152     TAILQ_ENTRY(CPUWatchpoint) entry;
     152    QTAILQ_ENTRY(CPUWatchpoint) entry;
    153153} CPUWatchpoint;
    154154
     
    171171    /* The meaning of the MMU modes is defined in the target code. */   \
    172172    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
    173     target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];               \
     173    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
    174174    /** addends for HVA -> GPA translations */                          \
    175175    VBOX_ONLY(target_phys_addr_t   phys_addends[NB_MMU_MODES][CPU_TLB_SIZE]); \
     
    190190    /* from this point: preserved by CPU reset */                       \
    191191    /* ice debug support */                                             \
    192     TAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;            \
     192    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;            \
    193193    int singlestep_enabled;                                             \
    194194                                                                        \
    195     TAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;            \
     195    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;            \
    196196    CPUWatchpoint *watchpoint_hit;                                      \
    197197                                                                        \
     
    206206    uint32_t host_tid; /* host thread ID */                             \
    207207    int numa_node; /* NUMA node this cpu is belonging to  */            \
     208    int nr_cores;  /* number of cores within this CPU package */        \
     209    int nr_threads;/* number of threads within this CPU */              \
    208210    int running; /* Nonzero if cpu is currently running(usermode).  */  \
    209211    /* user data */                                                     \
  • trunk/src/recompiler/cpu-exec.c

    r36768 r37675  
    4949#endif
    5050
    51 #if defined(__sparc__) && !defined(HOST_SOLARIS)
     51#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
    5252// Work around ugly bugs in glibc that mangle global register contents
    5353#undef env
     
    5757int tb_invalidated_flag;
    5858
    59 //#define DEBUG_EXEC
     59//#define CONFIG_DEBUG_EXEC
    6060//#define DEBUG_SIGNAL
    6161
     
    217217
    218218    if (!env->watchpoint_hit)
    219         TAILQ_FOREACH(wp, &env->watchpoints, entry)
     219        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
    220220            wp->flags &= ~BP_WATCHPOINT_HIT;
    221221
     
    425425                RAWEx_ProfileStop(env, STATS_RAW_CHECK);
    426426
     427{
     428    RTGCPTR mypc = env->eip + env->segs[R_CS].base;
     429if (mypc == 0x00fe0d2 || mypc == 0x00f19e9 || mypc == 0x000f0827 || mypc == 0x000fe090) {
     430    RTLogFlags(NULL, "enabled");
     431    loglevel = ~0;
     432    Log(("BANG CRASH!\n"));
     433}
     434}
     435#ifdef CONFIG_DEBUG_EXEC
     436                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
     437                    /* restore flags in standard format */
     438                    regs_to_env();
     439                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
     440                    log_cpu_state(env, X86_DUMP_CCOP);
     441                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
     442                }
     443#endif
    427444                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
    428445                spin_lock(&tb_lock);
     
    437454                    tb_invalidated_flag = 0;
    438455                }
     456#ifdef CONFIG_DEBUG_EXEC
     457                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s [sp=%RGv, bp=%RGv\n",
     458                             (long)tb->tc_ptr, tb->pc, lookup_symbol(tb->pc), (RTGCPTR)env->regs[R_ESP], (RTGCPTR)env->regs[R_EBP]);
     459#endif
     460
    439461
    440462                /* see if we can patch the calling TB. When the TB
     
    562584    env_to_regs();
    563585#if defined(TARGET_I386)
    564     /* put eflags in CPU temporary format */
    565     CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    566     DF = 1 - (2 * ((env->eflags >> 10) & 1));
    567     CC_OP = CC_OP_EFLAGS;
    568     env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
     586    if (!kvm_enabled()) {
     587        /* put eflags in CPU temporary format */
     588        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
     589        DF = 1 - (2 * ((env->eflags >> 10) & 1));
     590        CC_OP = CC_OP_EFLAGS;
     591        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
     592    }
    569593#elif defined(TARGET_SPARC)
    570594#elif defined(TARGET_M68K)
     
    579603#elif defined(TARGET_SH4)
    580604#elif defined(TARGET_CRIS)
     605#elif defined(TARGET_S390X)
    581606    /* XXXXX */
    582607#else
     
    588613    for(;;) {
    589614        if (setjmp(env->jmp_env) == 0) {
    590 #if defined(__sparc__) && !defined(HOST_SOLARIS)
     615#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
    591616#undef env
    592617                    env = cpu_single_env;
     
    651676                env->exception_index = -1;
    652677            }
    653 #ifdef CONFIG_KQEMU
    654             if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
    655                 int ret;
    656                 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
    657                 ret = kqemu_cpu_exec(env);
    658                 /* put eflags in CPU temporary format */
    659                 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    660                 DF = 1 - (2 * ((env->eflags >> 10) & 1));
    661                 CC_OP = CC_OP_EFLAGS;
    662                 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    663                 if (ret == 1) {
    664                     /* exception */
    665                     longjmp(env->jmp_env, 1);
    666                 } else if (ret == 2) {
    667                     /* softmmu execution needed */
    668                 } else {
    669                     if (env->interrupt_request != 0 || env->exit_request != 0) {
    670                         /* hardware interrupt will be executed just after */
    671                     } else {
    672                         /* otherwise, we restart */
    673                         longjmp(env->jmp_env, 1);
    674                     }
    675                 }
    676             }
    677 #endif
    678678
    679679            if (kvm_enabled()) {
     
    744744                            intno = cpu_get_pic_interrupt(env);
    745745                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
    746 #if defined(__sparc__) && !defined(HOST_SOLARIS)
     746#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
    747747#undef env
    748748                    env = cpu_single_env;
     
    771771#if 0
    772772                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
    773                         cpu_ppc_reset(env);
     773                        cpu_reset(env);
    774774                    }
    775775#endif
     
    815815                            do_interrupt(env);
    816816                            env->interrupt_index = 0;
    817 #if !defined(CONFIG_USER_ONLY)
    818                             cpu_check_irqs(env);
    819 #endif
    820817                        next_tb = 0;
    821818                        }
     
    898895                    cpu_loop_exit();
    899896                }
    900 #ifdef DEBUG_EXEC
     897#ifdef CONFIG_DEBUG_EXEC
    901898                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
    902899                    /* restore flags in standard format */
     
    944941                    tb_invalidated_flag = 0;
    945942                }
    946 #ifdef DEBUG_EXEC
     943#ifdef CONFIG_DEBUG_EXEC
    947944                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
    948945                             (long)tb->tc_ptr, tb->pc,
     
    953950                   jump. */
    954951                {
    955                     if (next_tb != 0 &&
    956 #ifdef CONFIG_KQEMU
    957                         (env->kqemu_enabled != 2) &&
    958 #endif
    959                         tb->page_addr[1] == -1) {
     952                    if (next_tb != 0 && tb->page_addr[1] == -1) {
    960953                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
    961954                }
     
    974967                    tc_ptr = tb->tc_ptr;
    975968                /* execute the generated code */
    976 #if defined(__sparc__) && !defined(HOST_SOLARIS)
     969#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
    977970#undef env
    978971                    env = cpu_single_env;
     
    10111004                /* reset soft MMU for next block (it can currently
    10121005                   only be set by a memory fault) */
    1013 #if defined(CONFIG_KQEMU)
    1014 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
    1015                 if (kqemu_is_ok(env) &&
    1016                     (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
    1017                     cpu_loop_exit();
    1018                 }
    1019 #endif
    10201006            } /* for(;;) */
    10211007        } else {
     
    10421028#elif defined(TARGET_ALPHA)
    10431029#elif defined(TARGET_CRIS)
     1030#elif defined(TARGET_S390X)
    10441031    /* XXXXX */
    10451032#else
     
    11171104
    11181105#if defined(TARGET_I386)
     1106#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
     1107#else
     1108#define EXCEPTION_ACTION cpu_loop_exit()
     1109#endif
    11191110
    11201111/* 'pc' is the host PC at which the exception was raised. 'address' is
     
    11411132
    11421133    /* see if it is an MMU fault */
    1143     ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     1134    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    11441135    if (ret < 0)
    11451136        return 0; /* not an MMU fault */
     
    11531144        cpu_restore_state(tb, env, pc, puc);
    11541145    }
    1155     if (ret == 1) {
    1156 #if 0
    1157         printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
    1158                env->eip, env->cr[2], env->error_code);
    1159 #endif
    1160         /* we restore the process signal mask as the sigreturn should
    1161            do it (XXX: use sigsetjmp) */
    1162         sigprocmask(SIG_SETMASK, old_set, NULL);
    1163         raise_exception_err(env->exception_index, env->error_code);
    1164     } else {
    1165         /* activate soft MMU for this block */
    1166         env->hflags |= HF_SOFTMMU_MASK;
    1167         cpu_resume_from_signal(env, puc);
    1168     }
    1169     /* never comes here */
    1170     return 1;
    1171 }
    1172 
    1173 #elif defined(TARGET_ARM)
    1174 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
    1175                                     int is_write, sigset_t *old_set,
    1176                                     void *puc)
    1177 {
    1178     TranslationBlock *tb;
    1179     int ret;
    1180 
    1181     if (cpu_single_env)
    1182         env = cpu_single_env; /* XXX: find a correct solution for multithread */
    1183 #if defined(DEBUG_SIGNAL)
    1184     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
    1185            pc, address, is_write, *(unsigned long *)old_set);
    1186 #endif
    1187     /* XXX: locking issue */
    1188     if (is_write && page_unprotect(h2g(address), pc, puc)) {
    1189         return 1;
    1190     }
    1191     /* see if it is an MMU fault */
    1192     ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    1193     if (ret < 0)
    1194         return 0; /* not an MMU fault */
    1195     if (ret == 0)
    1196         return 1; /* the MMU fault was handled without causing real CPU fault */
    1197     /* now we have a real cpu fault */
    1198     tb = tb_find_pc(pc);
    1199     if (tb) {
    1200         /* the PC is inside the translated code. It means that we have
    1201            a virtual CPU fault */
    1202         cpu_restore_state(tb, env, pc, puc);
    1203     }
     1146
    12041147    /* we restore the process signal mask as the sigreturn should
    12051148       do it (XXX: use sigsetjmp) */
    12061149    sigprocmask(SIG_SETMASK, old_set, NULL);
    1207     cpu_loop_exit();
     1150    EXCEPTION_ACTION;
     1151
    12081152    /* never comes here */
    12091153    return 1;
    12101154}
    1211 #elif defined(TARGET_SPARC)
    1212 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
    1213                                     int is_write, sigset_t *old_set,
    1214                                     void *puc)
    1215 {
    1216     TranslationBlock *tb;
    1217     int ret;
    1218 
    1219     if (cpu_single_env)
    1220         env = cpu_single_env; /* XXX: find a correct solution for multithread */
    1221 #if defined(DEBUG_SIGNAL)
    1222     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
    1223            pc, address, is_write, *(unsigned long *)old_set);
    1224 #endif
    1225     /* XXX: locking issue */
    1226     if (is_write && page_unprotect(h2g(address), pc, puc)) {
    1227         return 1;
    1228     }
    1229     /* see if it is an MMU fault */
    1230     ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    1231     if (ret < 0)
    1232         return 0; /* not an MMU fault */
    1233     if (ret == 0)
    1234         return 1; /* the MMU fault was handled without causing real CPU fault */
    1235     /* now we have a real cpu fault */
    1236     tb = tb_find_pc(pc);
    1237     if (tb) {
    1238         /* the PC is inside the translated code. It means that we have
    1239            a virtual CPU fault */
    1240         cpu_restore_state(tb, env, pc, puc);
    1241     }
    1242     /* we restore the process signal mask as the sigreturn should
    1243        do it (XXX: use sigsetjmp) */
    1244     sigprocmask(SIG_SETMASK, old_set, NULL);
    1245     cpu_loop_exit();
    1246     /* never comes here */
    1247     return 1;
    1248 }
    1249 #elif defined (TARGET_PPC)
    1250 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
    1251                                     int is_write, sigset_t *old_set,
    1252                                     void *puc)
    1253 {
    1254     TranslationBlock *tb;
    1255     int ret;
    1256 
    1257     if (cpu_single_env)
    1258         env = cpu_single_env; /* XXX: find a correct solution for multithread */
    1259 #if defined(DEBUG_SIGNAL)
    1260     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
    1261            pc, address, is_write, *(unsigned long *)old_set);
    1262 #endif
    1263     /* XXX: locking issue */
    1264     if (is_write && page_unprotect(h2g(address), pc, puc)) {
    1265         return 1;
    1266     }
    1267 
    1268     /* see if it is an MMU fault */
    1269     ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    1270     if (ret < 0)
    1271         return 0; /* not an MMU fault */
    1272     if (ret == 0)
    1273         return 1; /* the MMU fault was handled without causing real CPU fault */
    1274 
    1275     /* now we have a real cpu fault */
    1276     tb = tb_find_pc(pc);
    1277     if (tb) {
    1278         /* the PC is inside the translated code. It means that we have
    1279            a virtual CPU fault */
    1280         cpu_restore_state(tb, env, pc, puc);
    1281     }
    1282     if (ret == 1) {
    1283 #if 0
    1284         printf("PF exception: NIP=0x%08x error=0x%x %p\n",
    1285                env->nip, env->error_code, tb);
    1286 #endif
    1287     /* we restore the process signal mask as the sigreturn should
    1288        do it (XXX: use sigsetjmp) */
    1289         sigprocmask(SIG_SETMASK, old_set, NULL);
    1290         cpu_loop_exit();
    1291     } else {
    1292         /* activate soft MMU for this block */
    1293         cpu_resume_from_signal(env, puc);
    1294     }
    1295     /* never comes here */
    1296     return 1;
    1297 }
    1298 
    1299 #elif defined(TARGET_M68K)
    1300 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
    1301                                     int is_write, sigset_t *old_set,
    1302                                     void *puc)
    1303 {
    1304     TranslationBlock *tb;
    1305     int ret;
    1306 
    1307     if (cpu_single_env)
    1308         env = cpu_single_env; /* XXX: find a correct solution for multithread */
    1309 #if defined(DEBUG_SIGNAL)
    1310     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
    1311            pc, address, is_write, *(unsigned long *)old_set);
    1312 #endif
    1313     /* XXX: locking issue */
    1314     if (is_write && page_unprotect(address, pc, puc)) {
    1315         return 1;
    1316     }
    1317     /* see if it is an MMU fault */
    1318     ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    1319     if (ret < 0)
    1320         return 0; /* not an MMU fault */
    1321     if (ret == 0)
    1322         return 1; /* the MMU fault was handled without causing real CPU fault */
    1323     /* now we have a real cpu fault */
    1324     tb = tb_find_pc(pc);
    1325     if (tb) {
    1326         /* the PC is inside the translated code. It means that we have
    1327            a virtual CPU fault */
    1328         cpu_restore_state(tb, env, pc, puc);
    1329     }
    1330     /* we restore the process signal mask as the sigreturn should
    1331        do it (XXX: use sigsetjmp) */
    1332     sigprocmask(SIG_SETMASK, old_set, NULL);
    1333     cpu_loop_exit();
    1334     /* never comes here */
    1335     return 1;
    1336 }
    1337 
    1338 #elif defined (TARGET_MIPS)
    1339 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
    1340                                     int is_write, sigset_t *old_set,
    1341                                     void *puc)
    1342 {
    1343     TranslationBlock *tb;
    1344     int ret;
    1345 
    1346     if (cpu_single_env)
    1347         env = cpu_single_env; /* XXX: find a correct solution for multithread */
    1348 #if defined(DEBUG_SIGNAL)
    1349     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
    1350            pc, address, is_write, *(unsigned long *)old_set);
    1351 #endif
    1352     /* XXX: locking issue */
    1353     if (is_write && page_unprotect(h2g(address), pc, puc)) {
    1354         return 1;
    1355     }
    1356 
    1357     /* see if it is an MMU fault */
    1358     ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    1359     if (ret < 0)
    1360         return 0; /* not an MMU fault */
    1361     if (ret == 0)
    1362         return 1; /* the MMU fault was handled without causing real CPU fault */
    1363 
    1364     /* now we have a real cpu fault */
    1365     tb = tb_find_pc(pc);
    1366     if (tb) {
    1367         /* the PC is inside the translated code. It means that we have
    1368            a virtual CPU fault */
    1369         cpu_restore_state(tb, env, pc, puc);
    1370     }
    1371     if (ret == 1) {
    1372 #if 0
    1373         printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
    1374                env->PC, env->error_code, tb);
    1375 #endif
    1376     /* we restore the process signal mask as the sigreturn should
    1377        do it (XXX: use sigsetjmp) */
    1378         sigprocmask(SIG_SETMASK, old_set, NULL);
    1379         cpu_loop_exit();
    1380     } else {
    1381         /* activate soft MMU for this block */
    1382         cpu_resume_from_signal(env, puc);
    1383     }
    1384     /* never comes here */
    1385     return 1;
    1386 }
    1387 
    1388 #elif defined (TARGET_MICROBLAZE)
    1389 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
    1390                                     int is_write, sigset_t *old_set,
    1391                                     void *puc)
    1392 {
    1393     TranslationBlock *tb;
    1394     int ret;
    1395 
    1396     if (cpu_single_env)
    1397         env = cpu_single_env; /* XXX: find a correct solution for multithread */
    1398 #if defined(DEBUG_SIGNAL)
    1399     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
    1400            pc, address, is_write, *(unsigned long *)old_set);
    1401 #endif
    1402     /* XXX: locking issue */
    1403     if (is_write && page_unprotect(h2g(address), pc, puc)) {
    1404         return 1;
    1405     }
    1406 
    1407     /* see if it is an MMU fault */
    1408     ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    1409     if (ret < 0)
    1410         return 0; /* not an MMU fault */
    1411     if (ret == 0)
    1412         return 1; /* the MMU fault was handled without causing real CPU fault */
    1413 
    1414     /* now we have a real cpu fault */
    1415     tb = tb_find_pc(pc);
    1416     if (tb) {
    1417         /* the PC is inside the translated code. It means that we have
    1418            a virtual CPU fault */
    1419         cpu_restore_state(tb, env, pc, puc);
    1420     }
    1421     if (ret == 1) {
    1422 #if 0
    1423         printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
    1424                env->PC, env->error_code, tb);
    1425 #endif
    1426     /* we restore the process signal mask as the sigreturn should
    1427        do it (XXX: use sigsetjmp) */
    1428         sigprocmask(SIG_SETMASK, old_set, NULL);
    1429         cpu_loop_exit();
    1430     } else {
    1431         /* activate soft MMU for this block */
    1432         cpu_resume_from_signal(env, puc);
    1433     }
    1434     /* never comes here */
    1435     return 1;
    1436 }
    1437 
    1438 #elif defined (TARGET_SH4)
    1439 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
    1440                                     int is_write, sigset_t *old_set,
    1441                                     void *puc)
    1442 {
    1443     TranslationBlock *tb;
    1444     int ret;
    1445 
    1446     if (cpu_single_env)
    1447         env = cpu_single_env; /* XXX: find a correct solution for multithread */
    1448 #if defined(DEBUG_SIGNAL)
    1449     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
    1450            pc, address, is_write, *(unsigned long *)old_set);
    1451 #endif
    1452     /* XXX: locking issue */
    1453     if (is_write && page_unprotect(h2g(address), pc, puc)) {
    1454         return 1;
    1455     }
    1456 
    1457     /* see if it is an MMU fault */
    1458     ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    1459     if (ret < 0)
    1460         return 0; /* not an MMU fault */
    1461     if (ret == 0)
    1462         return 1; /* the MMU fault was handled without causing real CPU fault */
    1463 
    1464     /* now we have a real cpu fault */
    1465     tb = tb_find_pc(pc);
    1466     if (tb) {
    1467         /* the PC is inside the translated code. It means that we have
    1468            a virtual CPU fault */
    1469         cpu_restore_state(tb, env, pc, puc);
    1470     }
    1471 #if 0
    1472         printf("PF exception: NIP=0x%08x error=0x%x %p\n",
    1473                env->nip, env->error_code, tb);
    1474 #endif
    1475     /* we restore the process signal mask as the sigreturn should
    1476        do it (XXX: use sigsetjmp) */
    1477     sigprocmask(SIG_SETMASK, old_set, NULL);
    1478     cpu_loop_exit();
    1479     /* never comes here */
    1480     return 1;
    1481 }
    1482 
    1483 #elif defined (TARGET_ALPHA)
    1484 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
    1485                                     int is_write, sigset_t *old_set,
    1486                                     void *puc)
    1487 {
    1488     TranslationBlock *tb;
    1489     int ret;
    1490 
    1491     if (cpu_single_env)
    1492         env = cpu_single_env; /* XXX: find a correct solution for multithread */
    1493 #if defined(DEBUG_SIGNAL)
    1494     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
    1495            pc, address, is_write, *(unsigned long *)old_set);
    1496 #endif
    1497     /* XXX: locking issue */
    1498     if (is_write && page_unprotect(h2g(address), pc, puc)) {
    1499         return 1;
    1500     }
    1501 
    1502     /* see if it is an MMU fault */
    1503     ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    1504     if (ret < 0)
    1505         return 0; /* not an MMU fault */
    1506     if (ret == 0)
    1507         return 1; /* the MMU fault was handled without causing real CPU fault */
    1508 
    1509     /* now we have a real cpu fault */
    1510     tb = tb_find_pc(pc);
    1511     if (tb) {
    1512         /* the PC is inside the translated code. It means that we have
    1513            a virtual CPU fault */
    1514         cpu_restore_state(tb, env, pc, puc);
    1515     }
    1516 #if 0
    1517         printf("PF exception: NIP=0x%08x error=0x%x %p\n",
    1518                env->nip, env->error_code, tb);
    1519 #endif
    1520     /* we restore the process signal mask as the sigreturn should
    1521        do it (XXX: use sigsetjmp) */
    1522     sigprocmask(SIG_SETMASK, old_set, NULL);
    1523     cpu_loop_exit();
    1524     /* never comes here */
    1525     return 1;
    1526 }
    1527 #elif defined (TARGET_CRIS)
    1528 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
    1529                                     int is_write, sigset_t *old_set,
    1530                                     void *puc)
    1531 {
    1532     TranslationBlock *tb;
    1533     int ret;
    1534 
    1535     if (cpu_single_env)
    1536         env = cpu_single_env; /* XXX: find a correct solution for multithread */
    1537 #if defined(DEBUG_SIGNAL)
    1538     printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
    1539            pc, address, is_write, *(unsigned long *)old_set);
    1540 #endif
    1541     /* XXX: locking issue */
    1542     if (is_write && page_unprotect(h2g(address), pc, puc)) {
    1543         return 1;
    1544     }
    1545 
    1546     /* see if it is an MMU fault */
    1547     ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    1548     if (ret < 0)
    1549         return 0; /* not an MMU fault */
    1550     if (ret == 0)
    1551         return 1; /* the MMU fault was handled without causing real CPU fault */
    1552 
    1553     /* now we have a real cpu fault */
    1554     tb = tb_find_pc(pc);
    1555     if (tb) {
    1556         /* the PC is inside the translated code. It means that we have
    1557            a virtual CPU fault */
    1558         cpu_restore_state(tb, env, pc, puc);
    1559     }
    1560     /* we restore the process signal mask as the sigreturn should
    1561        do it (XXX: use sigsetjmp) */
    1562     sigprocmask(SIG_SETMASK, old_set, NULL);
    1563     cpu_loop_exit();
    1564     /* never comes here */
    1565     return 1;
    1566 }
    1567 
    1568 #else
    1569 #error unsupported target CPU
    1570 #endif
    15711155
    15721156#if defined(__i386__)
     
    15781162# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
    15791163# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
     1164# define MASK_sig(context)    ((context)->uc_sigmask)
     1165#elif defined (__NetBSD__)
     1166# include <ucontext.h>
     1167
     1168# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
     1169# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
     1170# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
     1171# define MASK_sig(context)    ((context)->uc_sigmask)
     1172#elif defined (__FreeBSD__) || defined(__DragonFly__)
     1173# include <ucontext.h>
     1174
     1175# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
     1176# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
     1177# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
    15801178# define MASK_sig(context)    ((context)->uc_sigmask)
    15811179#elif defined(__OpenBSD__)
     
    15951193{
    15961194    siginfo_t *info = pinfo;
    1597 #if defined(__OpenBSD__)
     1195#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
     1196    ucontext_t *uc = puc;
     1197#elif defined(__OpenBSD__)
    15981198    struct sigcontext *uc = puc;
    15991199#else
     
    16291229#define ERROR_sig(context)    ((context)->sc_err)
    16301230#define MASK_sig(context)     ((context)->sc_mask)
     1231#elif defined (__FreeBSD__) || defined(__DragonFly__)
     1232#include <ucontext.h>
     1233
     1234#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
     1235#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
     1236#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
     1237#define MASK_sig(context)     ((context)->uc_sigmask)
    16311238#else
    16321239#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
     
    16411248    siginfo_t *info = pinfo;
    16421249    unsigned long pc;
    1643 #ifdef __NetBSD__
     1250#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    16441251    ucontext_t *uc = puc;
    16451252#elif defined(__OpenBSD__)
     
    17671374    int is_write;
    17681375    uint32_t insn;
    1769 #if !defined(__arch64__) || defined(HOST_SOLARIS)
     1376#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    17701377    uint32_t *regs = (uint32_t *)(info + 1);
    17711378    void *sigmask = (regs + 20);
  • trunk/src/recompiler/cutils.c

    r36175 r37675  
    2626
    2727#ifdef VBOX
    28 #include "osdep.h"
     28# include "osdep.h"
    2929
    3030
     
    612612
    613613#ifndef VBOX
     614/*
     615 * Make sure data goes on disk, but if possible do not bother to
     616 * write out the inode just for timestamp updates.
     617 *
     618 * Unfortunately even in 2009 many operating systems do not support
     619 * fdatasync and have to fall back to fsync.
     620 */
     621int qemu_fdatasync(int fd)
     622{
     623#ifdef CONFIG_FDATASYNC
     624    return fdatasync(fd);
     625#else
     626    return fsync(fd);
     627#endif
     628}
     629
    614630/* io vectors */
    615631
     
    646662    qiov->size += len;
    647663    ++qiov->niov;
     664}
     665
     666/*
     667 * Copies iovecs from src to the end dst until src is completely copied or the
     668 * total size of the copied iovec reaches size. The size of the last copied
     669 * iovec is changed in order to fit the specified total size if it isn't a
     670 * perfect fit already.
     671 */
     672void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size)
     673{
     674    int i;
     675    size_t done;
     676
     677    assert(dst->nalloc != -1);
     678
     679    done = 0;
     680    for (i = 0; (i < src->niov) && (done != size); i++) {
     681        if (done + src->iov[i].iov_len > size) {
     682            qemu_iovec_add(dst, src->iov[i].iov_base, size - done);
     683            break;
     684        } else {
     685            qemu_iovec_add(dst, src->iov[i].iov_base, src->iov[i].iov_len);
     686        }
     687        done += src->iov[i].iov_len;
     688    }
    648689}
    649690
  • trunk/src/recompiler/def-helper.h

    r36170 r37675  
    7979#define dh_retvar_decl0_i32 TCGv_i32 retval
    8080#define dh_retvar_decl0_i64 TCGv_i64 retval
    81 #define dh_retvar_decl0_ptr TCGv_iptr retval
     81#define dh_retvar_decl0_ptr TCGv_ptr retval
    8282#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))
    8383
     
    8585#define dh_retvar_decl_i32 TCGv_i32 retval,
    8686#define dh_retvar_decl_i64 TCGv_i64 retval,
    87 #define dh_retvar_decl_ptr TCGv_iptr retval,
     87#define dh_retvar_decl_ptr TCGv_ptr retval,
    8888#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
    8989
  • trunk/src/recompiler/disas.h

    r36175 r37675  
    44#include "qemu-common.h"
    55
     6#ifdef NEED_CPU_H
    67/* Disassemble this for me please... (debugging). */
    78void disas(FILE *out, void *code, unsigned long size);
     
    1415                   target_ulong pc, int nb_insn, int is_physical, int flags);
    1516#endif
    16 #endif
     17#endif /*!VBOX*/
    1718
    1819/* Look up symbol for debugging purpose.  Returns "" if unknown. */
    1920const char *lookup_symbol(target_ulong orig_addr);
     21#endif
    2022
    2123struct syminfo;
     
    2325struct elf64_sym;
    2426
    25 typedef const char *(*lookup_symbol_t)(struct syminfo *s, target_ulong orig_addr);
     27typedef const char *(*lookup_symbol_t)(struct syminfo *s, target_phys_addr_t orig_addr);
    2628
    2729struct syminfo {
  • trunk/src/recompiler/dyngen-exec.h

    r36176 r37675  
    4646#include <stdint.h>
    4747
    48 
    4948#ifdef __OpenBSD__
    5049#include <sys/types.h>
     
    5453typedef void * host_reg_t;
    5554
    56 #ifdef HOST_BSD
     55#ifdef CONFIG_BSD
    5756typedef struct __sFILE FILE;
    5857#else
     
    6261extern int fputs(const char *, FILE *);
    6362extern int printf(const char *, ...);
    64 #undef NULL
    65 #define NULL 0
    6663
    6764#else  /* VBOX */
     
    10198#define AREG2 "r15"
    10299#elif defined(__mips__)
    103 #define AREG0 "fp"
    104 #define AREG1 "s0"
    105 #define AREG2 "s1"
     100#define AREG0 "s0"
     101#define AREG1 "s1"
     102#define AREG2 "fp"
    106103#elif defined(__sparc__)
    107 #ifdef HOST_SOLARIS
     104#ifdef CONFIG_SOLARIS
    108105#define AREG0 "g2"
    109106#define AREG1 "g3"
     
    149146/* The return address may point to the start of the next instruction.
    150147   Subtracting one gets us the call instruction itself.  */
    151 #if defined(__s390__)
     148#if defined(__s390__) && !defined(__s390x__)
    152149# define GETPC() ((void*)(((unsigned long)__builtin_return_address(0) & 0x7fffffffUL) - 1))
    153150#elif defined(__arm__)
  • trunk/src/recompiler/elf.h

    r36175 r37675  
    455455#define R_PPC_SECTOFF_HA        36
    456456/* Keep this the last entry.  */
     457#ifndef R_PPC_NUM
    457458#define R_PPC_NUM               37
     459#endif
    458460
    459461/* ARM specific declarations */
  • trunk/src/recompiler/exec-all.h

    r36175 r37675  
    6060/* A Call op needs up to 6 + 2N parameters (N = number of arguments).  */
    6161#define MAX_OPC_PARAM 10
    62 #define OPC_BUF_SIZE 512
     62#define OPC_BUF_SIZE 640
    6363#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
    6464
    6565/* Maximum size a TCG op can expand to.  This is complicated because a
    66    single op may require several host instructions and regirster reloads.
    67    For now take a wild guess at 128 bytes, which should allow at least
     66   single op may require several host instructions and register reloads.
     67   For now take a wild guess at 192 bytes, which should allow at least
    6868   a couple of fixup instructions per argument.  */
    69 #define TCG_MAX_OP_SIZE 128
     69#define TCG_MAX_OP_SIZE 192
    7070
    7171#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
     
    137137#endif
    138138
    139 #if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__)
    140 #define USE_DIRECT_JUMP
    141 #endif
    142 #if defined(__i386__) && !defined(_WIN32)
     139#if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__) || defined(__i386__)
    143140#define USE_DIRECT_JUMP
    144141#endif
     
    400397#endif
    401398
    402 #ifdef CONFIG_KQEMU
    403 #define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
    404 
    405 #define MSR_QPI_COMMBASE 0xfabe0010
    406 
    407 int kqemu_init(CPUState *env);
    408 int kqemu_cpu_exec(CPUState *env);
    409 void kqemu_flush_page(CPUState *env, target_ulong addr);
    410 void kqemu_flush(CPUState *env, int global);
    411 void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
    412 void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
    413 void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
    414                         ram_addr_t phys_offset);
    415 void kqemu_cpu_interrupt(CPUState *env);
    416 void kqemu_record_dump(void);
    417 
    418 extern uint32_t kqemu_comm_base;
    419 
    420 extern ram_addr_t kqemu_phys_ram_size;
    421 extern uint8_t *kqemu_phys_ram_base;
    422 
    423 static inline int kqemu_is_ok(CPUState *env)
    424 {
    425     return(env->kqemu_enabled &&
    426            (env->cr[0] & CR0_PE_MASK) &&
    427            !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
    428            (env->eflags & IF_MASK) &&
    429            !(env->eflags & VM_MASK) &&
    430            (env->kqemu_enabled == 2 ||
    431             ((env->hflags & HF_CPL_MASK) == 3 &&
    432              (env->eflags & IOPL_MASK) != IOPL_MASK)));
    433 }
    434 
    435 #endif
    436 
    437399typedef void (CPUDebugExcpHandler)(CPUState *env);
    438400
     
    442404#ifndef VBOX
    443405extern int singlestep;
    444 #endif
    445 
    446 #endif
     406#endif /*!VBOX*/
     407
     408#endif
  • trunk/src/recompiler/exec.c

    r36490 r37675  
    9292#elif defined(TARGET_PPC64)
    9393#define TARGET_PHYS_ADDR_SPACE_BITS 42
    94 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
     94#elif defined(TARGET_X86_64)
    9595#define TARGET_PHYS_ADDR_SPACE_BITS 42
    96 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
     96#elif defined(TARGET_I386)
    9797#define TARGET_PHYS_ADDR_SPACE_BITS 36
    9898#else
    99 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
    10099#define TARGET_PHYS_ADDR_SPACE_BITS 32
    101100#endif
     
    261260typedef struct subpage_t {
    262261    target_phys_addr_t base;
    263     CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    264     CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
     262    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
     263    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    265264    void *opaque[TARGET_PAGE_SIZE][2][4];
    266265    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
     
    514513
    515514#ifdef VBOX /*  We don't need such huge codegen buffer size, as execute
    516                 most of the code  in raw or hwacc mode. */
     515                most of the code in raw or hwacc mode. */
    517516#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
    518517#else  /* !VBOX */
     
    554553        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    555554#else
    556         /* XXX: needs ajustments */
    557         code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
     555        /* XXX: needs adjustments */
     556        code_gen_buffer_size = (unsigned long)(ram_size / 4);
    558557#endif
    559558    }
     
    604603        }
    605604    }
    606 #elif defined(__FreeBSD__) || defined(__DragonFly__)
     605#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    607606    {
    608607        int flags;
     
    632631# endif /* !VBOX */
    633632#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    634 #ifndef VBOX
     633#ifndef VBOX /** @todo r=bird: why are we different? */
    635634    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    636635#else
     
    660659#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    661660
    662 #define CPU_COMMON_SAVE_VERSION 1
    663 
    664 static void cpu_common_save(QEMUFile *f, void *opaque)
     661static void cpu_common_pre_save(void *opaque)
    665662{
    666663    CPUState *env = opaque;
    667664
    668     cpu_synchronize_state(env, 0);
    669 
    670     qemu_put_be32s(f, &env->halted);
    671     qemu_put_be32s(f, &env->interrupt_request);
    672 }
    673 
    674 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
     665    cpu_synchronize_state(env);
     666}
     667
     668static int cpu_common_pre_load(void *opaque)
    675669{
    676670    CPUState *env = opaque;
    677671
    678     if (version_id != CPU_COMMON_SAVE_VERSION)
    679         return -EINVAL;
    680 
    681     qemu_get_be32s(f, &env->halted);
    682     qemu_get_be32s(f, &env->interrupt_request);
     672    cpu_synchronize_state(env);
     673    return 0;
     674}
     675
     676static int cpu_common_post_load(void *opaque, int version_id)
     677{
     678    CPUState *env = opaque;
     679
    683680    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
    684681       version_id is increased. */
    685682    env->interrupt_request &= ~0x01;
    686683    tlb_flush(env, 1);
    687     cpu_synchronize_state(env, 1);
    688684
    689685    return 0;
    690686}
     687
     688static const VMStateDescription vmstate_cpu_common = {
     689    .name = "cpu_common",
     690    .version_id = 1,
     691    .minimum_version_id = 1,
     692    .minimum_version_id_old = 1,
     693    .pre_save = cpu_common_pre_save,
     694    .pre_load = cpu_common_pre_load,
     695    .post_load = cpu_common_post_load,
     696    .fields      = (VMStateField []) {
     697        VMSTATE_UINT32(halted, CPUState),
     698        VMSTATE_UINT32(interrupt_request, CPUState),
     699        VMSTATE_END_OF_LIST()
     700    }
     701};
    691702#endif
    692703
     
    723734    env->cpu_index = cpu_index;
    724735    env->numa_node = 0;
    725     TAILQ_INIT(&env->breakpoints);
    726     TAILQ_INIT(&env->watchpoints);
     736    QTAILQ_INIT(&env->breakpoints);
     737    QTAILQ_INIT(&env->watchpoints);
    727738    *penv = env;
    728739#ifndef VBOX
     
    731742#endif
    732743#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    733     register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
    734                     cpu_common_save, cpu_common_load, env);
     744    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    735745    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
    736746                    cpu_save, cpu_load, env);
     
    15621572    /* keep all GDB-injected watchpoints in front */
    15631573    if (flags & BP_GDB)
    1564         TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
     1574        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    15651575    else
    1566         TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
     1576        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    15671577
    15681578    tlb_flush_page(env, addr);
     
    15801590    CPUWatchpoint *wp;
    15811591
    1582     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
     1592    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
    15831593        if (addr == wp->vaddr && len_mask == wp->len_mask
    15841594                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
     
    15971607void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
    15981608{
    1599     TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
     1609    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
    16001610
    16011611    tlb_flush_page(env, watchpoint->vaddr);
     
    16091619    CPUWatchpoint *wp, *next;
    16101620
    1611     TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
     1621    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
    16121622        if (wp->flags & mask)
    16131623            cpu_watchpoint_remove_by_ref(env, wp);
     
    16291639    /* keep all GDB-injected breakpoints in front */
    16301640    if (flags & BP_GDB)
    1631         TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
     1641        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    16321642    else
    1633         TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
     1643        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    16341644
    16351645    breakpoint_invalidate(env, pc);
     
    16491659    CPUBreakpoint *bp;
    16501660
    1651     TAILQ_FOREACH(bp, &env->breakpoints, entry) {
     1661    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
    16521662        if (bp->pc == pc && bp->flags == flags) {
    16531663            cpu_breakpoint_remove_by_ref(env, bp);
     
    16691679{
    16701680#if defined(TARGET_HAS_ICE)
    1671     TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
     1681    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
    16721682
    16731683    breakpoint_invalidate(env, breakpoint->pc);
     
    16831693    CPUBreakpoint *bp, *next;
    16841694
    1685     TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
     1695    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
    16861696        if (bp->flags & mask)
    16871697            cpu_breakpoint_remove_by_ref(env, bp);
     
    17261736            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
    17271737        }
    1728 #else
     1738#elif !defined(_WIN32)
     1739        /* Win32 doesn't support line-buffering and requires size >= 2 */
    17291740        setvbuf(logfile, NULL, _IOLBF, 0);
    17301741#endif
     
    17511762static void cpu_unlink_tb(CPUState *env)
    17521763{
    1753 #if defined(USE_NPTL)
     1764#if defined(CONFIG_USE_NPTL)
    17541765    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
    17551766       problem and hope the cpu will stop of its own accord.  For userspace
     
    19371948#endif /* !VBOX */
    19381949
    1939 #ifndef VBOX
     1950#ifndef VBOX /* not needed */
    19401951CPUState *cpu_copy(CPUState *env)
    19411952{
     
    19571968       Note: Once we support ptrace with hw-debug register access, make sure
    19581969       BP_CPU break/watchpoints are handled correctly on clone. */
    1959     TAILQ_INIT(&env->breakpoints);
    1960     TAILQ_INIT(&env->watchpoints);
     1970    QTAILQ_INIT(&env->breakpoints);
     1971    QTAILQ_INIT(&env->watchpoints);
    19611972#if defined(TARGET_HAS_ICE)
    1962     TAILQ_FOREACH(bp, &env->breakpoints, entry) {
     1973    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
    19631974        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    19641975    }
    1965     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
     1976    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
    19661977        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
    19671978                              wp->flags, NULL);
     
    20242035    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    20252036
    2026 #ifdef CONFIG_KQEMU
    2027     if (env->kqemu_enabled) {
    2028         kqemu_flush(env, flush_global);
    2029     }
    2030 #endif
    20312037#ifdef VBOX
    20322038    /* inform raw mode about TLB flush */
     
    20662072
    20672073    tlb_flush_jmp_cache(env, addr);
    2068 
    2069 #ifdef CONFIG_KQEMU
    2070     if (env->kqemu_enabled) {
    2071         kqemu_flush_page(env, addr);
    2072     }
    2073 #endif
    20742074}
    20752075
     
    21312131        return;
    21322132    len = length >> TARGET_PAGE_BITS;
    2133 #ifdef CONFIG_KQEMU
    2134     /* XXX: should not depend on cpu context */
    2135     env = first_cpu;
    2136     if (env->kqemu_enabled) {
    2137         ram_addr_t addr;
    2138         addr = start;
    2139         for(i = 0; i < len; i++) {
    2140             kqemu_set_notdirty(env, addr);
    2141             addr += TARGET_PAGE_SIZE;
    2142         }
    2143     }
    2144 #endif
    21452133    mask = ~dirty_flags;
    21462134    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
     
    23672355    /* Make accesses to pages with watchpoints go via the
    23682356       watchpoint trap routines.  */
    2369     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
     2357    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
    23702358        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
    23712359            iotlb = io_mem_watch + paddr;
     
    26722660    } while (0)
    26732661
    2674 /* register physical memory. 'size' must be a multiple of the target
    2675    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
     2662/* register physical memory.
     2663   For RAM, 'size' must be a multiple of the target page size.
     2664   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
    26762665   io memory page.  The address used when calling the IO function is
    26772666   the offset from the start of the region, plus region_offset.  Both
     
    26902679    void *subpage;
    26912680
    2692 #ifdef CONFIG_KQEMU
    2693     /* XXX: should not depend on cpu context */
    2694     env = first_cpu;
    2695     if (env->kqemu_enabled) {
    2696         kqemu_set_phys_mem(start_addr, size, phys_offset);
    2697     }
    2698 #endif
    26992681    if (kvm_enabled())
    27002682        kvm_set_phys_mem(start_addr, size, phys_offset);
     
    27922774}
    27932775
    2794 #ifdef CONFIG_KQEMU
    2795 /* XXX: better than nothing */
    2796 static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
    2797 {
    2798     ram_addr_t addr;
    2799     if ((last_ram_offset + size) > kqemu_phys_ram_size) {
    2800         fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
    2801                 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
    2802         abort();
    2803     }
    2804     addr = last_ram_offset;
    2805     last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
    2806     return addr;
    2807 }
    2808 #endif
    2809 
    28102776ram_addr_t qemu_ram_alloc(ram_addr_t size)
    28112777{
    28122778    RAMBlock *new_block;
    2813 
    2814 #ifdef CONFIG_KQEMU
    2815     if (kqemu_phys_ram_base) {
    2816         return kqemu_ram_alloc(size);
    2817     }
    2818 #endif
    28192779
    28202780    size = TARGET_PAGE_ALIGN(size);
    28212781    new_block = qemu_malloc(sizeof(*new_block));
    28222782
     2783#if defined(TARGET_S390X) && defined(CONFIG_KVM)
     2784    /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
     2785    new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
     2786                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
     2787#else
    28232788    new_block->host = qemu_vmalloc(size);
     2789#endif
     2790#ifdef MADV_MERGEABLE
     2791    madvise(new_block->host, size, MADV_MERGEABLE);
     2792#endif
    28242793    new_block->offset = last_ram_offset;
    28252794    new_block->length = size;
     
    28592828    RAMBlock **prevp;
    28602829    RAMBlock *block;
    2861 
    2862 #ifdef CONFIG_KQEMU
    2863     if (kqemu_phys_ram_base) {
    2864         return kqemu_phys_ram_base + addr;
    2865     }
    2866 #endif
    28672830
    28682831    prev = NULL;
     
    28982861    uint8_t *host = ptr;
    28992862
    2900 #ifdef CONFIG_KQEMU
    2901     if (kqemu_phys_ram_base) {
    2902         return host - kqemu_phys_ram_base;
    2903     }
    2904 #endif
    2905 
    29062863    prev = NULL;
    29072864    prevp = &ram_blocks;
     
    29282885    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
    29292886#endif
    2930 #if defined(TARGET_SPARC)
     2887#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    29312888    do_unassigned_access(addr, 0, 0, 0, 1);
    29322889#endif
     
    29392896    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
    29402897#endif
    2941 #if defined(TARGET_SPARC)
     2898#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    29422899    do_unassigned_access(addr, 0, 0, 0, 2);
    29432900#endif
     
    29502907    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
    29512908#endif
    2952 #if defined(TARGET_SPARC)
     2909#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    29532910    do_unassigned_access(addr, 0, 0, 0, 4);
    29542911#endif
     
    29612918    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
    29622919#endif
    2963 #if defined(TARGET_SPARC)
     2920#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    29642921    do_unassigned_access(addr, 1, 0, 0, 1);
    29652922#endif
     
    29712928    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
    29722929#endif
    2973 #if defined(TARGET_SPARC)
     2930#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    29742931    do_unassigned_access(addr, 1, 0, 0, 2);
    29752932#endif
     
    29812938    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
    29822939#endif
    2983 #if defined(TARGET_SPARC)
     2940#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    29842941    do_unassigned_access(addr, 1, 0, 0, 4);
    29852942#endif
    29862943}
    29872944
    2988 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
     2945static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    29892946    unassigned_mem_readb,
    29902947    unassigned_mem_readw,
     
    29922949};
    29932950
    2994 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
     2951static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    29952952    unassigned_mem_writeb,
    29962953    unassigned_mem_writew,
     
    30663023    stw_p(qemu_get_ram_ptr(ram_addr), val);
    30673024#endif
    3068 #ifdef CONFIG_KQEMU
    3069     if (cpu_single_env->kqemu_enabled &&
    3070         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
    3071         kqemu_modify_page(cpu_single_env, ram_addr);
    3072 #endif
    30733025    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    30743026#ifdef VBOX
     
    31083060    stl_p(qemu_get_ram_ptr(ram_addr), val);
    31093061#endif
    3110 #ifdef CONFIG_KQEMU
    3111     if (cpu_single_env->kqemu_enabled &&
    3112         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
    3113         kqemu_modify_page(cpu_single_env, ram_addr);
    3114 #endif
    31153062    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    31163063#ifdef VBOX
     
    31243071}
    31253072
    3126 static CPUReadMemoryFunc *error_mem_read[3] = {
     3073static CPUReadMemoryFunc * const error_mem_read[3] = {
    31273074    NULL, /* never used */
    31283075    NULL, /* never used */
     
    31303077};
    31313078
    3132 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
     3079static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    31333080    notdirty_mem_writeb,
    31343081    notdirty_mem_writew,
     
    31543101    }
    31553102    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    3156     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
     3103    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
    31573104        if ((vaddr == (wp->vaddr & len_mask) ||
    31583105             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
     
    32233170}
    32243171
    3225 static CPUReadMemoryFunc *watch_mem_read[3] = {
     3172static CPUReadMemoryFunc * const watch_mem_read[3] = {
    32263173    watch_mem_readb,
    32273174    watch_mem_readw,
     
    32293176};
    32303177
    3231 static CPUWriteMemoryFunc *watch_mem_write[3] = {
     3178static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    32323179    watch_mem_writeb,
    32333180    watch_mem_writew,
     
    33213268}
    33223269
    3323 static CPUReadMemoryFunc *subpage_read[] = {
     3270static CPUReadMemoryFunc * const subpage_read[] = {
    33243271    &subpage_readb,
    33253272    &subpage_readw,
     
    33273274};
    33283275
    3329 static CPUWriteMemoryFunc *subpage_write[] = {
     3276static CPUWriteMemoryFunc * const subpage_write[] = {
    33303277    &subpage_writeb,
    33313278    &subpage_writew,
     
    34083355   returned if error. */
    34093356static int cpu_register_io_memory_fixed(int io_index,
    3410                                         CPUReadMemoryFunc **mem_read,
    3411                                         CPUWriteMemoryFunc **mem_write,
     3357                                        CPUReadMemoryFunc * const *mem_read,
     3358                                        CPUWriteMemoryFunc * const *mem_write,
    34123359                                        void *opaque)
    34133360{
     
    34343381}
    34353382
    3436 int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
    3437                            CPUWriteMemoryFunc **mem_write,
     3383int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
     3384                           CPUWriteMemoryFunc * const *mem_write,
    34383385                           void *opaque)
    34393386{
     
    34663413    io_mem_watch = cpu_register_io_memory(watch_mem_read,
    34673414                                          watch_mem_write, NULL);
    3468 #ifdef CONFIG_KQEMU
    3469     if (kqemu_phys_ram_base) {
    3470         /* alloc dirty bits array */
    3471         phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    3472         memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    3473     }
    3474 #endif
    34753415}
    34763416
     
    37043644    void *opaque;
    37053645    void (*callback)(void *opaque);
    3706     LIST_ENTRY(MapClient) link;
     3646    QLIST_ENTRY(MapClient) link;
    37073647} MapClient;
    37083648
    3709 static LIST_HEAD(map_client_list, MapClient) map_client_list
    3710     = LIST_HEAD_INITIALIZER(map_client_list);
     3649static QLIST_HEAD(map_client_list, MapClient) map_client_list
     3650    = QLIST_HEAD_INITIALIZER(map_client_list);
    37113651
    37123652void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
     
    37163656    client->opaque = opaque;
    37173657    client->callback = callback;
    3718     LIST_INSERT_HEAD(&map_client_list, client, link);
     3658    QLIST_INSERT_HEAD(&map_client_list, client, link);
    37193659    return client;
    37203660}
     
    37243664    MapClient *client = (MapClient *)_client;
    37253665
    3726     LIST_REMOVE(client, link);
     3666    QLIST_REMOVE(client, link);
    37273667    qemu_free(client);
    37283668}
     
    37323672    MapClient *client;
    37333673
    3734     while (!LIST_EMPTY(&map_client_list)) {
    3735         client = LIST_FIRST(&map_client_list);
     3674    while (!QLIST_EMPTY(&map_client_list)) {
     3675        client = QLIST_FIRST(&map_client_list);
    37363676        client->callback(client->opaque);
    37373677        cpu_unregister_map_client(client);
     
    38323772        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    38333773    }
    3834     qemu_free(bounce.buffer);
     3774    qemu_vfree(bounce.buffer);
    38353775    bounce.buffer = NULL;
    38363776    cpu_notify_map_clients();
  • trunk/src/recompiler/fpu/softfloat-native.c

    r36175 r37675  
    33#include "softfloat.h"
    44#include <math.h>
    5 #if defined(HOST_SOLARIS)
     5#if defined(CONFIG_SOLARIS)
    66#include <fenv.h>
    77#endif
     
    1010{
    1111    STATUS(float_rounding_mode) = val;
    12 #if defined(HOST_BSD) && !defined(__APPLE__) ||        \
    13     (defined(HOST_SOLARIS) && (HOST_SOLARIS < 10 || HOST_SOLARIS == 11)) /* VBOX adds sol 11 */
     12#if (defined(CONFIG_BSD) && !defined(__APPLE__) && !defined(__GLIBC__)) || \
     13    (defined(CONFIG_SOLARIS) && (CONFIG_SOLARIS_VERSION < 10 || CONFIG_SOLARIS_VERSION == 11)) /* VBOX adds sol 11 */
    1414    fpsetround(val);
    1515#elif defined(__arm__)
     
    2727#endif
    2828
    29 #if defined(HOST_BSD) || (defined(HOST_SOLARIS) && HOST_SOLARIS < 10)
     29#if defined(CONFIG_BSD) || \
     30    (defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10)
    3031#define lrint(d)                ((int32_t)rint(d))
    3132#define llrint(d)               ((int64_t)rint(d))
     
    4344#  define remainderl(fa, fb)   (remainder(fa, fb))
    4445# endif /* VBOX && _BSD */
    45 
    46 #if !defined(__sparc__) && defined(HOST_SOLARIS) && HOST_SOLARIS < 10
     46#if !defined(__sparc__) && \
     47    (defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10)
    4748extern long double rintl(long double);
    4849extern long double scalbnl(long double, int);
     
    359360| Software IEC/IEEE double-precision operations.
    360361*----------------------------------------------------------------------------*/
    361 #if defined(__sun__) && defined(HOST_SOLARIS) && HOST_SOLARIS < 10
     362#if defined(__sun__) && \
     363    (defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10)
    362364static inline float64 trunc(float64 x)
    363365{
  • trunk/src/recompiler/fpu/softfloat-native.h

    r36414 r37675  
    2020 *   are defined in <iso/math_c99.h> with a compiler directive
    2121 */
    22 #if defined(HOST_SOLARIS) && (( HOST_SOLARIS <= 9 ) || ((HOST_SOLARIS >= 10) \
    23                                                         && (__GNUC__ < 4))) \
     22#if defined(CONFIG_SOLARIS) && \
     23           ((CONFIG_SOLARIS_VERSION <= 9 ) || \
     24           ((CONFIG_SOLARIS_VERSION >= 10) && (__GNUC__ < 4))) \
    2425    || (defined(__OpenBSD__) && (OpenBSD < 200811))
    2526/*
     
    6263#endif
    6364
    64 #if defined(__sun__) && !defined(NEED_LIBSUNMATH)
     65#if defined(__sun__) && !defined(CONFIG_NEEDS_LIBSUNMATH)
    6566
    6667#ifndef isnan
     
    112113| Software IEC/IEEE floating-point rounding mode.
    113114*----------------------------------------------------------------------------*/
    114 #if (defined(HOST_BSD) && !defined(__APPLE__)) || defined(HOST_SOLARIS)
     115#if (defined(CONFIG_BSD) && !defined(__APPLE__) && !defined(__GLIBC__)) \
     116    || defined(CONFIG_SOLARIS)
    115117#if defined(__OpenBSD__)
    116118#define FE_RM FP_RM
  • trunk/src/recompiler/fpu/softfloat.c

    r36170 r37675  
    24562456    return roundAndPackFloat32( aSign, aExp, zSig STATUS_VAR );
    24572457
     2458}
     2459
     2460
     2461/*----------------------------------------------------------------------------
     2462| Packs the sign `zSign', exponent `zExp', and significand `zSig' into a
     2463| half-precision floating-point value, returning the result.  After being
     2464| shifted into the proper positions, the three fields are simply added
     2465| together to form the result.  This means that any integer portion of `zSig'
     2466| will be added into the exponent.  Since a properly normalized significand
     2467| will have an integer portion equal to 1, the `zExp' input should be 1 less
     2468| than the desired result exponent whenever `zSig' is a complete, normalized
     2469| significand.
     2470*----------------------------------------------------------------------------*/
     2471static bits16 packFloat16(flag zSign, int16 zExp, bits16 zSig)
     2472{
     2473    return (((bits32)zSign) << 15) + (((bits32)zExp) << 10) + zSig;
     2474}
     2475
     2476/* Half precision floats come in two formats: standard IEEE and "ARM" format.
     2477   The latter gains extra exponent range by omitting the NaN/Inf encodings.  */
     2478
     2479float32 float16_to_float32( bits16 a, flag ieee STATUS_PARAM )
     2480{
     2481    flag aSign;
     2482    int16 aExp;
     2483    bits32 aSig;
     2484
     2485    aSign = a >> 15;
     2486    aExp = (a >> 10) & 0x1f;
     2487    aSig = a & 0x3ff;
     2488
     2489    if (aExp == 0x1f && ieee) {
     2490        if (aSig) {
     2491            /* Make sure correct exceptions are raised.  */
     2492            float32ToCommonNaN(a STATUS_VAR);
     2493            aSig |= 0x200;
     2494        }
     2495        return packFloat32(aSign, 0xff, aSig << 13);
     2496    }
     2497    if (aExp == 0) {
     2498        int8 shiftCount;
     2499
     2500        if (aSig == 0) {
     2501            return packFloat32(aSign, 0, 0);
     2502        }
     2503
     2504        shiftCount = countLeadingZeros32( aSig ) - 21;
     2505        aSig = aSig << shiftCount;
     2506        aExp = -shiftCount;
     2507    }
     2508    return packFloat32( aSign, aExp + 0x70, aSig << 13);
     2509}
     2510
     2511bits16 float32_to_float16( float32 a, flag ieee STATUS_PARAM)
     2512{
     2513    flag aSign;
     2514    int16 aExp;
     2515    bits32 aSig;
     2516    bits32 mask;
     2517    bits32 increment;
     2518    int8 roundingMode;
     2519
     2520    aSig = extractFloat32Frac( a );
     2521    aExp = extractFloat32Exp( a );
     2522    aSign = extractFloat32Sign( a );
     2523    if ( aExp == 0xFF ) {
     2524        if (aSig) {
     2525            /* Make sure correct exceptions are raised.  */
     2526            float32ToCommonNaN(a STATUS_VAR);
     2527            aSig |= 0x00400000;
     2528        }
     2529        return packFloat16(aSign, 0x1f, aSig >> 13);
     2530    }
     2531    if (aExp == 0 && aSign == 0) {
     2532        return packFloat16(aSign, 0, 0);
     2533    }
     2534    /* Decimal point between bits 22 and 23.  */
     2535    aSig |= 0x00800000;
     2536    aExp -= 0x7f;
     2537    if (aExp < -14) {
     2538        mask = 0x007fffff;
     2539        if (aExp < -24) {
     2540            aExp = -25;
     2541        } else {
     2542            mask >>= 24 + aExp;
     2543        }
     2544    } else {
     2545        mask = 0x00001fff;
     2546    }
     2547    if (aSig & mask) {
     2548        float_raise( float_flag_underflow STATUS_VAR );
     2549        roundingMode = STATUS(float_rounding_mode);
     2550        switch (roundingMode) {
     2551        case float_round_nearest_even:
     2552            increment = (mask + 1) >> 1;
     2553            if ((aSig & mask) == increment) {
     2554                increment = aSig & (increment << 1);
     2555            }
     2556            break;
     2557        case float_round_up:
     2558            increment = aSign ? 0 : mask;
     2559            break;
     2560        case float_round_down:
     2561            increment = aSign ? mask : 0;
     2562            break;
     2563        default: /* round_to_zero */
     2564            increment = 0;
     2565            break;
     2566        }
     2567        aSig += increment;
     2568        if (aSig >= 0x01000000) {
     2569            aSig >>= 1;
     2570            aExp++;
     2571        }
     2572    } else if (aExp < -14
     2573          && STATUS(float_detect_tininess) == float_tininess_before_rounding) {
     2574        float_raise( float_flag_underflow STATUS_VAR);
     2575    }
     2576
     2577    if (ieee) {
     2578        if (aExp > 15) {
     2579            float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR);
     2580            return packFloat16(aSign, 0x1f, 0);
     2581        }
     2582    } else {
     2583        if (aExp > 16) {
     2584            float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR);
     2585            return packFloat16(aSign, 0x1f, 0x3ff);
     2586        }
     2587    }
     2588    if (aExp < -24) {
     2589        return packFloat16(aSign, 0, 0);
     2590    }
     2591    if (aExp < -14) {
     2592        aSig >>= -14 - aExp;
     2593        aExp = -14;
     2594    }
     2595    return packFloat16(aSign, aExp + 14, aSig >> 13);
    24582596}
    24592597
  • trunk/src/recompiler/fpu/softfloat.h

    r36175 r37675  
    3737#endif
    3838
    39 #if defined(HOST_SOLARIS) && defined(NEEDS_LIBSUNMATH)
     39#if defined(CONFIG_SOLARIS) && defined(CONFIG_NEEDS_LIBSUNMATH)
    4040#include <sunmath.h>
    4141#endif
     
    9595#else
    9696/* native float support */
    97 #if (defined(__i386__) || defined(__x86_64__)) && (!defined(HOST_BSD) || defined(VBOX)) /** @todo VBOX: not correct on windows */
     97#if (defined(__i386__) || defined(__x86_64__)) && (!defined(CONFIG_BSD) || defined(VBOX)) /** @todo VBOX: not correct on windows */
    9898#define FLOATX80
    9999#endif
     
    155155#ifdef FLOAT128
    156156typedef struct {
    157 #ifdef WORDS_BIGENDIAN
     157#ifdef HOST_WORDS_BIGENDIAN
    158158    uint64_t high, low;
    159159#else
     
    250250float128 int64_to_float128( int64_t STATUS_PARAM );
    251251#endif
     252
     253/*----------------------------------------------------------------------------
     254| Software half-precision conversion routines.
     255*----------------------------------------------------------------------------*/
     256bits16 float32_to_float16( float32, flag STATUS_PARAM );
     257float32 float16_to_float32( bits16, flag STATUS_PARAM );
    252258
    253259/*----------------------------------------------------------------------------
  • trunk/src/recompiler/gen-icount.h

    r36170 r37675  
    1212
    1313    icount_label = gen_new_label();
    14     /* FIXME: This generates lousy code.  We can't use tcg_new_temp because
    15        count needs to live over the conditional branch.  To workaround this
    16        we allow the target to supply a convenient register temporary.  */
    17 #ifndef ICOUNT_TEMP
    1814    count = tcg_temp_local_new_i32();
    19 #else
    20     count = ICOUNT_TEMP;
    21 #endif
    2215    tcg_gen_ld_i32(count, cpu_env, offsetof(CPUState, icount_decr.u32));
    2316    /* This is a horrid hack to allow fixing up the value later.  */
     
    2720    tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
    2821    tcg_gen_st16_i32(count, cpu_env, offsetof(CPUState, icount_decr.u16.low));
    29 #ifndef ICOUNT_TEMP
    3022    tcg_temp_free_i32(count);
    31 #endif
    3223}
    3324
  • trunk/src/recompiler/host-utils.c

    r17040 r37675  
    2424 */
    2525
    26 #include "exec.h"
     26#include <stdlib.h>
     27#ifndef VBOX
     28#include <stdint.h>
     29#else
     30# include <iprt/types.h>
     31#endif
    2732#include "host-utils.h"
    2833
  • trunk/src/recompiler/host-utils.h

    r36125 r37675  
    2828#if defined(__x86_64__)
    2929#define __HAVE_FAST_MULU64__
    30 static always_inline void mulu64 (uint64_t *plow, uint64_t *phigh,
    31                                   uint64_t a, uint64_t b)
     30static inline void mulu64(uint64_t *plow, uint64_t *phigh,
     31                          uint64_t a, uint64_t b)
    3232{
    3333    __asm__ ("mul %0\n\t"
     
    3636}
    3737#define __HAVE_FAST_MULS64__
    38 static always_inline void muls64 (uint64_t *plow, uint64_t *phigh,
    39                                   int64_t a, int64_t b)
     38static inline void muls64(uint64_t *plow, uint64_t *phigh,
     39                          int64_t a, int64_t b)
    4040{
    4141    __asm__ ("imul %0\n\t"
     
    5050/* Binary search for leading zeros.  */
    5151
    52 static always_inline int clz32(uint32_t val)
     52static inline int clz32(uint32_t val)
    5353{
    5454#if QEMU_GNUC_PREREQ(3, 4)
     
    8787}
    8888
    89 static always_inline int clo32(uint32_t val)
     89static inline int clo32(uint32_t val)
    9090{
    9191    return clz32(~val);
    9292}
    9393
    94 static always_inline int clz64(uint64_t val)
     94static inline int clz64(uint64_t val)
    9595{
    9696#if QEMU_GNUC_PREREQ(3, 4)
     
    112112}
    113113
    114 static always_inline int clo64(uint64_t val)
     114static inline int clo64(uint64_t val)
    115115{
    116116    return clz64(~val);
    117117}
    118118
    119 static always_inline int ctz32 (uint32_t val)
     119static inline int ctz32(uint32_t val)
    120120{
    121121#if QEMU_GNUC_PREREQ(3, 4)
     
    129129    cnt = 0;
    130130    if (!(val & 0x0000FFFFUL)) {
    131          cnt += 16;
     131        cnt += 16;
    132132        val >>= 16;
    133      }
     133    }
    134134    if (!(val & 0x000000FFUL)) {
    135          cnt += 8;
     135        cnt += 8;
    136136        val >>= 8;
    137      }
     137    }
    138138    if (!(val & 0x0000000FUL)) {
    139          cnt += 4;
     139        cnt += 4;
    140140        val >>= 4;
    141      }
     141    }
    142142    if (!(val & 0x00000003UL)) {
    143          cnt += 2;
     143        cnt += 2;
    144144        val >>= 2;
    145      }
     145    }
    146146    if (!(val & 0x00000001UL)) {
    147          cnt++;
     147        cnt++;
    148148        val >>= 1;
    149      }
     149    }
    150150    if (!(val & 0x00000001UL)) {
    151          cnt++;
    152      }
    153 
    154      return cnt;
    155 #endif
    156  }
    157 
    158 static always_inline int cto32 (uint32_t val)
     151        cnt++;
     152    }
     153
     154    return cnt;
     155#endif
     156}
     157
     158static inline int cto32(uint32_t val)
    159159{
    160160    return ctz32(~val);
    161161}
    162162
    163 static always_inline int ctz64 (uint64_t val)
    164 {
    165 #if QEMU_GNUC_PREREQ(3, 4)
    166     if (val)
    167         return __builtin_ctz(val);
     163static inline int ctz64(uint64_t val)
     164{
     165#if QEMU_GNUC_PREREQ(3, 4)
     166    if (val)
     167        return __builtin_ctzll(val);
    168168    else
    169169        return 64;
     
    181181}
    182182
    183 static always_inline int cto64 (uint64_t val)
     183static inline int cto64(uint64_t val)
    184184{
    185185    return ctz64(~val);
    186186}
    187187
    188 static always_inline int ctpop8 (uint8_t val)
     188static inline int ctpop8(uint8_t val)
    189189{
    190190    val = (val & 0x55) + ((val >> 1) & 0x55);
     
    195195}
    196196
    197 static always_inline int ctpop16 (uint16_t val)
     197static inline int ctpop16(uint16_t val)
    198198{
    199199    val = (val & 0x5555) + ((val >> 1) & 0x5555);
     
    205205}
    206206
    207 static always_inline int ctpop32 (uint32_t val)
     207static inline int ctpop32(uint32_t val)
    208208{
    209209#if QEMU_GNUC_PREREQ(3, 4)
     
    220220}
    221221
    222 static always_inline int ctpop64 (uint64_t val)
     222static inline int ctpop64(uint64_t val)
    223223{
    224224#if QEMU_GNUC_PREREQ(3, 4)
  • trunk/src/recompiler/hostregs_helper.h

    r36175 r37675  
    11/*
    2  *  Save/restore host registrs.
     2 *  Save/restore host registers.
    33 *
    44 *  Copyright (c) 2007 CodeSourcery
  • trunk/src/recompiler/ioport.h

    r36175 r37675  
    4444
    4545
    46 /* NOTE: as these functions may be even used when there is an isa
    47    brige on non x86 targets, we always defined them */
    48 #if !defined(NO_CPU_IO_DEFS) && defined(NEED_CPU_H)
     46#ifndef VBOX
     47void cpu_outb(pio_addr_t addr, uint8_t val);
     48void cpu_outw(pio_addr_t addr, uint16_t val);
     49void cpu_outl(pio_addr_t addr, uint32_t val);
     50uint8_t cpu_inb(pio_addr_t addr);
     51uint16_t cpu_inw(pio_addr_t addr);
     52uint32_t cpu_inl(pio_addr_t addr);
     53#else
    4954void cpu_outb(CPUState *env, pio_addr_t addr, uint8_t val);
    5055void cpu_outw(CPUState *env, pio_addr_t addr, uint16_t val);
  • trunk/src/recompiler/osdep.h

    r36172 r37675  
    2020#define qemu_vprintf(pszFormat, args) \
    2121                                RTLogPrintfV((pszFormat), (args))
    22 #define qemu_printf             RTLogPrintf
     22
     23/**@todo the following macros belongs elsewhere */
    2324#define qemu_malloc(cb)         RTMemAlloc(cb)
    2425#define qemu_mallocz(cb)        RTMemAllocZ(cb)
    2526#define qemu_realloc(ptr, cb)   RTMemRealloc(ptr, cb)
    26 
    2727#define qemu_free(pv)           RTMemFree(pv)
    2828#define qemu_strdup(psz)        RTStrDup(psz)
    2929
    30 #define qemu_vmalloc(cb)        RTMemPageAlloc(cb)
    31 #define qemu_vfree(pv)          RTMemPageFree(pv, missing_size_parameter)
    32 
    33 #ifndef NULL
    34 # define NULL 0
    35 #endif
    36 
     30/* Misc wrappers */
    3731#define fflush(file)            RTLogFlush(NULL)
    3832#define printf(...)             LogIt(LOG_INSTANCE, 0, LOG_GROUP_REM_PRINTF, (__VA_ARGS__))
     
    4236#endif
    4337
    44 #define assert(cond) Assert(cond)
     38#define assert(cond)            Assert(cond)
    4539
    4640#else /* !VBOX */
    4741
    4842#include <stdarg.h>
     43#include <stddef.h>
    4944
    50 #define VBOX_ONLY(x)
    51 
     45#define VBOX_ONLY(x)             /* nike */
    5246#define qemu_snprintf snprintf   /* bird */
    5347#define qemu_vsnprintf vsnprintf /* bird */
    5448#define qemu_vprintf vprintf     /* bird */
    55 
    56 #define qemu_printf printf
    57 
    58 void *qemu_malloc(size_t size);
    59 void *qemu_mallocz(size_t size);
    60 void qemu_free(void *ptr);
    61 char *qemu_strdup(const char *str);
    62 
    63 void *qemu_vmalloc(size_t size);
    64 void qemu_vfree(void *ptr);
    65 
    66 void *get_mmap_addr(unsigned long size);
    6749
    6850#endif /* !VBOX */
     
    9577#endif
    9678
    97 #ifndef offsetof
     79#ifdef CONFIG_NEED_OFFSETOF
    9880#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *) 0)->MEMBER)
    9981#endif
     
    10385        (type *) ((char *) __mptr - offsetof(type, member));})
    10486#endif
     87
     88/* Convert from a base type to a parent type, with compile time checking.  */
     89#ifdef __GNUC__
     90#define DO_UPCAST(type, field, dev) ( __extension__ ( { \
     91    char __attribute__((unused)) offset_must_be_zero[ \
     92        -offsetof(type, field)]; \
     93    container_of(dev, type, field);}))
     94#else
     95#define DO_UPCAST(type, field, dev) container_of(dev, type, field)
     96#endif
     97
     98#define typeof_field(type, field) typeof(((type *)0)->field)
     99#define type_check(t1,t2) ((t1*)0 - (t2*)0)
    105100
    106101#ifndef MIN
     
    116111
    117112#ifndef always_inline
    118 #if (__GNUC__ < 3) || defined(__APPLE__)
    119 #define always_inline inline
    120 #else
    121 #define always_inline __attribute__ (( always_inline )) __inline__
     113#if !((__GNUC__ < 3) || defined(__APPLE__))
    122114#ifdef __OPTIMIZE__
    123 #define inline always_inline
     115#define inline __attribute__ (( always_inline )) __inline__
    124116#endif
    125117#endif
     
    136128#ifndef VBOX
    137129#define qemu_printf printf
    138 #endif
     130#else  /*VBOX*/
     131#define qemu_printf RTLogPrintf
     132#endif /*VBOX*/
    139133
    140134#if defined (__GNUC__) && defined (__GNUC_MINOR__)
     
    165159#endif /* !_WIN32 */
    166160#else  /* VBOX */
    167 # define qemu_memalign(alignment, size)  ( (alignment) <= PAGE_SIZE ? RTMemPageAlloc((size)) : NULL )
     161# define qemu_memalign(alignment, size) ( (alignment) <= PAGE_SIZE ? RTMemPageAlloc((size)) : NULL )
     162# define qemu_vfree(pv)                 RTMemPageFree(pv, missing_size_parameter)
     163# define qemu_vmalloc(cb)               RTMemPageAlloc(cb)
    168164#endif /* VBOX */
    169165
  • trunk/src/recompiler/qemu-common.h

    r36175 r37675  
    33#define QEMU_COMMON_H
    44
     5#include "config-host.h"
     6
    57#ifdef VBOX
    68
    7 # include <string.h>
    8 # include <inttypes.h>
     9# include <iprt/string.h>
     10# include <iprt/types.h>
    911# include <iprt/ctype.h>
    10 
    11 #define QEMU_NORETURN __attribute__ ((__noreturn__))
    1212
    1313void pstrcpy(char *buf, int buf_size, const char *str);
    1414char *pstrcat(char *buf, int buf_size, const char *s);
    15 # define snprintf RTStrPrintf
    16 
    17 #define qemu_isalnum(c)         RT_C_IS_ALNUM((unsigned char)(c))
    18 #define qemu_isalpha(c)         RT_C_IS_ALPHA((unsigned char)(c))
    19 #define qemu_iscntrl(c)         RT_C_IS_CNTRL((unsigned char)(c))
    20 #define qemu_isdigit(c)         RT_C_IS_DIGIT((unsigned char)(c))
    21 #define qemu_isgraph(c)         RT_C_IS_GRAPH((unsigned char)(c))
    22 #define qemu_islower(c)         RT_C_IS_LOWER((unsigned char)(c))
    23 #define qemu_isprint(c)         RT_C_IS_PRINT((unsigned char)(c))
    24 #define qemu_ispunct(c)         RT_C_IS_PUNCT((unsigned char)(c))
    25 #define qemu_isspace(c)         RT_C_IS_SPACE((unsigned char)(c))
    26 #define qemu_isupper(c)         RT_C_IS_UPPER((unsigned char)(c))
    27 #define qemu_isxdigit(c)        RT_C_IS_XDIGIT((unsigned char)(c))
    28 #define qemu_tolower(c)         RT_C_TO_LOWER((unsigned char)(c))
    29 #define qemu_toupper(c)         RT_C_TO_UPPER((unsigned char)(c))
    30 #define qemu_isascii(c)         RT_C_IS_ASCII((unsigned char)(c))
    31 #define qemu_toascii(c)         RT_C_TO_ASCII((unsigned char)(c))
    32 
    33 #define qemu_init_vcpu(env)     do { } while (0) /* we don't need this :-) */
    34 
     15# define snprintf               RTStrPrintf
     16
     17# define qemu_isalnum(c)        RT_C_IS_ALNUM((unsigned char)(c))
     18# define qemu_isalpha(c)        RT_C_IS_ALPHA((unsigned char)(c))
     19# define qemu_iscntrl(c)        RT_C_IS_CNTRL((unsigned char)(c))
     20# define qemu_isdigit(c)        RT_C_IS_DIGIT((unsigned char)(c))
     21# define qemu_isgraph(c)        RT_C_IS_GRAPH((unsigned char)(c))
     22# define qemu_islower(c)        RT_C_IS_LOWER((unsigned char)(c))
     23# define qemu_isprint(c)        RT_C_IS_PRINT((unsigned char)(c))
     24# define qemu_ispunct(c)        RT_C_IS_PUNCT((unsigned char)(c))
     25# define qemu_isspace(c)        RT_C_IS_SPACE((unsigned char)(c))
     26# define qemu_isupper(c)        RT_C_IS_UPPER((unsigned char)(c))
     27# define qemu_isxdigit(c)       RT_C_IS_XDIGIT((unsigned char)(c))
     28# define qemu_tolower(c)        RT_C_TO_LOWER((unsigned char)(c))
     29# define qemu_toupper(c)        RT_C_TO_UPPER((unsigned char)(c))
     30# define qemu_isascii(c)        RT_C_IS_ASCII((unsigned char)(c))
     31# define qemu_toascii(c)        RT_C_TO_ASCII((unsigned char)(c))
     32
     33# define qemu_init_vcpu(env)    do { } while (0) /* we don't need this :-) */
     34
     35# define QEMU_NORETURN              __attribute__((__noreturn__))
     36# ifdef CONFIG_GCC_ATTRIBUTE_WARN_UNUSED_RESULT
     37#  define QEMU_WARN_UNUSED_RESULT   __attribute__((warn_unused_result))
     38# else
     39#  define QEMU_WARN_UNUSED_RESULT
     40# endif
    3541
    3642#else /* !VBOX */
     
    6369#include <sys/stat.h>
    6470#include <assert.h>
    65 #include "config-host.h"
    6671
    6772#ifndef O_LARGEFILE
     
    7176#define O_BINARY 0
    7277#endif
    73 
     78#ifndef MAP_ANONYMOUS
     79#define MAP_ANONYMOUS MAP_ANON
     80#endif
    7481#ifndef ENOMEDIUM
    7582#define ENOMEDIUM ENODEV
    7683#endif
    77 
    78 #ifndef HAVE_IOVEC
    79 #define HAVE_IOVEC
     84#if !defined(ENOTSUP)
     85#define ENOTSUP 4096
     86#endif
     87
     88#ifndef CONFIG_IOVEC
     89#define CONFIG_IOVEC
    8090struct iovec {
    8191    void *iov_base;
    8292    size_t iov_len;
    8393};
     94/*
     95 * Use the same value as Linux for now.
     96 */
     97#define IOV_MAX         1024
    8498#else
    8599#include <sys/uio.h>
     
    89103#define fsync _commit
    90104#define lseek _lseeki64
    91 #define ENOTSUP 4096
    92105extern int qemu_ftruncate64(int, int64_t);
    93106#define ftruncate qemu_ftruncate64
    94 
    95107
    96108static inline char *realpath(const char *path, char *resolved_path)
     
    123135
    124136typedef void QEMUBHFunc(void *opaque);
     137
     138void async_context_push(void);
     139void async_context_pop(void);
     140int get_async_context_id(void);
    125141
    126142QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
     
    136152void qemu_bh_delete(QEMUBH *bh);
    137153int qemu_bh_poll(void);
     154void qemu_bh_update_timeout(int *timeout);
    138155
    139156uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c);
     
    150167time_t mktimegm(struct tm *tm);
    151168int qemu_fls(int i);
     169int qemu_fdatasync(int fd);
     170
     171/* path.c */
     172void init_paths(const char *prefix);
     173const char *path(const char *pathname);
    152174
    153175#define qemu_isalnum(c)         isalnum((unsigned char)(c))
     
    177199
    178200
     201void qemu_mutex_lock_iothread(void);
     202void qemu_mutex_unlock_iothread(void);
     203
     204int qemu_open(const char *name, int flags, ...);
     205void qemu_set_cloexec(int fd);
     206
     207#ifndef _WIN32
     208int qemu_pipe(int pipefd[2]);
     209#endif
     210
    179211/* Error handling.  */
    180212
     
    208240typedef TextConsole QEMUConsole;
    209241typedef struct CharDriverState CharDriverState;
     242typedef struct MACAddr MACAddr;
    210243typedef struct VLANState VLANState;
     244typedef struct VLANClientState VLANClientState;
    211245typedef struct QEMUFile QEMUFile;
    212246typedef struct i2c_bus i2c_bus;
     
    214248typedef struct SMBusDevice SMBusDevice;
    215249typedef struct QEMUTimer QEMUTimer;
     250typedef struct PCIHostState PCIHostState;
     251typedef struct PCIExpressHost PCIExpressHost;
    216252typedef struct PCIBus PCIBus;
    217253typedef struct PCIDevice PCIDevice;
     
    255291void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
    256292void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
     293void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size);
    257294void qemu_iovec_destroy(QEMUIOVector *qiov);
    258295void qemu_iovec_reset(QEMUIOVector *qiov);
     
    263300typedef struct Monitor Monitor;
    264301
     302/* Convert a byte between binary and BCD.  */
     303static inline uint8_t to_bcd(uint8_t val)
     304{
     305    return ((val / 10) << 4) | (val % 10);
     306}
     307
     308static inline uint8_t from_bcd(uint8_t val)
     309{
     310    return ((val >> 4) * 10) + (val & 0x0f);
     311}
     312
    265313#include "module.h"
    266314
  • trunk/src/recompiler/qemu-lock.h

    r36175 r37675  
    3333   than physical CPUs (the extreme case being a single CPU host) a spinlock
    3434   simply wastes CPU until the OS decides to preempt it.  */
    35 #if defined(USE_NPTL)
     35#if defined(CONFIG_USE_NPTL)
    3636
    3737#include <pthread.h>
  • trunk/src/recompiler/qemu-log.h

    r36170 r37675  
    1616/* Returns true if qemu_log() will really write somewhere
    1717 */
     18#ifndef VBOX
    1819#define qemu_log_enabled() (logfile != NULL)
     20#else
     21# define qemu_log_enabled() LogIsEnabled()
     22#endif
    1923
    2024/* Returns true if a bit is set in the current loglevel mask
     
    2327
    2428
     29
    2530/* Logging functions: */
    2631
    2732/* main logging function
    2833 */
     34#ifndef VBOX
    2935#define qemu_log(...) do {                 \
    3036        if (logfile)                       \
    3137            fprintf(logfile, ## __VA_ARGS__); \
    3238    } while (0)
     39#else
     40# define qemu_log(...) Log((__VA_ARGS__))
     41#endif
    3342
    3443/* vfprintf-like logging function
    3544 */
     45#ifndef VBOX
    3646#define qemu_log_vprintf(fmt, va) do {     \
    3747        if (logfile)                       \
    3848            vfprintf(logfile, fmt, va);    \
    3949    } while (0)
     50#else
     51# define qemu_log_vprintf(fmt, va) do { \
     52        if (LogIsEnabled()) \
     53            RTLogLoggerExV(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP, fmt, va); \
     54    } while (0)
     55#endif
    4056
    4157/* log only if a bit is set on the current loglevel mask
    4258 */
     59#ifndef VBOX
    4360#define qemu_log_mask(b, ...) do {         \
    4461        if (loglevel & (b))                \
    4562            fprintf(logfile, ## __VA_ARGS__); \
    4663    } while (0)
     64#else
     65# define qemu_log_mask(b, ...) do { \
     66        if (loglevel & (b)) \
     67            Log((__VA_ARGS__)); \
     68    } while (0)
     69#endif
    4770
    4871
     
    5275
    5376/* cpu_dump_state() logging functions: */
     77#ifndef VBOX
    5478#define log_cpu_state(env, f) cpu_dump_state((env), logfile, fprintf, (f));
     79#else
     80#define log_cpu_state(env, f) cpu_dump_state((env), NULL, NULL, (f));
     81#endif
    5582#define log_cpu_state_mask(b, env, f) do {           \
    5683      if (loglevel & (b)) log_cpu_state((env), (f)); \
     
    7198
    7299/* fflush() the log file */
     100#ifndef VBOX
    73101#define qemu_log_flush() fflush(logfile)
     102#else
     103# define qemu_log_flush()           RTLogFlush(LOG_INSTANCE)
     104#endif
    74105
    75106/* Close the log file */
     107#ifndef VBOX
    76108#define qemu_log_close() do { \
    77109        fclose(logfile);      \
    78110        logfile = NULL;       \
    79111    } while (0)
     112#else
     113# define qemu_log_close()           do { } while (0)
     114#endif
    80115
    81116/* Set up a new log file */
     117#ifndef VBOX
    82118#define qemu_log_set_file(f) do { \
    83119        logfile = (f);            \
    84120    } while (0)
     121#else
     122# define qemu_log_set_file(f)       do { } while (0)
     123#endif
    85124
    86125/* Set up a new log file, only if none is set */
     126#ifndef VBOX
    87127#define qemu_log_try_set_file(f) do { \
    88128        if (!logfile)                 \
    89129            logfile = (f);            \
    90130    } while (0)
     131#else
     132#define qemu_log_try_set_file(f)    do { } while (0)
     133#endif
    91134
    92135
  • trunk/src/recompiler/softmmu_template.h

    r36175 r37675  
    9898#endif
    9999#endif /* SHIFT > 2 */
    100 #ifdef CONFIG_KQEMU
    101     env->last_io_time = cpu_get_time_fast();
    102 #endif
    103100    return res;
    104101}
     
    252249#endif
    253250#endif /* SHIFT > 2 */
    254 #ifdef CONFIG_KQEMU
    255     env->last_io_time = cpu_get_time_fast();
    256 #endif
    257251}
    258252
  • trunk/src/recompiler/target-i386/cpu.h

    r36299 r37675  
    347347#define MSR_GSBASE                      0xc0000101
    348348#define MSR_KERNELGSBASE                0xc0000102
     349#define MSR_TSC_AUX                     0xc0000103
    349350
    350351#define MSR_VM_HSAVE_PA                 0xc0010117
     
    557558} MMXReg;
    558559
    559 #ifdef WORDS_BIGENDIAN
     560#ifdef HOST_WORDS_BIGENDIAN
    560561#define XMM_B(n) _b[15 - (n)]
    561562#define XMM_W(n) _w[7 - (n)]
     
    584585#define MMX_Q(n) q
    585586
     587typedef union {
     588#ifdef USE_X86LDOUBLE
     589    CPU86_LDouble d __attribute__((aligned(16)));
     590#else
     591    CPU86_LDouble d;
     592#endif
     593    MMXReg mmx;
     594} FPReg;
     595
     596typedef struct {
     597    uint64_t base;
     598    uint64_t mask;
     599} MTRRVar;
     600
     601#define CPU_NB_REGS64 16
     602#define CPU_NB_REGS32 8
     603
    586604#ifdef TARGET_X86_64
    587 #define CPU_NB_REGS 16
     605#define CPU_NB_REGS CPU_NB_REGS64
    588606#else
    589 #define CPU_NB_REGS 8
     607#define CPU_NB_REGS CPU_NB_REGS32
    590608#endif
    591609
     
    617635
    618636    target_ulong cr[5]; /* NOTE: cr1 is unused */
    619     uint64_t a20_mask;
     637    int32_t a20_mask;
    620638
    621639    /* FPU state */
    622640    unsigned int fpstt; /* top of stack index */
    623     unsigned int fpus;
    624     unsigned int fpuc;
     641    uint16_t fpus;
     642    uint16_t fpuc;
    625643    uint8_t fptags[8];   /* 0 = valid, 1 = empty */
    626     union {
    627 #ifdef USE_X86LDOUBLE
    628         CPU86_LDouble d __attribute__((aligned(16)));
    629 #else
    630         CPU86_LDouble d;
    631 #endif
    632         MMXReg mmx;
    633     } fpregs[8];
     644    FPReg fpregs[8];
    634645
    635646    /* emulator internal variables */
     
    678689    target_ulong kernelgsbase;
    679690#endif
     691    uint64_t system_time_msr;
     692    uint64_t wall_clock_msr;
    680693
    681694    uint64_t tsc;
     
    733746    uint64_t mtrr_fixed[11];
    734747    uint64_t mtrr_deftype;
    735     struct {
    736         uint64_t base;
    737         uint64_t mask;
    738     } mtrr_var[8];
    739 
    740 #ifdef CONFIG_KQEMU
    741     int kqemu_enabled;
    742     int last_io_time;
    743 #endif
     748    MTRRVar mtrr_var[8];
    744749
    745750    /* For KVM */
    746     uint64_t interrupt_bitmap[256 / 64];
    747751    uint32_t mp_state;
     752    int32_t exception_injected;
     753    int32_t interrupt_injected;
     754    uint8_t soft_interrupt;
     755    uint8_t nmi_injected;
     756    uint8_t nmi_pending;
     757    uint8_t has_error_code;
     758    uint32_t sipi_vector;
    748759
    749760    /* in order to simplify APIC support, we leave this pointer to the
     
    754765    uint64 mcg_status;
    755766    uint64 mcg_ctl;
    756     uint64 *mce_banks;
     767    uint64 mce_banks[MCE_BANKS_DEF*4];
     768
     769    uint64_t tsc_aux;
     770
     771    /* vmstate */
     772    uint16_t fpus_vmstate;
     773    uint16_t fptag_vmstate;
     774    uint16_t fpregs_format_vmstate;
    757775#else  /* VBOX */
    758776
     
    956974}
    957975
     976int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
     977                            target_ulong *base, unsigned int *limit,
     978                            unsigned int *flags);
     979
    958980/* wrapper, just in case memory mappings must be changed */
    959981static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
     
    9871009int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
    9881010                             int is_write, int mmu_idx, int is_softmmu);
     1011#define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault
    9891012void cpu_x86_set_a20(CPUX86State *env, int a20_state);
    9901013void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
     
    9991022static inline int hw_breakpoint_type(unsigned long dr7, int index)
    10001023{
    1001     return (dr7 >> (DR7_TYPE_SHIFT + (index * 2))) & 3;
     1024    return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
    10021025}
    10031026
    10041027static inline int hw_breakpoint_len(unsigned long dr7, int index)
    10051028{
    1006     int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 2))) & 3);
     1029    int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
    10071030    return (len == 2) ? 8 : len + 1;
    10081031}
     
    10321055#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
    10331056#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
    1034 
    1035 #ifdef CONFIG_KQEMU
    1036 static inline int cpu_get_time_fast(void)
    1037 {
    1038     int low, high;
    1039     asm volatile("rdtsc" : "=a" (low), "=d" (high));
    1040     return low;
    1041 }
    1042 #endif
    10431057
    10441058#ifdef VBOX
     
    10701084#define cpu_list x86_cpu_list
    10711085
    1072 #define CPU_SAVE_VERSION 10
     1086#define CPU_SAVE_VERSION 11
    10731087
    10741088/* MMU modes definitions */
  • trunk/src/recompiler/target-i386/exec.h

    r36175 r37675  
    4444#include "qemu-log.h"
    4545
     46#undef EAX
    4647#define EAX (env->regs[R_EAX])
     48#undef ECX
    4749#define ECX (env->regs[R_ECX])
     50#undef EDX
    4851#define EDX (env->regs[R_EDX])
     52#undef EBX
    4953#define EBX (env->regs[R_EBX])
     54#undef ESP
    5055#define ESP (env->regs[R_ESP])
     56#undef EBP
    5157#define EBP (env->regs[R_EBP])
     58#undef ESI
    5259#define ESI (env->regs[R_ESI])
     60#undef EDI
    5361#define EDI (env->regs[R_EDI])
     62#undef EIP
    5463#define EIP (env->eip)
    5564#define DF  (env->df)
     
    116125#define floatx_compare floatx80_compare
    117126#define floatx_compare_quiet floatx80_compare_quiet
    118 #ifdef VBOX
    119 #undef sin
    120 #undef cos
    121 #undef sqrt
    122 #undef pow
    123 #undef log
    124 #undef tan
    125 #undef atan2
    126 #undef floor
    127 #undef ceil
    128 #undef ldexp
    129 #endif /* !VBOX */
    130 #if !defined(VBOX) || !defined(_MSC_VER)
    131 #define sin sinl
    132 #define cos cosl
    133 #define sqrt sqrtl
    134 #define pow powl
    135 #define log logl
    136 #define tan tanl
    137 #define atan2 atan2l
    138 #define floor floorl
    139 #define ceil ceill
    140 #define ldexp ldexpl
    141 #endif
    142127#else
    143128#define floatx_to_int32 float64_to_int32
     
    158143#endif
    159144
    160 #ifdef VBOX
    161 extern CPU86_LDouble sin(CPU86_LDouble x);
    162 extern CPU86_LDouble cos(CPU86_LDouble x);
    163 extern CPU86_LDouble sqrt(CPU86_LDouble x);
    164 extern CPU86_LDouble pow(CPU86_LDouble, CPU86_LDouble);
    165 extern CPU86_LDouble log(CPU86_LDouble x);
    166 extern CPU86_LDouble tan(CPU86_LDouble x);
    167 extern CPU86_LDouble atan2(CPU86_LDouble, CPU86_LDouble);
    168 extern CPU86_LDouble floor(CPU86_LDouble x);
    169 extern CPU86_LDouble ceil(CPU86_LDouble x);
    170 #endif /* VBOX */
    171 
    172145#define RC_MASK         0xc00
    173146#define RC_NEAR         0x000
     
    202175typedef union {
    203176    double d;
    204 #if !defined(WORDS_BIGENDIAN) && !defined(__arm__)
     177#if !defined(HOST_WORDS_BIGENDIAN) && !defined(__arm__)
    205178    struct {
    206179        uint32_t lower;
  • trunk/src/recompiler/target-i386/helper.c

    r36175 r37675  
    135135    {
    136136        .name = "qemu64",
    137         .level = 2,
     137        .level = 4,
    138138        .vendor1 = CPUID_VENDOR_AMD_1,
    139139        .vendor2 = CPUID_VENDOR_AMD_2,
     
    147147        /* this feature is needed for Solaris and isn't fully implemented */
    148148            CPUID_PSE36,
    149         .ext_features = CPUID_EXT_SSE3,
     149        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
    150150        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
    151151            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
    152         .ext3_features = CPUID_EXT3_SVM,
     152        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
     153            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
    153154        .xlevel = 0x8000000A,
    154155        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
     
    167168            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
    168169            CPUID_PSE36,
    169         /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
    170         .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
     170        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
     171            CPUID_EXT_POPCNT,
    171172        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
    172173        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
     
    174175            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
    175176            CPUID_EXT2_FFXSR,
    176         /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
    177                     CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
     177        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
     178                    CPUID_EXT3_CR8LEG,
    178179                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
    179180                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
    180         .ext3_features = CPUID_EXT3_SVM,
     181        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
     182            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
    181183        .xlevel = 0x8000001A,
    182184        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
     
    199201        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
    200202        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
    201         /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
     203        .ext3_features = CPUID_EXT3_LAHF_LM,
    202204        .xlevel = 0x80000008,
    203205        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    204206    },
     207    {
     208        .name = "kvm64",
     209        .level = 5,
     210        .vendor1 = CPUID_VENDOR_INTEL_1,
     211        .vendor2 = CPUID_VENDOR_INTEL_2,
     212        .vendor3 = CPUID_VENDOR_INTEL_3,
     213        .family = 15,
     214        .model = 6,
     215        .stepping = 1,
     216        /* Missing: CPUID_VME, CPUID_HT */
     217        .features = PPRO_FEATURES |
     218            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
     219            CPUID_PSE36,
     220        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
     221        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
     222        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
     223        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
     224            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
     225        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
     226                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
     227                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
     228                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
     229        .ext3_features = 0,
     230        .xlevel = 0x80000008,
     231        .model_id = "Common KVM processor"
     232    },
    205233#endif
    206234    {
    207235        .name = "qemu32",
    208         .level = 2,
     236        .level = 4,
    209237        .family = 6,
    210238        .model = 3,
    211239        .stepping = 3,
    212240        .features = PPRO_FEATURES,
    213         .ext_features = CPUID_EXT_SSE3,
     241        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
    214242        .xlevel = 0,
    215243        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
     
    365393    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    366394    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    367     int family = -1, model = -1, stepping = -1;
     395    uint32_t numvalue;
    368396
    369397    def = NULL;
     
    397425            if (!strcmp(featurestr, "family")) {
    398426                char *err;
    399                 family = strtol(val, &err, 10);
    400                 if (!*val || *err || family < 0) {
     427                numvalue = strtoul(val, &err, 0);
     428                if (!*val || *err) {
    401429                    fprintf(stderr, "bad numerical value %s\n", val);
    402430                    goto error;
    403431                }
    404                 x86_cpu_def->family = family;
     432                x86_cpu_def->family = numvalue;
    405433            } else if (!strcmp(featurestr, "model")) {
    406434                char *err;
    407                 model = strtol(val, &err, 10);
    408                 if (!*val || *err || model < 0 || model > 0xff) {
     435                numvalue = strtoul(val, &err, 0);
     436                if (!*val || *err || numvalue > 0xff) {
    409437                    fprintf(stderr, "bad numerical value %s\n", val);
    410438                    goto error;
    411439                }
    412                 x86_cpu_def->model = model;
     440                x86_cpu_def->model = numvalue;
    413441            } else if (!strcmp(featurestr, "stepping")) {
    414442                char *err;
    415                 stepping = strtol(val, &err, 10);
    416                 if (!*val || *err || stepping < 0 || stepping > 0xf) {
     443                numvalue = strtoul(val, &err, 0);
     444                if (!*val || *err || numvalue > 0xf) {
    417445                    fprintf(stderr, "bad numerical value %s\n", val);
    418446                    goto error;
    419447                }
    420                 x86_cpu_def->stepping = stepping;
     448                x86_cpu_def->stepping = numvalue ;
     449            } else if (!strcmp(featurestr, "level")) {
     450                char *err;
     451                numvalue = strtoul(val, &err, 0);
     452                if (!*val || *err) {
     453                    fprintf(stderr, "bad numerical value %s\n", val);
     454                    goto error;
     455                }
     456                x86_cpu_def->level = numvalue;
     457            } else if (!strcmp(featurestr, "xlevel")) {
     458                char *err;
     459                numvalue = strtoul(val, &err, 0);
     460                if (!*val || *err) {
     461                    fprintf(stderr, "bad numerical value %s\n", val);
     462                    goto error;
     463                }
     464                if (numvalue < 0x80000000) {
     465                        numvalue += 0x80000000;
     466                }
     467                x86_cpu_def->xlevel = numvalue;
    421468            } else if (!strcmp(featurestr, "vendor")) {
    422469                if (strlen(val) != 12) {
     
    594641    cpu_breakpoint_remove_all(env, BP_CPU);
    595642    cpu_watchpoint_remove_all(env, BP_CPU);
     643
     644#ifndef VBOX
     645    env->mcg_status = 0;
     646#endif
    596647}
    597648
     
    666717                       const char *name, struct SegmentCache *sc)
    667718{
     719#ifdef VBOX
     720# define cpu_fprintf(f, ...)    RTLogPrintf(__VA_ARGS__)
     721#endif
    668722#ifdef TARGET_X86_64
    669723    if (env->hflags & HF_CS64_MASK) {
     
    714768done:
    715769    cpu_fprintf(f, "\n");
     770#ifdef VBOX
     771# undef cpu_fprintf
     772#endif
    716773}
    717774
     
    724781    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
    725782
    726     if (kvm_enabled())
    727         kvm_arch_get_registers(env);
     783#ifdef VBOX
     784# define cpu_fprintf(f, ...)    RTLogPrintf(__VA_ARGS__)
     785#endif
     786    cpu_synchronize_state(env);
    728787
    729788    eflags = env->eflags;
     
    762821                    env->hflags & HF_CPL_MASK,
    763822                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
    764                     (int)(env->a20_mask >> 20) & 1,
     823                    (env->a20_mask >> 20) & 1,
    765824                    (env->hflags >> HF_SMM_SHIFT) & 1,
    766825                    env->halted);
     
    789848                    env->hflags & HF_CPL_MASK,
    790849                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
    791                     (int)(env->a20_mask >> 20) & 1,
     850                    (env->a20_mask >> 20) & 1,
    792851                    (env->hflags >> HF_SMM_SHIFT) & 1,
    793852                    env->halted);
     
    900959        }
    901960    }
     961#ifdef VBOX
     962# undef cpu_fprintf
     963#endif
    902964}
    903965
     
    920982           we must flush everything */
    921983        tlb_flush(env, 1);
    922         env->a20_mask = (~0x100000) | (a20_state << 20);
     984        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    923985    }
    924986}
     
    10281090/* XXX: This value should match the one returned by CPUID
    10291091 * and in exec.c */
    1030 #if defined(CONFIG_KQEMU)
    1031 #define PHYS_ADDR_MASK 0xfffff000LL
    1032 #else
    10331092# if defined(TARGET_X86_64)
    10341093# define PHYS_ADDR_MASK 0xfffffff000LL
     
    10361095# define PHYS_ADDR_MASK 0xffffff000LL
    10371096# endif
    1038 #endif
    10391097
    10401098/* return value:
     
    15141572        }
    15151573    } else {
    1516         TAILQ_FOREACH(bp, &env->breakpoints, entry)
     1574        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
    15171575            if (bp->pc == env->eip) {
    15181576                if (bp->flags & BP_CPU) {
     
    15951653        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
    15961654        cenv->mcg_ctl = ~(uint64_t)0;
    1597         bank_num = cenv->mcg_cap & 0xff;
    1598         cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4);
     1655        bank_num = MCE_BANKS_DEF;
    15991656        for (bank = 0; bank < bank_num; bank++)
    16001657            cenv->mce_banks[bank*4] = ~(uint64_t)0;
     
    16371694}
    16381695
     1696static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
     1697                             uint32_t *ecx, uint32_t *edx)
     1698{
     1699    *ebx = env->cpuid_vendor1;
     1700    *edx = env->cpuid_vendor2;
     1701    *ecx = env->cpuid_vendor3;
     1702
     1703    /* sysenter isn't supported on compatibility mode on AMD, syscall
     1704     * isn't supported in compatibility mode on Intel.
     1705     * Normally we advertise the actual cpu vendor, but you can override
     1706     * this if you want to use KVM's sysenter/syscall emulation
     1707     * in compatibility mode and when doing cross vendor migration
     1708     */
     1709    if (kvm_enabled() && env->cpuid_vendor_override) {
     1710        host_cpuid(0, 0, NULL, ebx, ecx, edx);
     1711    }
     1712}
     1713
    16391714void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
    16401715                   uint32_t *eax, uint32_t *ebx,
     
    16531728    case 0:
    16541729        *eax = env->cpuid_level;
    1655         *ebx = env->cpuid_vendor1;
    1656         *edx = env->cpuid_vendor2;
    1657         *ecx = env->cpuid_vendor3;
    1658 
    1659         /* sysenter isn't supported on compatibility mode on AMD.  and syscall
    1660          * isn't supported in compatibility mode on Intel.  so advertise the
    1661          * actuall cpu, and say goodbye to migration between different vendors
    1662          * is you use compatibility mode. */
    1663         if (kvm_enabled() && !env->cpuid_vendor_override)
    1664             host_cpuid(0, 0, NULL, ebx, ecx, edx);
     1730        get_cpuid_vendor(env, ebx, ecx, edx);
    16651731        break;
    16661732    case 1:
     
    16691735        *ecx = env->cpuid_ext_features;
    16701736        *edx = env->cpuid_features;
     1737        if (env->nr_cores * env->nr_threads > 1) {
     1738            *ebx |= (env->nr_cores * env->nr_threads) << 16;
     1739            *edx |= 1 << 28;    /* HTT bit */
     1740        }
    16711741        break;
    16721742    case 2:
     
    16791749    case 4:
    16801750        /* cache info: needed for Core compatibility */
     1751        if (env->nr_cores > 1) {
     1752                *eax = (env->nr_cores - 1) << 26;
     1753        } else {
     1754                *eax = 0;
     1755        }
    16811756        switch (count) {
    16821757            case 0: /* L1 dcache info */
    1683                 *eax = 0x0000121;
     1758                *eax |= 0x0000121;
    16841759                *ebx = 0x1c0003f;
    16851760                *ecx = 0x000003f;
     
    16871762                break;
    16881763            case 1: /* L1 icache info */
    1689                 *eax = 0x0000122;
     1764                *eax |= 0x0000122;
    16901765                *ebx = 0x1c0003f;
    16911766                *ecx = 0x000003f;
     
    16931768                break;
    16941769            case 2: /* L2 cache info */
    1695                 *eax = 0x0000143;
     1770                *eax |= 0x0000143;
     1771                if (env->nr_threads > 1) {
     1772                    *eax |= (env->nr_threads - 1) << 14;
     1773                }
    16961774                *ebx = 0x3c0003f;
    16971775                *ecx = 0x0000fff;
     
    17461824        *edx = env->cpuid_ext2_features;
    17471825
     1826        /* The Linux kernel checks for the CMPLegacy bit and
     1827         * discards multiple thread information if it is set.
     1828         * So dont set it here for Intel to make Linux guests happy.
     1829         */
     1830        if (env->nr_cores * env->nr_threads > 1) {
     1831            uint32_t tebx, tecx, tedx;
     1832            get_cpuid_vendor(env, &tebx, &tecx, &tedx);
     1833            if (tebx != CPUID_VENDOR_INTEL_1 ||
     1834                tedx != CPUID_VENDOR_INTEL_2 ||
     1835                tecx != CPUID_VENDOR_INTEL_3) {
     1836                *ecx |= 1 << 1;    /* CmpLegacy bit */
     1837            }
     1838        }
     1839
    17481840        if (kvm_enabled()) {
    1749             /* Nested SVM not yet supported in KVM */
     1841            /* Nested SVM not yet supported in upstream QEMU */
    17501842            *ecx &= ~CPUID_EXT3_SVM;
    1751         } else {
    1752             /* AMD 3DNow! is not supported in QEMU */
    1753             *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
    17541843        }
    17551844        break;
     
    17811870        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
    17821871            /* 64 bit processor */
    1783 #if defined(CONFIG_KQEMU)
    1784             *eax = 0x00003020;  /* 48 bits virtual, 32 bits physical */
    1785 #else
    17861872/* XXX: The physical address space is limited to 42 bits in exec.c. */
    17871873            *eax = 0x00003028;  /* 48 bits virtual, 40 bits physical */
    1788 #endif
    17891874        } else {
    1790 #if defined(CONFIG_KQEMU)
    1791             *eax = 0x00000020;  /* 32 bits physical */
    1792 #else
    17931875            if (env->cpuid_features & CPUID_PSE36)
    17941876                *eax = 0x00000024; /* 36 bits physical */
    17951877            else
    17961878                *eax = 0x00000020; /* 32 bits physical */
    1797 #endif
    17981879        }
    17991880        *ebx = 0;
    18001881        *ecx = 0;
    18011882        *edx = 0;
     1883        if (env->nr_cores * env->nr_threads > 1) {
     1884            *ecx |= (env->nr_cores * env->nr_threads) - 1;
     1885        }
    18021886        break;
    18031887    case 0x8000000A:
     
    18161900    }
    18171901}
     1902
    18181903
    18191904int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
     
    18811966    mce_init(env);
    18821967#endif
    1883     cpu_reset(env);
    1884 #ifdef CONFIG_KQEMU
    1885     kqemu_init(env);
    1886 #endif
    18871968
    18881969    qemu_init_vcpu(env);
  • trunk/src/recompiler/target-i386/helper.h

    r36175 r37675  
    8181DEF_HELPER_0(cpuid, void)
    8282DEF_HELPER_0(rdtsc, void)
     83DEF_HELPER_0(rdtscp, void)
    8384DEF_HELPER_0(rdpmc, void)
    8485DEF_HELPER_0(rdmsr, void)
     
    209210DEF_HELPER_1(bsf, tl, tl)
    210211DEF_HELPER_1(bsr, tl, tl)
     212DEF_HELPER_2(lzcnt, tl, tl, int)
    211213
    212214/* MMX/SSE */
     
    237239DEF_HELPER_0(cli_vme, void)
    238240DEF_HELPER_0(sti_vme, void)
    239 DEF_HELPER_0(rdtscp, void)
    240241DEF_HELPER_0(check_external_event, void)
    241242DEF_HELPER_0(dump_state, void)
  • trunk/src/recompiler/target-i386/op_helper.c

    r36765 r37675  
    675675
    676676#ifdef VBOX
     677
    677678/* Keep in sync with gen_check_external_event() */
    678679void helper_check_external_event()
     
    695696        sync_seg(env, reg, env->segs[reg].newselector);
    696697}
     698
    697699#endif /* VBOX */
    698700
     
    714716void helper_outb(uint32_t port, uint32_t data)
    715717{
     718#ifndef VBOX
     719    cpu_outb(port, data & 0xff);
     720#else
    716721    cpu_outb(env, port, data & 0xff);
     722#endif
    717723}
    718724
    719725target_ulong helper_inb(uint32_t port)
    720726{
     727#ifndef VBOX
     728    return cpu_inb(port);
     729#else
    721730    return cpu_inb(env, port);
     731#endif
    722732}
    723733
    724734void helper_outw(uint32_t port, uint32_t data)
    725735{
     736#ifndef VBOX
     737    cpu_outw(port, data & 0xffff);
     738#else
    726739    cpu_outw(env, port, data & 0xffff);
     740#endif
    727741}
    728742
    729743target_ulong helper_inw(uint32_t port)
    730744{
     745#ifndef VBOX
     746    return cpu_inw(port);
     747#else
    731748    return cpu_inw(env, port);
     749#endif
    732750}
    733751
    734752void helper_outl(uint32_t port, uint32_t data)
    735753{
     754#ifndef VBOX
     755    cpu_outl(port, data);
     756#else
    736757    cpu_outl(env, port, data);
     758#endif
    737759}
    738760
    739761target_ulong helper_inl(uint32_t port)
    740762{
     763#ifndef VBOX
     764    return cpu_inl(port);
     765#else
    741766    return cpu_inl(env, port);
     767#endif
    742768}
    743769
     
    14061432        cpu_x86_set_cpl(env, 3);
    14071433    }
    1408 #ifdef CONFIG_KQEMU
    1409     if (kqemu_is_ok(env)) {
    1410         if (env->hflags & HF_LMA_MASK)
    1411             CC_OP = CC_OP_EFLAGS;
    1412         env->exception_index = -1;
    1413         cpu_loop_exit();
    1414     }
    1415 #endif
    14161434}
    14171435#endif
    14181436
    14191437#ifdef VBOX
     1438
    14201439/**
    14211440 * Checks and processes external VMM events.
     
    14591478    }
    14601479}
     1480
    14611481/* helper for recording call instruction addresses for later scanning */
    14621482void helper_record_call()
     
    14671487        remR3RecordCall(env);
    14681488}
     1489
    14691490#endif /* VBOX */
    14701491
     
    29452966        EIP = offset;
    29462967    }
    2947 #ifdef CONFIG_KQEMU
    2948     if (kqemu_is_ok(env)) {
    2949         env->exception_index = -1;
    2950         cpu_loop_exit();
    2951     }
    2952 #endif
    29532968}
    29542969
     
    30943109        if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
    30953110        {
    3096 #ifdef DEBUG
     3111# ifdef DEBUG
    30973112            printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
    3098 #endif
     3113# endif
    30993114            new_cs = new_cs & 0xfffc;
    31003115        }
     
    33263341    }
    33273342    env->hflags2 &= ~HF2_NMI_MASK;
    3328 #ifdef CONFIG_KQEMU
    3329     if (kqemu_is_ok(env)) {
    3330         CC_OP = CC_OP_EFLAGS;
    3331         env->exception_index = -1;
    3332         cpu_loop_exit();
    3333     }
    3334 #endif
    33353343}
    33363344
     
    33383346{
    33393347    helper_ret_protected(shift, 0, addend);
    3340 #ifdef CONFIG_KQEMU
    3341     if (kqemu_is_ok(env)) {
    3342         env->exception_index = -1;
    3343         cpu_loop_exit();
    3344     }
    3345 #endif
    33463348}
    33473349
     
    34163418    ESP = ECX;
    34173419    EIP = EDX;
    3418 #ifdef CONFIG_KQEMU
    3419     if (kqemu_is_ok(env)) {
    3420         env->exception_index = -1;
    3421         cpu_loop_exit();
    3422     }
    3423 #endif
    34243420}
    34253421
     
    35363532}
    35373533
    3538 #ifdef VBOX
    35393534void helper_rdtscp(void)
    35403535{
     3536#ifndef VBOX
     3537    helper_rdtsc();
     3538    ECX = (uint32_t)(env->tsc_aux);
     3539#else
    35413540    uint64_t val;
    35423541    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
     
    35513550    else
    35523551        ECX = 0;
    3553 }
    35543552#endif /* VBOX */
     3553}
    35553554
    35563555void helper_rdpmc(void)
     
    37053704            env->mcg_ctl = val;
    37063705        break;
     3706    case MSR_TSC_AUX:
     3707        env->tsc_aux = val;
     3708        break;
    37073709# endif /* !VBOX */
    37083710    default:
     
    37883790        val = env->kernelgsbase;
    37893791        break;
    3790 #endif
    3791 #ifdef CONFIG_KQEMU
    3792     case MSR_QPI_COMMBASE:
    3793         if (env->kqemu_enabled) {
    3794             val = kqemu_comm_base;
    3795         } else {
    3796             val = 0;
    3797         }
    3798         break;
     3792# ifndef VBOX
     3793    case MSR_TSC_AUX:
     3794        val = env->tsc_aux;
     3795        break;
     3796# endif /*!VBOX*/
    37993797#endif
    38003798# ifndef VBOX
     
    50105008    target_ulong addr;
    50115009
     5010    /* The operand must be 16 byte aligned */
     5011    if (ptr & 0xf) {
     5012        raise_exception(EXCP0D_GPF);
     5013    }
     5014
    50125015    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    50135016    fptag = 0;
     
    50655068    CPU86_LDouble tmp;
    50665069    target_ulong addr;
     5070
     5071    /* The operand must be 16 byte aligned */
     5072    if (ptr & 0xf) {
     5073        raise_exception(EXCP0D_GPF);
     5074    }
    50675075
    50685076    env->fpuc = lduw(ptr);
     
    67326740}
    67336741
    6734 target_ulong helper_bsr(target_ulong t0)
     6742target_ulong helper_lzcnt(target_ulong t0, int wordsize)
    67356743{
    67366744    int count;
    67376745    target_ulong res, mask;
    67386746
     6747    if (wordsize > 0 && t0 == 0) {
     6748        return wordsize;
     6749    }
    67396750    res = t0;
    67406751    count = TARGET_LONG_BITS - 1;
     
    67446755        res <<= 1;
    67456756    }
     6757    if (wordsize > 0) {
     6758        return wordsize - 1 - count;
     6759    }
    67466760    return count;
    67476761}
    67486762
     6763target_ulong helper_bsr(target_ulong t0)
     6764{
     6765        return helper_lzcnt(t0, 0);
     6766}
    67496767
    67506768static int compute_all_eflags(void)
  • trunk/src/recompiler/target-i386/ops_sse.h

    r36175 r37675  
    811811{
    812812    d->XMM_S(0) = approx_rcp(s->XMM_S(0));
     813}
     814
     815static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
     816{
     817    uint64_t mask;
     818
     819    if (len == 0) {
     820        mask = ~0LL;
     821    } else {
     822        mask = (1ULL << len) - 1;
     823    }
     824    return (src >> shift) & mask;
     825}
     826
     827void helper_extrq_r(XMMReg *d, XMMReg *s)
     828{
     829    d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), s->XMM_B(1), s->XMM_B(0));
     830}
     831
     832void helper_extrq_i(XMMReg *d, int index, int length)
     833{
     834    d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), index, length);
     835}
     836
     837static inline uint64_t helper_insertq(uint64_t src, int shift, int len)
     838{
     839    uint64_t mask;
     840
     841    if (len == 0) {
     842        mask = ~0ULL;
     843    } else {
     844        mask = (1ULL << len) - 1;
     845    }
     846    return (src & ~(mask << shift)) | ((src & mask) << shift);
     847}
     848
     849void helper_insertq_r(XMMReg *d, XMMReg *s)
     850{
     851    d->XMM_Q(0) = helper_insertq(s->XMM_Q(0), s->XMM_B(9), s->XMM_B(8));
     852}
     853
     854void helper_insertq_i(XMMReg *d, int index, int length)
     855{
     856    d->XMM_Q(0) = helper_insertq(d->XMM_Q(0), index, length);
    813857}
    814858
     
    906950SSE_HELPER_CMP(cmpord, FPU_CMPORD)
    907951
    908 const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
     952static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
    909953
    910954void helper_ucomiss(Reg *d, Reg *s)
  • trunk/src/recompiler/target-i386/ops_sse_header.h

    r36175 r37675  
    198198DEF_HELPER_2(rcpps, void, XMMReg, XMMReg)
    199199DEF_HELPER_2(rcpss, void, XMMReg, XMMReg)
     200DEF_HELPER_2(extrq_r, void, XMMReg, XMMReg)
     201DEF_HELPER_3(extrq_i, void, XMMReg, int, int)
     202DEF_HELPER_2(insertq_r, void, XMMReg, XMMReg)
     203DEF_HELPER_3(insertq_i, void, XMMReg, int, int)
    200204DEF_HELPER_2(haddps, void, XMMReg, XMMReg)
    201205DEF_HELPER_2(haddpd, void, XMMReg, XMMReg)
  • trunk/src/recompiler/target-i386/translate.c

    r36266 r37675  
    7575static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
    7676static TCGv_i32 cpu_cc_op;
     77static TCGv cpu_regs[CPU_NB_REGS];
    7778/* local temps */
    7879static TCGv cpu_T[2], cpu_T3;
     
    8283static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
    8384static TCGv_i64 cpu_tmp1_i64;
    84 static TCGv cpu_tmp5, cpu_tmp6;
     85static TCGv cpu_tmp5;
    8586
    8687#include "gen-icount.h"
     
    306307#endif /* !TARGET_X86_64 */
    307308
    308 #if defined(WORDS_BIGENDIAN)
     309#if defined(HOST_WORDS_BIGENDIAN)
    309310#define REG_B_OFFSET (sizeof(target_ulong) - 1)
    310311#define REG_H_OFFSET (sizeof(target_ulong) - 2)
     
    322323static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
    323324{
     325    TCGv tmp;
     326
     327    switch(ot) {
     328    case OT_BYTE:
     329        tmp = tcg_temp_new();
     330        tcg_gen_ext8u_tl(tmp, t0);
     331        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
     332            tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xff);
     333            tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
     334        } else {
     335            tcg_gen_shli_tl(tmp, tmp, 8);
     336            tcg_gen_andi_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], ~0xff00);
     337            tcg_gen_or_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], tmp);
     338        }
     339        tcg_temp_free(tmp);
     340        break;
     341    case OT_WORD:
     342        tmp = tcg_temp_new();
     343        tcg_gen_ext16u_tl(tmp, t0);
     344        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
     345        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
     346        tcg_temp_free(tmp);
     347        break;
     348    default: /* XXX this shouldn't be reached;  abort? */
     349    case OT_LONG:
     350        /* For x86_64, this sets the higher half of register to zero.
     351           For i386, this is equivalent to a mov. */
     352        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
     353        break;
     354#ifdef TARGET_X86_64
     355    case OT_QUAD:
     356        tcg_gen_mov_tl(cpu_regs[reg], t0);
     357        break;
     358#endif
     359    }
     360}
     361
     362static inline void gen_op_mov_reg_T0(int ot, int reg)
     363{
     364    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
     365}
     366
     367static inline void gen_op_mov_reg_T1(int ot, int reg)
     368{
     369    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
     370}
     371
     372static inline void gen_op_mov_reg_A0(int size, int reg)
     373{
     374    TCGv tmp;
     375
     376    switch(size) {
     377    case 0:
     378        tmp = tcg_temp_new();
     379        tcg_gen_ext16u_tl(tmp, cpu_A0);
     380        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
     381        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
     382        tcg_temp_free(tmp);
     383        break;
     384    default: /* XXX this shouldn't be reached;  abort? */
     385    case 1:
     386        /* For x86_64, this sets the higher half of register to zero.
     387           For i386, this is equivalent to a mov. */
     388        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
     389        break;
     390#ifdef TARGET_X86_64
     391    case 2:
     392        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
     393        break;
     394#endif
     395    }
     396}
     397
     398static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
     399{
    324400    switch(ot) {
    325401    case OT_BYTE:
    326402        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
    327             tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);
     403            goto std_case;
    328404        } else {
    329             tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
    330         }
    331         break;
    332     case OT_WORD:
    333         tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
    334         break;
    335 #ifdef TARGET_X86_64
    336     case OT_LONG:
    337         tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
    338         /* high part of register set to zero */
    339         tcg_gen_movi_tl(cpu_tmp0, 0);
    340         tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
     405            tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
     406            tcg_gen_ext8u_tl(t0, t0);
     407        }
    341408        break;
    342409    default:
    343     case OT_QUAD:
    344         tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
    345         break;
    346 #else
    347     default:
    348     case OT_LONG:
    349         tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
    350         break;
    351 #endif
     410    std_case:
     411        tcg_gen_mov_tl(t0, cpu_regs[reg]);
     412        break;
    352413    }
    353414}
    354415
    355 static inline void gen_op_mov_reg_T0(int ot, int reg)
    356 {
    357     gen_op_mov_reg_v(ot, reg, cpu_T[0]);
    358 }
    359 
    360 static inline void gen_op_mov_reg_T1(int ot, int reg)
    361 {
    362     gen_op_mov_reg_v(ot, reg, cpu_T[1]);
    363 }
    364 
    365 static inline void gen_op_mov_reg_A0(int size, int reg)
    366 {
    367     switch(size) {
    368     case 0:
    369         tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
    370         break;
    371 #ifdef TARGET_X86_64
    372     case 1:
    373         tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
    374         /* high part of register set to zero */
    375         tcg_gen_movi_tl(cpu_tmp0, 0);
    376         tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
    377         break;
    378     default:
    379     case 2:
    380         tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
    381         break;
    382 #else
    383     default:
    384     case 1:
    385         tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
    386         break;
    387 #endif
    388     }
    389 }
    390 
    391 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
    392 {
    393     switch(ot) {
    394     case OT_BYTE:
    395         if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
    396 #ifndef VBOX
    397             goto std_case;
    398 #else
    399             tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);
    400 #endif
    401         } else {
    402             tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
    403         }
    404         break;
    405     default:
    406 #ifndef VBOX
    407     std_case:
    408 #endif
    409         tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
    410         break;
    411     }
    412 }
    413 
    414416static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
    415417{
     
    419421static inline void gen_op_movl_A0_reg(int reg)
    420422{
    421     tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
     423    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
    422424}
    423425
     
    461463    switch(size) {
    462464    case 0:
    463         tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    464         tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
    465         tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
     465        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
     466        tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
     467        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
     468        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
    466469        break;
    467470    case 1:
    468         tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    469         tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
    470 #ifdef TARGET_X86_64
    471         tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
    472 #endif
    473         tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
     471        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
     472        /* For x86_64, this sets the higher half of register to zero.
     473           For i386, this is equivalent to a nop. */
     474        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
     475        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
    474476        break;
    475477#ifdef TARGET_X86_64
    476478    case 2:
    477         tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    478         tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
    479         tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
     479        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
    480480        break;
    481481#endif
     
    487487    switch(size) {
    488488    case 0:
    489         tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    490         tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
    491         tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
     489        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
     490        tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
     491        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
     492        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
    492493        break;
    493494    case 1:
    494         tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    495         tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
    496 #ifdef TARGET_X86_64
    497         tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
    498 #endif
    499         tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
     495        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
     496        /* For x86_64, this sets the higher half of register to zero.
     497           For i386, this is equivalent to a nop. */
     498        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
     499        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
    500500        break;
    501501#ifdef TARGET_X86_64
    502502    case 2:
    503         tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    504         tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
    505         tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
     503        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
    506504        break;
    507505#endif
     
    516514static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
    517515{
    518     tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
     516    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    519517    if (shift != 0)
    520518        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    521519    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    522 #ifdef TARGET_X86_64
    523     tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    524 #endif
     520    /* For x86_64, this sets the higher half of register to zero.
     521       For i386, this is equivalent to a nop. */
     522    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
    525523}
    526524
     
    620618static inline void gen_op_movq_A0_reg(int reg)
    621619{
    622     tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
     620    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
    623621}
    624622
    625623static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
    626624{
    627     tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
     625    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    628626    if (shift != 0)
    629627        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
     
    877875static inline void gen_op_jnz_ecx(int size, int label1)
    878876{
    879     tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
     877    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    880878    gen_extu(size + 1, cpu_tmp0);
    881879    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
     
    884882static inline void gen_op_jz_ecx(int size, int label1)
    885883{
    886     tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
     884    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    887885    gen_extu(size + 1, cpu_tmp0);
    888886    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
     
    17731771    if (is_right) {
    17741772        tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
    1775         tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
     1773        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
    17761774        tcg_gen_shl_tl(t0, t0, cpu_tmp0);
    17771775    } else {
    17781776        tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
    1779         tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
     1777        tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
    17801778        tcg_gen_shr_tl(t0, t0, cpu_tmp0);
    17811779    }
     
    19981996
    19991997            /* only needed if count > 16, but a test would complicate */
    2000             tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
     1998            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
    20011999            tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
    20022000
     
    20122010
    20132011            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
    2014             tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5);
    2015             tcg_gen_shr_tl(cpu_tmp6, t1, cpu_tmp0);
    2016             tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp6);
     2012            tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
     2013            tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
     2014            tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);
    20172015
    20182016            tcg_gen_shl_tl(t0, t0, t2);
    2019             tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
     2017            tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
    20202018            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
    20212019            tcg_gen_or_tl(t0, t0, t1);
     
    20302028
    20312029            tcg_gen_shr_tl(t0, t0, t2);
    2032             tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
     2030            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
    20332031            tcg_gen_shl_tl(t1, t1, cpu_tmp5);
    20342032            tcg_gen_or_tl(t0, t0, t1);
     
    20412039
    20422040            tcg_gen_shl_tl(t0, t0, t2);
    2043             tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
     2041            tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
    20442042            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
    20452043            tcg_gen_or_tl(t0, t0, t1);
     
    22232221            }
    22242222        }
    2225         /* XXX: index == 4 is always invalid */
    2226         if (havesib && (index != 4 || scale != 0)) {
     2223        /* index == 4 means no index */
     2224        if (havesib && (index != 4)) {
    22272225#ifdef TARGET_X86_64
    22282226            if (s->aflag == 2) {
     
    30213019    [0x29] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    30223020    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    3023     [0x2b] = { SSE_SPECIAL, SSE_SPECIAL },  /* movntps, movntpd */
     3021    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    30243022    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    30253023    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
     
    30783076    [0x76] = MMX_OP2(pcmpeql),
    30793077    [0x77] = { SSE_DUMMY }, /* emms */
     3078    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
     3079    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    30803080    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    30813081    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
     
    33583358        case 0x02b: /* movntps */
    33593359        case 0x12b: /* movntps */
     3360            if (mod == 3)
     3361                goto illegal_op;
     3362            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
     3363            gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
     3364            break;
    33603365        case 0x3f0: /* lddqu */
    33613366            if (mod == 3)
    33623367                goto illegal_op;
    33633368            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
    3364             gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
     3369            gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
     3370            break;
     3371        case 0x22b: /* movntss */
     3372        case 0x32b: /* movntsd */
     3373            if (mod == 3)
     3374                goto illegal_op;
     3375            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
     3376            if (b1 & 1) {
     3377                gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
     3378                    xmm_regs[reg]));
     3379            } else {
     3380                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
     3381                    xmm_regs[reg].XMM_L(0)));
     3382                gen_op_st_T0_A0(OT_LONG + s->mem_index);
     3383            }
    33653384            break;
    33663385        case 0x6e: /* movd mm, ea */
     
    35183537            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
    35193538                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
     3539            break;
     3540        case 0x178:
     3541        case 0x378:
     3542            {
     3543                int bit_index, field_length;
     3544
     3545                if (b1 == 1 && reg != 0)
     3546                    goto illegal_op;
     3547                field_length = ldub_code(s->pc++) & 0x3F;
     3548                bit_index = ldub_code(s->pc++) & 0x3F;
     3549                tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
     3550                    offsetof(CPUX86State,xmm_regs[reg]));
     3551                if (b1 == 1)
     3552                    gen_helper_extrq_i(cpu_ptr0, tcg_const_i32(bit_index),
     3553                        tcg_const_i32(field_length));
     3554                else
     3555                    gen_helper_insertq_i(cpu_ptr0, tcg_const_i32(bit_index),
     3556                        tcg_const_i32(field_length));
     3557            }
    35203558            break;
    35213559        case 0x7e: /* movd ea, mm */
     
    49124950                ot = OT_QUAD;
    49134951            } else if (op == 3 || op == 5) {
    4914                 /* for call calls, the operand is 16 or 32 bit, even
    4915                    in long mode */
    4916                 ot = dflag ? OT_LONG : OT_WORD;
     4952                ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
    49174953            } else if (op == 6) {
    49184954                /* default push size is 64 bit */
     
    51985234            }
    51995235            label1 = gen_new_label();
    5200             tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUState, regs[R_EAX]));
    5201             tcg_gen_sub_tl(t2, t2, t0);
     5236            tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
    52025237            gen_extu(ot, t2);
    52035238            tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
     
    56235658
    56245659    case 0x91 ... 0x97: /* xchg R, EAX */
     5660    do_xchg_reg_eax:
    56255661        ot = dflag + OT_WORD;
    56265662        reg = (b & 7) | REX_B(s);
     
    57775813            tcg_gen_movi_tl(cpu_T3, val);
    57785814        } else {
    5779             tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_ECX]));
     5815            tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
    57805816        }
    57815817        gen_shiftd_rm_T1_T3(s, ot, opreg, op);
     
    65936629            if (s->dflag == 0)
    65946630                tval &= 0xffff;
    6595 #ifdef VBOX /* upstream fix */
    6596             else if (!CODE64(s))
     6631            else if(!CODE64(s))
    65976632                tval &= 0xffffffff;
    6598 #endif
    65996633            gen_movtl_T0_im(next_eip);
    66006634            gen_push_T0(s);
     
    66936727                l1 = gen_new_label();
    66946728                gen_jcc1(s, s->cc_op, b ^ 1, l1);
    6695                 tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
     6729                tcg_gen_mov_tl(cpu_regs[reg], t0);
    66966730                gen_set_label(l1);
    6697                 tcg_gen_movi_tl(cpu_tmp0, 0);
    6698                 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
     6731                tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
    66996732            } else
    67006733#endif
     
    69306963            modrm = ldub_code(s->pc++);
    69316964            reg = ((modrm >> 3) & 7) | rex_r;
    6932             gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
     6965            gen_ldst_modrm(s,modrm, ot, OR_TMP0, 0);
    69336966            gen_extu(ot, cpu_T[0]);
    6934             label1 = gen_new_label();
    6935             tcg_gen_movi_tl(cpu_cc_dst, 0);
    69366967            t0 = tcg_temp_local_new();
    69376968            tcg_gen_mov_tl(t0, cpu_T[0]);
    6938             tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
    6939             if (b & 1) {
    6940                 gen_helper_bsr(cpu_T[0], t0);
     6969            if ((b & 1) && (prefixes & PREFIX_REPZ) &&
     6970                (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
     6971                switch(ot) {
     6972                case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
     6973                    tcg_const_i32(16)); break;
     6974                case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
     6975                    tcg_const_i32(32)); break;
     6976                case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
     6977                    tcg_const_i32(64)); break;
     6978                }
     6979                gen_op_mov_reg_T0(ot, reg);
    69416980            } else {
    6942                 gen_helper_bsf(cpu_T[0], t0);
    6943             }
    6944             gen_op_mov_reg_T0(ot, reg);
    6945             tcg_gen_movi_tl(cpu_cc_dst, 1);
    6946             gen_set_label(label1);
    6947             tcg_gen_discard_tl(cpu_cc_src);
    6948             s->cc_op = CC_OP_LOGICB + ot;
     6981                label1 = gen_new_label();
     6982                tcg_gen_movi_tl(cpu_cc_dst, 0);
     6983                tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
     6984                if (b & 1) {
     6985                    gen_helper_bsr(cpu_T[0], t0);
     6986                } else {
     6987                    gen_helper_bsf(cpu_T[0], t0);
     6988                }
     6989                gen_op_mov_reg_T0(ot, reg);
     6990                tcg_gen_movi_tl(cpu_cc_dst, 1);
     6991                gen_set_label(label1);
     6992                tcg_gen_discard_tl(cpu_cc_src);
     6993                s->cc_op = CC_OP_LOGICB + ot;
     6994            }
    69496995            tcg_temp_free(t0);
    69506996        }
     
    70057051        /* misc */
    70067052    case 0x90: /* nop */
    7007         /* XXX: xchg + rex handling */
    70087053        /* XXX: correct lock test for all insn */
    7009         if (prefixes & PREFIX_LOCK)
     7054        if (prefixes & PREFIX_LOCK) {
    70107055            goto illegal_op;
     7056        }
     7057        /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
     7058        if (REX_B(s)) {
     7059            goto do_xchg_reg_eax;
     7060        }
    70117061        if (prefixes & PREFIX_REPZ) {
    70127062            gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
     
    75867636        case 4: /* smsw */
    75877637            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
    7588 #if defined TARGET_X86_64 && defined WORDS_BIGENDIAN
     7638#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
    75897639            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
    75907640#else
     
    76047654            }
    76057655            break;
    7606         case 7: /* invlpg */
    7607             if (s->cpl != 0) {
    7608                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    7609             } else {
    7610                 if (mod == 3) {
    7611 #ifdef TARGET_X86_64
    7612                     if (CODE64(s) && rm == 0) {
    7613                         /* swapgs */
    7614                         tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
    7615                         tcg_gen_ld_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,kernelgsbase));
    7616                         tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
    7617                         tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,kernelgsbase));
    7618                     } else
    7619 #endif
    7620                     {
    7621                         goto illegal_op;
    7622                     }
     7656        case 7:
     7657            if (mod != 3) { /* invlpg */
     7658                if (s->cpl != 0) {
     7659                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    76237660                } else {
    76247661                    if (s->cc_op != CC_OP_DYNAMIC)
     
    76297666                    gen_jmp_im(s->pc - s->cs_base);
    76307667                    gen_eob(s);
     7668                }
     7669            } else {
     7670                switch (rm) {
     7671                case 0: /* swapgs */
     7672#ifdef TARGET_X86_64
     7673                    if (CODE64(s)) {
     7674                        if (s->cpl != 0) {
     7675                            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
     7676                        } else {
     7677                            tcg_gen_ld_tl(cpu_T[0], cpu_env,
     7678                                offsetof(CPUX86State,segs[R_GS].base));
     7679                            tcg_gen_ld_tl(cpu_T[1], cpu_env,
     7680                                offsetof(CPUX86State,kernelgsbase));
     7681                            tcg_gen_st_tl(cpu_T[1], cpu_env,
     7682                                offsetof(CPUX86State,segs[R_GS].base));
     7683                            tcg_gen_st_tl(cpu_T[0], cpu_env,
     7684                                offsetof(CPUX86State,kernelgsbase));
     7685                        }
     7686                    } else
     7687#endif
     7688                    {
     7689                        goto illegal_op;
     7690                    }
     7691                    break;
     7692                case 1: /* rdtscp */
     7693                    if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
     7694                        goto illegal_op;
     7695                    if (s->cc_op != CC_OP_DYNAMIC)
     7696                        gen_op_set_cc_op(s->cc_op);
     7697                    gen_jmp_im(pc_start - s->cs_base);
     7698                    if (use_icount)
     7699                        gen_io_start();
     7700                    gen_helper_rdtscp();
     7701                    if (use_icount) {
     7702                        gen_io_end();
     7703                        gen_jmp(s, s->pc - s->cs_base);
     7704                    }
     7705                    break;
     7706                default:
     7707                    goto illegal_op;
    76317708                }
    76327709            }
     
    76767753        {
    76777754            int label1;
    7678             TCGv t0, t1, t2;
    7679 #ifdef VBOX
    7680             TCGv a0;
    7681 #endif
     7755            TCGv t0, t1, t2, a0;
    76827756
    76837757            if (!s->pe || s->vm86)
     
    76867760            t1 = tcg_temp_local_new();
    76877761            t2 = tcg_temp_local_new();
    7688 #ifdef VBOX
    7689             a0 = tcg_temp_local_new();
    7690 #endif
    76917762            ot = OT_WORD;
    76927763            modrm = ldub_code(s->pc++);
     
    76967767            if (mod != 3) {
    76977768                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
    7698 #ifdef VBOX
     7769                gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
     7770                a0 = tcg_temp_local_new();
    76997771                tcg_gen_mov_tl(a0, cpu_A0);
    7700 #endif
    7701                 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
    77027772            } else {
    77037773                gen_op_mov_v_reg(ot, t0, rm);
     7774                TCGV_UNUSED(a0);
    77047775            }
    77057776            gen_op_mov_v_reg(ot, t1, reg);
     
    77147785            gen_set_label(label1);
    77157786            if (mod != 3) {
    7716 #ifdef VBOX
    7717                 /* cpu_A0 doesn't survive branch */
    77187787                gen_op_st_v(ot + s->mem_index, t0, a0);
    7719 #else
    7720                 gen_op_st_v(ot + s->mem_index, t0, cpu_A0);
    7721 #endif
    7722             } else {
     7788                tcg_temp_free(a0);
     7789           } else {
    77237790                gen_op_mov_reg_v(ot, rm, t0);
    77247791            }
     
    77327799            tcg_temp_free(t1);
    77337800            tcg_temp_free(t2);
    7734 #ifdef VBOX
    7735             tcg_temp_free(a0);
    7736 #endif
    77377801        }
    77387802        break;
     
    78037867            else
    78047868                ot = OT_LONG;
     7869            if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
     7870                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
     7871                reg = 8;
     7872            }
    78057873            switch(reg) {
    78067874            case 0:
     
    78907958        case 0: /* fxsave */
    78917959            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
    7892                 (s->flags & HF_EM_MASK))
     7960                (s->prefix & PREFIX_LOCK))
    78937961                goto illegal_op;
    7894             if (s->flags & HF_TS_MASK) {
     7962            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
    78957963                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
    78967964                break;
     
    79047972        case 1: /* fxrstor */
    79057973            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
    7906                 (s->flags & HF_EM_MASK))
     7974                (s->prefix & PREFIX_LOCK))
    79077975                goto illegal_op;
    7908             if (s->flags & HF_TS_MASK) {
     7976            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
    79097977                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
    79107978                break;
     
    80058073    case 0x128 ... 0x12f:
    80068074    case 0x138 ... 0x13a:
    8007     case 0x150 ... 0x177:
     8075    case 0x150 ... 0x179:
    80088076    case 0x17c ... 0x17f:
    80098077    case 0x1c2:
     
    80438111    cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
    80448112                                    "cc_tmp");
     8113
     8114#ifdef TARGET_X86_64
     8115    cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
     8116                                             offsetof(CPUState, regs[R_EAX]), "rax");
     8117    cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
     8118                                             offsetof(CPUState, regs[R_ECX]), "rcx");
     8119    cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
     8120                                             offsetof(CPUState, regs[R_EDX]), "rdx");
     8121    cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
     8122                                             offsetof(CPUState, regs[R_EBX]), "rbx");
     8123    cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
     8124                                             offsetof(CPUState, regs[R_ESP]), "rsp");
     8125    cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
     8126                                             offsetof(CPUState, regs[R_EBP]), "rbp");
     8127    cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
     8128                                             offsetof(CPUState, regs[R_ESI]), "rsi");
     8129    cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
     8130                                             offsetof(CPUState, regs[R_EDI]), "rdi");
     8131    cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
     8132                                         offsetof(CPUState, regs[8]), "r8");
     8133    cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
     8134                                          offsetof(CPUState, regs[9]), "r9");
     8135    cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
     8136                                          offsetof(CPUState, regs[10]), "r10");
     8137    cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
     8138                                          offsetof(CPUState, regs[11]), "r11");
     8139    cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
     8140                                          offsetof(CPUState, regs[12]), "r12");
     8141    cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
     8142                                          offsetof(CPUState, regs[13]), "r13");
     8143    cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
     8144                                          offsetof(CPUState, regs[14]), "r14");
     8145    cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
     8146                                          offsetof(CPUState, regs[15]), "r15");
     8147#else
     8148    cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
     8149                                             offsetof(CPUState, regs[R_EAX]), "eax");
     8150    cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
     8151                                             offsetof(CPUState, regs[R_ECX]), "ecx");
     8152    cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
     8153                                             offsetof(CPUState, regs[R_EDX]), "edx");
     8154    cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
     8155                                             offsetof(CPUState, regs[R_EBX]), "ebx");
     8156    cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
     8157                                             offsetof(CPUState, regs[R_ESP]), "esp");
     8158    cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
     8159                                             offsetof(CPUState, regs[R_EBP]), "ebp");
     8160    cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
     8161                                             offsetof(CPUState, regs[R_ESI]), "esi");
     8162    cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
     8163                                             offsetof(CPUState, regs[R_EDI]), "edi");
     8164#endif
    80458165
    80468166    /* register helpers */
     
    81438263    cpu_tmp4 = tcg_temp_new();
    81448264    cpu_tmp5 = tcg_temp_new();
    8145     cpu_tmp6 = tcg_temp_new();
    81468265    cpu_ptr0 = tcg_temp_new_ptr();
    81478266    cpu_ptr1 = tcg_temp_new_ptr();
     
    81598278    gen_icount_start();
    81608279    for(;;) {
    8161         if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
    8162             TAILQ_FOREACH(bp, &env->breakpoints, entry) {
     8280        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
     8281            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
    81638282                if (bp->pc == pc_ptr &&
    81648283                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
  • trunk/src/recompiler/tcg/i386/tcg-target.c

    r36175 r37675  
    293293}
    294294
    295 static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val)
    296 {
    297     if (val == (int8_t)val) {
     295static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val, int cf)
     296{
     297    if (!cf && ((c == ARITH_ADD && val == 1) || (c == ARITH_SUB && val == -1))) {
     298        /* inc */
     299        tcg_out_opc(s, 0x40 + r0);
     300    } else if (!cf && ((c == ARITH_ADD && val == -1) || (c == ARITH_SUB && val == 1))) {
     301        /* dec */
     302        tcg_out_opc(s, 0x48 + r0);
     303    } else if (val == (int8_t)val) {
    298304        tcg_out_modrm(s, 0x83, c, r0);
    299305        tcg_out8(s, val);
     306    } else if (c == ARITH_AND && val == 0xffu && r0 < 4) {
     307        /* movzbl */
     308        tcg_out_modrm(s, 0xb6 | P_EXT, r0, r0);
     309    } else if (c == ARITH_AND && val == 0xffffu) {
     310        /* movzwl */
     311        tcg_out_modrm(s, 0xb7 | P_EXT, r0, r0);
    300312    } else {
    301313        tcg_out_modrm(s, 0x81, c, r0);
     
    307319{
    308320    if (val != 0)
    309         tgen_arithi(s, ARITH_ADD, reg, val);
     321        tgen_arithi(s, ARITH_ADD, reg, val, 0);
    310322}
    311323
     
    314326{
    315327    if (val != 0)
    316         tgen_arithi(s, ARITH_SUB, reg, val);
     328        tgen_arithi(s, ARITH_SUB, reg, val, 0);
    317329}
    318330#endif
     
    363375            tcg_out_modrm(s, 0x85, arg1, arg1);
    364376        } else {
    365             tgen_arithi(s, ARITH_CMP, arg1, arg2);
     377            tgen_arithi(s, ARITH_CMP, arg1, arg2, 0);
    366378        }
    367379    } else {
     
    370382    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
    371383}
    372 
    373 #ifdef VBOX
    374 
    375 DECLINLINE(void) tcg_out_long_call(TCGContext *s, void* dst)
    376 {
    377     intptr_t disp;
    378 # ifdef VBOX
    379     tcg_gen_stack_alignment_check(s);
    380 # endif
    381     disp = (uintptr_t)dst - (uintptr_t)s->code_ptr - 5;
    382     tcg_out8(s,  0xe8); /* call disp32 */
    383     tcg_out32(s, disp); /* disp32 */
    384 }
    385 
    386 DECLINLINE(void) tcg_out_long_jmp(TCGContext *s, void* dst)
    387 {
    388     intptr_t disp = (uintptr_t)dst - (uintptr_t)s->code_ptr - 5;
    389     tcg_out8(s,  0xe9); /* jmp disp32 */
    390     tcg_out32(s, disp); /* disp32 */
    391 }
    392 
    393 #endif /* VBOX */
    394384
    395385/* XXX: we implement it at the target level to avoid having to
     
    474464#endif
    475465
     466#ifndef CONFIG_USER_ONLY
     467#define GUEST_BASE 0
     468#endif
     469
    476470#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    477 static void *vbox_ld_helpers[] = {
     471
     472static void * const vbox_ld_helpers[] = {
    478473    __ldub_vbox_phys,
    479474    __lduw_vbox_phys,
     
    486481};
    487482
    488 static void *vbox_st_helpers[] = {
     483static void * const vbox_st_helpers[] = {
    489484    __stb_vbox_phys,
    490485    __stw_vbox_phys,
     
    493488};
    494489
     490DECLINLINE(void) tcg_out_long_call(TCGContext *s, void* dst)
     491{
     492    intptr_t disp;
     493# ifdef VBOX
     494    tcg_gen_stack_alignment_check(s);
     495# endif
     496    disp = (uintptr_t)dst - (uintptr_t)s->code_ptr - 5;
     497    tcg_out8(s,  0xe8); /* call disp32 */
     498    tcg_out32(s, disp); /* disp32 */
     499}
     500
    495501static void tcg_out_vbox_phys_read(TCGContext *s, int index,
    496502                                   int addr_reg,
     
    503509    AssertMsg(sizeof(RTGCPHYS) == 8, ("Physical address must be 64-bits, update caller\n"));
    504510
    505 #if 0
     511# if 0
    506512    tcg_out8(s, 0x6a); tcg_out8(s, 0x00); /* push $0 */
    507513    tcg_out_push(s, addr_reg);
    508 #else
     514# else
    509515    /* mov addr_reg, %eax */
    510516    tcg_out_mov(s, TCG_REG_EAX, addr_reg);
    511 #endif
     517# endif
    512518
    513519    tcg_out_long_call(s, vbox_ld_helpers[index]);
     
    526532    int useReg2 = ((index & 3) == 3);
    527533
    528 #if 0
     534# if 0
    529535    /* out parameter (value2) */
    530536    if (useReg2)
     
    536542    tcg_out8(s, 0x6a); tcg_out8(s, 0x00); /* push $0 */
    537543    tcg_out_push(s, addr_reg);
    538 #else
     544# else
    539545    Assert(val_reg !=  TCG_REG_EAX && (!useReg2 || (val_reg2 != TCG_REG_EAX)));
    540546    /* mov addr_reg, %eax */
     
    546552        tcg_out_mov(s, TCG_REG_ECX, val_reg2);
    547553
    548 #endif
     554# endif
    549555    /* call it */
    550556    tcg_out_long_call(s, vbox_st_helpers[index]);
    551557
    552558    /* clean stack after us */
    553 #if 0
     559# if 0
    554560    tcg_out_addi(s, TCG_REG_ESP, 8 + (useReg2 ? 8 : 4));
    555561# endif
     
    604610    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
    605611
    606 #ifndef VBOX
    607612    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    608613    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
     614#ifndef VBOX
    609615    tcg_out8(s, (5 << 3) | r1);
     616#else
     617    tcg_out8(s, (TCG_AREG0 << 3) | r1); /* env, not %ebp */
     618    Assert(mem_index >= 0 && mem_index < NB_MMU_MODES);
     619#endif
    610620    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));
    611 #else
    612     tcg_out_opc(s, 0x8d); /* lea offset(r1, env), r1 */
    613     tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    614     tcg_out8(s, (TCG_AREG0 << 3) | r1);
    615     tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));
    616 #endif
    617621
    618622    /* cmp 0(r1), r0 */
     
    714718    case 0:
    715719        /* movzbl */
    716         tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);
     720        tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, GUEST_BASE);
    717721        break;
    718722    case 0 | 4:
    719723        /* movsbl */
    720         tcg_out_modrm_offset(s, 0xbe | P_EXT, data_reg, r0, 0);
     724        tcg_out_modrm_offset(s, 0xbe | P_EXT, data_reg, r0, GUEST_BASE);
    721725        break;
    722726    case 1:
    723727        /* movzwl */
    724         tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
     728        tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, GUEST_BASE);
    725729        if (bswap) {
    726730            /* rolw $8, data_reg */
     
    732736    case 1 | 4:
    733737        /* movswl */
    734         tcg_out_modrm_offset(s, 0xbf | P_EXT, data_reg, r0, 0);
     738        tcg_out_modrm_offset(s, 0xbf | P_EXT, data_reg, r0, GUEST_BASE);
    735739        if (bswap) {
    736740            /* rolw $8, data_reg */
     
    745749    case 2:
    746750        /* movl (r0), data_reg */
    747         tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
     751        tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE);
    748752        if (bswap) {
    749753            /* bswap */
     
    761765        }
    762766        if (!bswap) {
    763             tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
    764             tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 4);
     767            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE);
     768            tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, GUEST_BASE + 4);
    765769        } else {
    766             tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 4);
     770            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE + 4);
    767771            tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);
    768772
    769             tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 0);
     773            tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, GUEST_BASE);
    770774            /* bswap */
    771775            tcg_out_opc(s, (0xc8 + data_reg2) | P_EXT);
     
    842846    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
    843847
    844 #ifndef VBOX
    845848    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    846849    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
     850#ifndef VBOX
    847851    tcg_out8(s, (5 << 3) | r1);
     852#else
     853    tcg_out8(s, (TCG_AREG0 << 3) | r1); /* env is not %ebp */
     854    Assert(mem_index >= 0 && mem_index < NB_MMU_MODES);
     855#endif
    848856    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));
    849 #else
    850     tcg_out_opc(s, 0x8d); /* lea offset(r1, env), r1 */
    851     tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    852     tcg_out8(s, (TCG_AREG0 << 3) | r1);
    853     tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));
    854 #endif
    855857
    856858    /* cmp 0(r1), r0 */
     
    10041006    case 0:
    10051007        /* movb */
    1006         tcg_out_modrm_offset(s, 0x88, data_reg, r0, 0);
     1008        tcg_out_modrm_offset(s, 0x88, data_reg, r0, GUEST_BASE);
    10071009        break;
    10081010    case 1:
     
    10161018        /* movw */
    10171019        tcg_out8(s, 0x66);
    1018         tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
     1020        tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE);
    10191021        break;
    10201022    case 2:
     
    10261028        }
    10271029        /* movl */
    1028         tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
     1030        tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE);
    10291031        break;
    10301032    case 3:
     
    10331035            /* bswap data_reg */
    10341036            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
    1035             tcg_out_modrm_offset(s, 0x89, r1, r0, 0);
     1037            tcg_out_modrm_offset(s, 0x89, r1, r0, GUEST_BASE);
    10361038            tcg_out_mov(s, r1, data_reg);
    10371039            /* bswap data_reg */
    10381040            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
    1039             tcg_out_modrm_offset(s, 0x89, r1, r0, 4);
     1041            tcg_out_modrm_offset(s, 0x89, r1, r0, GUEST_BASE + 4);
    10401042        } else {
    1041             tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
    1042             tcg_out_modrm_offset(s, 0x89, data_reg2, r0, 4);
     1043            tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE);
     1044            tcg_out_modrm_offset(s, 0x89, data_reg2, r0, GUEST_BASE + 4);
    10431045        }
    10441046        break;
     
    11581160    gen_arith:
    11591161        if (const_args[2]) {
    1160             tgen_arithi(s, c, args[0], args[2]);
     1162            tgen_arithi(s, c, args[0], args[2], 0);
    11611163        } else {
    11621164            tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]);
     
    12161218    case INDEX_op_add2_i32:
    12171219        if (const_args[4])
    1218             tgen_arithi(s, ARITH_ADD, args[0], args[4]);
     1220            tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
    12191221        else
    12201222            tcg_out_modrm(s, 0x01 | (ARITH_ADD << 3), args[4], args[0]);
    12211223        if (const_args[5])
    1222             tgen_arithi(s, ARITH_ADC, args[1], args[5]);
     1224            tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
    12231225        else
    12241226            tcg_out_modrm(s, 0x01 | (ARITH_ADC << 3), args[5], args[1]);
     
    12261228    case INDEX_op_sub2_i32:
    12271229        if (const_args[4])
    1228             tgen_arithi(s, ARITH_SUB, args[0], args[4]);
     1230            tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
    12291231        else
    12301232            tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), args[4], args[0]);
    12311233        if (const_args[5])
    1232             tgen_arithi(s, ARITH_SBB, args[1], args[5]);
     1234            tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
    12331235        else
    12341236            tcg_out_modrm(s, 0x01 | (ARITH_SBB << 3), args[5], args[1]);
     
    12631265    case INDEX_op_ext16s_i32:
    12641266        tcg_out_modrm(s, 0xbf | P_EXT, args[0], args[1]);
     1267        break;
     1268    case INDEX_op_ext8u_i32:
     1269        tcg_out_modrm(s, 0xb6 | P_EXT, args[0], args[1]);
     1270        break;
     1271    case INDEX_op_ext16u_i32:
     1272        tcg_out_modrm(s, 0xb7 | P_EXT, args[0], args[1]);
    12651273        break;
    12661274
     
    13321340    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    13331341    { INDEX_op_sar_i32, { "r", "0", "ci" } },
    1334     { INDEX_op_sar_i32, { "r", "0", "ci" } },
    13351342    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    13361343    { INDEX_op_rotr_i32, { "r", "0", "ci" } },
     
    13511358    { INDEX_op_ext8s_i32, { "r", "q" } },
    13521359    { INDEX_op_ext16s_i32, { "r", "r" } },
     1360    { INDEX_op_ext8u_i32, { "r", "q"} },
     1361    { INDEX_op_ext16u_i32, { "r", "r"} },
    13531362
    13541363#if TARGET_LONG_BITS == 32
  • trunk/src/recompiler/tcg/i386/tcg-target.h

    r36175 r37675  
    5353#define TCG_TARGET_HAS_ext16s_i32
    5454#define TCG_TARGET_HAS_rot_i32
     55#define TCG_TARGET_HAS_ext8u_i32
     56#define TCG_TARGET_HAS_ext16u_i32
     57
     58#define TCG_TARGET_HAS_GUEST_BASE
    5559
    5660/* Note: must be synced with dyngen-exec.h */
  • trunk/src/recompiler/tcg/tcg-op.h

    r36175 r37675  
    11901190}
    11911191
    1192 /* These are currently just for convenience.
    1193    We assume a target will recognise these automatically .  */
    11941192static inline void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
    11951193{
     1194#ifdef TCG_TARGET_HAS_ext8u_i32
     1195    tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
     1196#else
    11961197    tcg_gen_andi_i32(ret, arg, 0xffu);
     1198#endif
    11971199}
    11981200
    11991201static inline void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
    12001202{
     1203#ifdef TCG_TARGET_HAS_ext16u_i32
     1204    tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
     1205#else
    12011206    tcg_gen_andi_i32(ret, arg, 0xffffu);
     1207#endif
    12021208}
    12031209
     
    13591365static inline void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
    13601366{
     1367#ifdef TCG_TARGET_HAS_ext8u_i64
     1368    tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg);
     1369#else
    13611370    tcg_gen_andi_i64(ret, arg, 0xffu);
     1371#endif
    13621372}
    13631373
    13641374static inline void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
    13651375{
     1376#ifdef TCG_TARGET_HAS_ext16u_i64
     1377    tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg);
     1378#else
    13661379    tcg_gen_andi_i64(ret, arg, 0xffffu);
     1380#endif
    13671381}
    13681382
    13691383static inline void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
    13701384{
     1385#ifdef TCG_TARGET_HAS_ext32u_i64
     1386    tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg);
     1387#else
    13711388    tcg_gen_andi_i64(ret, arg, 0xffffffffu);
     1389#endif
    13721390}
    13731391
     
    13831401static inline void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
    13841402{
    1385     tcg_gen_andi_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)), 0xffffffffu);
     1403    tcg_gen_ext32u_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)));
    13861404}
    13871405
  • trunk/src/recompiler/tcg/tcg-opc.h

    r36175 r37675  
    8989#ifdef TCG_TARGET_HAS_ext16s_i32
    9090DEF2(ext16s_i32, 1, 1, 0, 0)
     91#endif
     92#ifdef TCG_TARGET_HAS_ext8u_i32
     93DEF2(ext8u_i32, 1, 1, 0, 0)
     94#endif
     95#ifdef TCG_TARGET_HAS_ext16u_i32
     96DEF2(ext16u_i32, 1, 1, 0, 0)
    9197#endif
    9298#ifdef TCG_TARGET_HAS_bswap16_i32
     
    153159DEF2(ext32s_i64, 1, 1, 0, 0)
    154160#endif
     161#ifdef TCG_TARGET_HAS_ext8u_i64
     162DEF2(ext8u_i64, 1, 1, 0, 0)
     163#endif
     164#ifdef TCG_TARGET_HAS_ext16u_i64
     165DEF2(ext16u_i64, 1, 1, 0, 0)
     166#endif
     167#ifdef TCG_TARGET_HAS_ext32u_i64
     168DEF2(ext32u_i64, 1, 1, 0, 0)
     169#endif
    155170#ifdef TCG_TARGET_HAS_bswap16_i64
    156171DEF2(bswap16_i64, 1, 1, 0, 0)
  • trunk/src/recompiler/tcg/tcg.c

    r36175 r37675  
    2828#include "config.h"
    2929
    30 #ifndef DEBUG_TCG
     30#ifndef CONFIG_DEBUG_TCG
    3131/* define it to suppress various consistency checks (faster) */
    3232#define NDEBUG
     
    5252#include "qemu-common.h"
    5353#include "cache-utils.h"
     54#include "host-utils.h"
    5455
    5556/* Note: the long term plan is to reduce the dependancies on the QEMU
     
    6364#include "elf.h"
    6465
     66#if defined(CONFIG_USE_GUEST_BASE) && !defined(TCG_TARGET_HAS_GUEST_BASE)
     67#error GUEST_BASE not supported on this host.
     68#endif
    6569
    6670#ifdef VBOX
     
    7478#  undef USE_LIVENESS_ANALYSIS
    7579# endif
     80///* With 0.12.5 the liveness analysis does not work well even when targeting
     81//   32-bit guest cpus.  Just disable it wholesale to be on the safe side.  */
     82//#undef USE_LIVENESS_ANALYSIS
    7683#endif /* VBOX */
    7784
     
    8289#define DEF(s, n, copy_size) { #s, 0, 0, n, n, 0, copy_size },
    8390#ifndef VBOX
    84 #define DEF2(s, iargs, oargs, cargs, flags) { #s, iargs, oargs, cargs, iargs + oargs + cargs, flags, 0 },
     91#define DEF2(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags, 0 },
    8592#else  /* VBOX */
    86 # define DEF2(s, iargs, oargs, cargs, flags) { #s, iargs, oargs, cargs, iargs + oargs + cargs, flags, 0, 0, 0 },
     93# define DEF2(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags, 0, 0, 0 },
    8794#endif /* VBOX */
    8895#include "tcg-opc.h"
     
    11171124    nb_ops = gen_opc_ptr - gen_opc_buf;
    11181125
    1119     /* XXX: make it really dynamic */
    1120     s->op_dead_iargs = tcg_malloc(OPC_BUF_SIZE * sizeof(uint16_t));
     1126    s->op_dead_iargs = tcg_malloc(nb_ops * sizeof(uint16_t));
    11211127
    11221128    dead_temps = tcg_malloc(s->nb_temps);
     
    19011907static int64_t tcg_table_op_count[NB_OPS];
    19021908
    1903 void dump_op_count(void)
     1909static void dump_op_count(void)
    19041910{
    19051911    int i;
     
    19391945
    19401946#ifdef DEBUG_DISAS
     1947# ifdef USE_LIVENESS_ANALYSIS /* vbox */
    19411948    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
    1942         qemu_log("OP after la:\n");
     1949        qemu_log("OP after liveness analysis:\n");
    19431950        tcg_dump_ops(s, logfile);
    19441951        qemu_log("\n");
    19451952    }
     1953# endif /* USE_LIVENESS_ANALYSIS - vbox */
    19461954#endif
    19471955
     
    19811989        case INDEX_op_debug_insn_start:
    19821990            /* debug instruction */
     1991//#ifdef VBOX /* HACK ALERT: GROSS HACK to work around registister allocation bugs in v0.12.5 */
     1992//            save_globals(s, s->reserved_regs);
     1993//#endif
    19831994            break;
    19841995        case INDEX_op_nop:
     
    21092120    cpu_fprintf(f, "  avg cycles        %0.1f\n",
    21102121                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
    2111     {
    2112         extern void dump_op_count(void);
    2113         dump_op_count();
    2114     }
     2122
     2123    dump_op_count();
    21152124}
    21162125#else
  • trunk/src/recompiler/tcg/tcg.h

    r36175 r37675  
    2424#include "qemu-common.h"
    2525#include "tcg-target.h"
     26#include "tcg-runtime.h"
    2627
    2728#if TCG_TARGET_REG_BITS == 32
     
    5758#define tcg_regset_set(d, s) (d) = (s)
    5859#define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg)
    59 #define tcg_regset_set_reg(d, r) (d) |= 1 << (r)
    60 #define tcg_regset_reset_reg(d, r) (d) &= ~(1 << (r))
     60#define tcg_regset_set_reg(d, r) (d) |= 1L << (r)
     61#define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r))
    6162#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1)
    6263#define tcg_regset_or(d, a, b) (d) = (a) | (b)
     
    122123 */
    123124
    124 #ifdef DEBUG_TCG
     125#ifdef CONFIG_DEBUG_TCG
    125126#define DEBUG_TCGV 1
    126127#endif
     
    466467void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
    467468                   int label_index, long addend);
    468 const TCGArg *tcg_gen_code_op(TCGContext *s, int opc, const TCGArg *args1,
    469                               unsigned int dead_iargs);
    470 
    471 /* tcg-runtime.c */
    472 int64_t tcg_helper_shl_i64(int64_t arg1, int64_t arg2);
    473 int64_t tcg_helper_shr_i64(int64_t arg1, int64_t arg2);
    474 int64_t tcg_helper_sar_i64(int64_t arg1, int64_t arg2);
    475 int64_t tcg_helper_div_i64(int64_t arg1, int64_t arg2);
    476 int64_t tcg_helper_rem_i64(int64_t arg1, int64_t arg2);
    477 uint64_t tcg_helper_divu_i64(uint64_t arg1, uint64_t arg2);
    478 uint64_t tcg_helper_remu_i64(uint64_t arg1, uint64_t arg2);
    479469
    480470#ifndef VBOX
    481471extern uint8_t code_gen_prologue[];
    482472#else
    483 extern uint8_t* code_gen_prologue;
     473extern uint8_t *code_gen_prologue;
    484474#endif
    485475#if defined(_ARCH_PPC) && !defined(_ARCH_PPC64)
  • trunk/src/recompiler/tcg/x86_64/tcg-target.c

    r36175 r37675  
    364364}
    365365
     366static void tcg_out_goto(TCGContext *s, int call, uint8_t *target)
     367{
     368    int32_t disp;
     369
     370    disp = target - s->code_ptr - 5;
     371    if (disp == (target - s->code_ptr - 5)) {
     372        tcg_out8(s, call ? 0xe8 : 0xe9);
     373        tcg_out32(s, disp);
     374    } else {
     375        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, (tcg_target_long) target);
     376        tcg_out_modrm(s, 0xff, call ? 2 : 4, TCG_REG_R10);
     377    }
     378}
     379
    366380static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
    367381                              int arg1, tcg_target_long arg2)
     
    384398static inline void tgen_arithi32(TCGContext *s, int c, int r0, int32_t val)
    385399{
    386     if (val == (int8_t)val) {
     400    if ((c == ARITH_ADD && val == 1) || (c == ARITH_SUB && val == -1)) {
     401        /* inc */
     402        tcg_out_modrm(s, 0xff, 0, r0);
     403    } else if ((c == ARITH_ADD && val == -1) || (c == ARITH_SUB && val == 1)) {
     404        /* dec */
     405        tcg_out_modrm(s, 0xff, 1, r0);
     406    } else if (val == (int8_t)val) {
    387407        tcg_out_modrm(s, 0x83, c, r0);
    388408        tcg_out8(s, val);
     
    401421static inline void tgen_arithi64(TCGContext *s, int c, int r0, int64_t val)
    402422{
    403     if (val == (int8_t)val) {
     423    if ((c == ARITH_ADD && val == 1) || (c == ARITH_SUB && val == -1)) {
     424        /* inc */
     425        tcg_out_modrm(s, 0xff | P_REXW, 0, r0);
     426    } else if ((c == ARITH_ADD && val == -1) || (c == ARITH_SUB && val == 1)) {
     427        /* dec */
     428        tcg_out_modrm(s, 0xff | P_REXW, 1, r0);
     429    } else if (val == (int8_t)val) {
    404430        tcg_out_modrm(s, 0x83 | P_REXW, c, r0);
    405431        tcg_out8(s, val);
     
    486512}
    487513
    488 #ifdef VBOX
    489 
    490 DECLINLINE(void) tcg_out_pushq(TCGContext *s, tcg_target_long val)
    491 {
    492      tcg_out8(s, 0x68); /* push imm32, subs 8 from rsp */
    493      tcg_out32(s, val); /* imm32 */
    494      if ((val >> 32) != 0)
    495      {
    496          tcg_out8(s, 0xc7); /* mov imm32, 4(%rsp) */
    497          tcg_out8(s, 0x44);
    498          tcg_out8(s, 0x24);
    499          tcg_out8(s, 0x04);
    500          tcg_out32(s, ((uint64_t)val) >> 32); /* imm32 */
    501      }
    502 }
    503 
    504 DECLINLINE(void) tcg_out_long_call(TCGContext *s, tcg_target_long dst)
    505 {
    506     intptr_t disp = dst - (tcg_target_long)s->code_ptr - 5;
    507     /* can do normal call */
    508     if (disp < 2LL * _1G && disp > -2LL * _1G)
    509     {
    510         tcg_out8(s,  0xe8); /* call disp32 */
    511         tcg_out32(s, disp); /* disp32 */
    512     }
    513     else
    514     {
    515 # if 0
    516         /* Somewhat tricky, but allows long jump not touching registers */
    517         int off = 5 /* push imm32 */ + 5 /* push imm32 */ + 1 /* ret */;
    518         if ((((uint64_t)s->code_ptr) + 32) >> 32)
    519             off += 8;
    520         if (dst >> 32)
    521             off += 8;
    522         /* return address */
    523         tcg_out_pushq(s, (tcg_target_long)s->code_ptr+off);
    524         /* destination */
    525         tcg_out_pushq(s, dst);
    526         tcg_out8(s, 0xc3); /* ret, used as call */
    527 # else
    528         tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RAX, dst);
    529         tcg_out8(s, 0xff); /* call *%eax */
    530         tcg_out8(s, 0xd0);
    531 # endif
    532     }
    533 }
    534 
    535 DECLINLINE(void) tcg_out_long_jmp(TCGContext *s, tcg_target_long dst)
    536 {
    537     intptr_t disp;
    538 
    539     disp = dst - (tcg_target_long)s->code_ptr - 2;
    540     /* can do short relative jump */
    541     if (disp < 0x7f && disp > -0x7f)
    542     {
    543         tcg_out8(s, 0xeb); /* short jmp */
    544         tcg_out8(s, (int8_t)disp);
    545         return;
    546     }
    547 
    548     disp = dst - (tcg_target_long)s->code_ptr - 5;
    549     if (disp < 2LL * _1G && disp > -2LL * _1G)
    550     {
    551         tcg_out8(s, 0xe9); /* jmp */
    552         tcg_out32(s, (int32_t)disp);
    553         return;
    554     }
    555 # if 0
    556     tcg_out_pushq(s, dst);
    557     tcg_out8(s, 0xc3); /* ret */
    558 # else
    559     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RAX, dst);
    560     tcg_out8(s, 0xff); /* jmp *%eax */
    561     tcg_out8(s, 0xe0);
    562 # endif
    563 }
    564 
    565 #endif /* VBOX */
    566 
    567514#if defined(CONFIG_SOFTMMU)
    568515
     
    585532
    586533#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    587 static void *vbox_ld_helpers[] = {
     534static void * const vbox_ld_helpers[] = {
    588535    __ldub_vbox_phys,
    589536    __lduw_vbox_phys,
     
    596543};
    597544
    598 static void *vbox_st_helpers[] = {
     545static void * const vbox_st_helpers[] = {
    599546    __stb_vbox_phys,
    600547    __stw_vbox_phys,
     
    608555        tcg_out_modrm(s, 0x8b |  P_REXW, TCG_REG_RDI, addr_reg);
    609556
    610     tcg_out_long_call(s, (tcg_target_long)vbox_ld_helpers[index]);
     557    tcg_out_goto(s, 1, vbox_ld_helpers[index]);
    611558    /* mov %rax, data_reg*/
    612559    tcg_out_modrm(s, 0x8b |  P_REXW, data_reg, TCG_REG_RAX);
     
    620567        /* mov addr_reg, %rsi */
    621568        tcg_out_modrm(s, 0x8b | P_REXW, TCG_REG_RSI, val_reg);
    622     tcg_out_long_call(s, (tcg_target_long)vbox_st_helpers[index]);
     569    tcg_out_goto(s, 1, vbox_st_helpers[index]);
    623570}
    624571
     
    629576{
    630577    int addr_reg, data_reg, r0, r1, mem_index, s_bits, bswap, rexw;
     578    int32_t offset;
    631579#if defined(CONFIG_SOFTMMU)
    632580    uint8_t *label1_ptr, *label2_ptr;
     
    679627    /* XXX: move that code at the end of the TB */
    680628    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RSI, mem_index);
    681 #ifndef VBOX
    682     tcg_out8(s, 0xe8);
    683     tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
    684               (tcg_target_long)s->code_ptr - 4);
    685 #else
    686     tcg_out_long_call(s, (tcg_target_long)qemu_ld_helpers[s_bits]);
    687 #endif
     629    tcg_out_goto(s, 1, qemu_ld_helpers[s_bits]);
    688630
    689631    switch(opc) {
     
    729671    tcg_out_modrm_offset(s, 0x03 | P_REXW, r0, r1, offsetof(CPUTLBEntry, addend) -
    730672                         offsetof(CPUTLBEntry, addr_read));
     673    offset = 0;
    731674#else
    732     r0 = addr_reg;
     675    if (GUEST_BASE == (int32_t)GUEST_BASE) {
     676        r0 = addr_reg;
     677        offset = GUEST_BASE;
     678    } else {
     679        offset = 0;
     680        /* movq $GUEST_BASE, r0 */
     681        tcg_out_opc(s, (0xb8 + (r0 & 7)) | P_REXW, 0, r0, 0);
     682        tcg_out32(s, GUEST_BASE);
     683        tcg_out32(s, GUEST_BASE >> 32);
     684        /* addq addr_reg, r0 */
     685        tcg_out_modrm(s, 0x01 | P_REXW, addr_reg, r0);
     686    }
    733687#endif
    734688
     
    743697    case 0:
    744698        /* movzbl */
    745         tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);
     699        tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, offset);
    746700        break;
    747701    case 0 | 4:
    748702        /* movsbX */
    749         tcg_out_modrm_offset(s, 0xbe | P_EXT | rexw, data_reg, r0, 0);
     703        tcg_out_modrm_offset(s, 0xbe | P_EXT | rexw, data_reg, r0, offset);
    750704        break;
    751705    case 1:
    752706        /* movzwl */
    753         tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
     707        tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, offset);
    754708        if (bswap) {
    755709            /* rolw $8, data_reg */
     
    762716        if (bswap) {
    763717            /* movzwl */
    764             tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
     718            tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, offset);
    765719            /* rolw $8, data_reg */
    766720            tcg_out8(s, 0x66);
     
    772726        } else {
    773727            /* movswX */
    774             tcg_out_modrm_offset(s, 0xbf | P_EXT | rexw, data_reg, r0, 0);
     728            tcg_out_modrm_offset(s, 0xbf | P_EXT | rexw, data_reg, r0, offset);
    775729        }
    776730        break;
    777731    case 2:
    778732        /* movl (r0), data_reg */
    779         tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
     733        tcg_out_modrm_offset(s, 0x8b, data_reg, r0, offset);
    780734        if (bswap) {
    781735            /* bswap */
     
    786740        if (bswap) {
    787741            /* movl (r0), data_reg */
    788             tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
     742            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, offset);
    789743            /* bswap */
    790744            tcg_out_opc(s, (0xc8 + (data_reg & 7)) | P_EXT, 0, data_reg, 0);
     
    793747        } else {
    794748            /* movslq */
    795             tcg_out_modrm_offset(s, 0x63 | P_REXW, data_reg, r0, 0);
     749            tcg_out_modrm_offset(s, 0x63 | P_REXW, data_reg, r0, offset);
    796750        }
    797751        break;
    798752    case 3:
    799753        /* movq (r0), data_reg */
    800         tcg_out_modrm_offset(s, 0x8b | P_REXW, data_reg, r0, 0);
     754        tcg_out_modrm_offset(s, 0x8b | P_REXW, data_reg, r0, offset);
    801755        if (bswap) {
    802756            /* bswap */
     
    821775{
    822776    int addr_reg, data_reg, r0, r1, mem_index, s_bits, bswap, rexw;
     777    int32_t offset;
    823778#if defined(CONFIG_SOFTMMU)
    824779    uint8_t *label1_ptr, *label2_ptr;
     
    890845    }
    891846    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
    892 #ifndef VBOX
    893     tcg_out8(s, 0xe8);
    894     tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
    895               (tcg_target_long)s->code_ptr - 4);
    896 #else
    897     tcg_out_long_call(s, (tcg_target_long)qemu_st_helpers[s_bits]);
    898 #endif
     847    tcg_out_goto(s, 1, qemu_st_helpers[s_bits]);
    899848
    900849    /* jmp label2 */
     
    909858    tcg_out_modrm_offset(s, 0x03 | P_REXW, r0, r1, offsetof(CPUTLBEntry, addend) -
    910859                         offsetof(CPUTLBEntry, addr_write));
     860    offset = 0;
    911861#else
    912     r0 = addr_reg;
     862    if (GUEST_BASE == (int32_t)GUEST_BASE) {
     863        r0 = addr_reg;
     864        offset = GUEST_BASE;
     865    } else {
     866        offset = 0;
     867        /* movq $GUEST_BASE, r0 */
     868        tcg_out_opc(s, (0xb8 + (r0 & 7)) | P_REXW, 0, r0, 0);
     869        tcg_out32(s, GUEST_BASE);
     870        tcg_out32(s, GUEST_BASE >> 32);
     871        /* addq addr_reg, r0 */
     872        tcg_out_modrm(s, 0x01 | P_REXW, addr_reg, r0);
     873    }
    913874#endif
    914875
     
    922883    case 0:
    923884        /* movb */
    924         tcg_out_modrm_offset(s, 0x88 | P_REXB, data_reg, r0, 0);
     885        tcg_out_modrm_offset(s, 0x88 | P_REXB, data_reg, r0, offset);
    925886        break;
    926887    case 1:
     
    934895        /* movw */
    935896        tcg_out8(s, 0x66);
    936         tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
     897        tcg_out_modrm_offset(s, 0x89, data_reg, r0, offset);
    937898        break;
    938899    case 2:
     
    944905        }
    945906        /* movl */
    946         tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
     907        tcg_out_modrm_offset(s, 0x89, data_reg, r0, offset);
    947908        break;
    948909    case 3:
     
    954915        }
    955916        /* movq */
    956         tcg_out_modrm_offset(s, 0x89 | P_REXW, data_reg, r0, 0);
     917        tcg_out_modrm_offset(s, 0x89 | P_REXW, data_reg, r0, offset);
    957918        break;
    958919    default:
     
    977938    case INDEX_op_exit_tb:
    978939        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, args[0]);
    979 #ifndef VBOX
    980         tcg_out8(s, 0xe9); /* jmp tb_ret_addr */
    981         tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
    982 #else
    983         tcg_out_long_jmp(s,  (tcg_target_long)tb_ret_addr);
    984 #endif
     940        tcg_out_goto(s, 0, tb_ret_addr);
    985941        break;
    986942    case INDEX_op_goto_tb:
     
    998954                                                   args[0]));
    999955#else
    1000             /** @todo: can we clobber RAX here? */
     956            /** @todo: can we clobber RAX here? */ /** @todo r=bird: I bet we cannot now... XXXX */
    1001957            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RAX,
    1002958                         (tcg_target_long)&(s->tb_next[args[0]]));
     
    1008964    case INDEX_op_call:
    1009965        if (const_args[0]) {
    1010 #ifndef VBOX
    1011             tcg_out8(s, 0xe8);
    1012             tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
    1013 #else
    1014             tcg_out_long_call(s, args[0]);
    1015 #endif
     966            tcg_out_goto(s, 1, (void *) args[0]);
    1016967        } else {
    1017968            tcg_out_modrm(s, 0xff, 2, args[0]);
     
    1020971    case INDEX_op_jmp:
    1021972        if (const_args[0]) {
    1022             tcg_out8(s, 0xe9);
    1023             tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
     973            tcg_out_goto(s, 0, (void *) args[0]);
    1024974        } else {
    1025975            tcg_out_modrm(s, 0xff, 4, args[0]);
     
    12891239        tcg_out_modrm(s, 0x63 | P_REXW, args[0], args[1]);
    12901240        break;
     1241    case INDEX_op_ext8u_i32:
     1242        tcg_out_modrm(s, 0xb6 | P_EXT | P_REXB, args[0], args[1]);
     1243        break;
     1244    case INDEX_op_ext16u_i32:
     1245        tcg_out_modrm(s, 0xb7 | P_EXT, args[0], args[1]);
     1246        break;
     1247    case INDEX_op_ext8u_i64:
     1248        tcg_out_modrm(s, 0xb6 | P_EXT | P_REXW, args[0], args[1]);
     1249        break;
     1250    case INDEX_op_ext16u_i64:
     1251        tcg_out_modrm(s, 0xb7 | P_EXT | P_REXW, args[0], args[1]);
     1252        break;
     1253    case INDEX_op_ext32u_i64:
     1254        tcg_out_modrm(s, 0x8b, args[0], args[1]);
     1255        break;
    12911256
    12921257    case INDEX_op_qemu_ld8u:
     
    14631428    { INDEX_op_ext16s_i64, { "r", "r"} },
    14641429    { INDEX_op_ext32s_i64, { "r", "r"} },
     1430    { INDEX_op_ext8u_i32, { "r", "r"} },
     1431    { INDEX_op_ext16u_i32, { "r", "r"} },
     1432    { INDEX_op_ext8u_i64, { "r", "r"} },
     1433    { INDEX_op_ext16u_i64, { "r", "r"} },
     1434    { INDEX_op_ext32u_i64, { "r", "r"} },
    14651435
    14661436    { INDEX_op_qemu_ld8u, { "r", "L" } },
     
    14751445    { INDEX_op_qemu_st16, { "L", "L" } },
    14761446    { INDEX_op_qemu_st32, { "L", "L" } },
    1477     { INDEX_op_qemu_st64, { "L", "L", "L" } },
     1447    { INDEX_op_qemu_st64, { "L", "L" } },
    14781448
    14791449    { -1 },
  • trunk/src/recompiler/tcg/x86_64/tcg-target.h

    r36175 r37675  
    7171#define TCG_TARGET_HAS_ext16s_i64
    7272#define TCG_TARGET_HAS_ext32s_i64
     73#define TCG_TARGET_HAS_ext8u_i32
     74#define TCG_TARGET_HAS_ext16u_i32
     75#define TCG_TARGET_HAS_ext8u_i64
     76#define TCG_TARGET_HAS_ext16u_i64
     77#define TCG_TARGET_HAS_ext32u_i64
     78
    7379#define TCG_TARGET_HAS_rot_i32
    7480#define TCG_TARGET_HAS_rot_i64
     81
     82#define TCG_TARGET_HAS_GUEST_BASE
    7583
    7684/* Note: must be synced with dyngen-exec.h */
  • trunk/src/recompiler/tests/sha1.c

    r36170 r37675  
    191191
    192192        for (j = 0; j < 4; t >>= 8, j++)
    193             *--fcp = (unsigned char) t
     193            *--fcp = (unsigned char) t;
    194194    }
    195195#else
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette