VirtualBox

Changeset 36140 in vbox for trunk/src/recompiler


Timestamp: Mar 3, 2011 1:48:16 PM (14 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 70322
Message:

rem: Re-synced to svn://svn.savannah.nongnu.org/qemu/trunk@5495 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

Location: trunk/src/recompiler
Files: 2 added, 51 edited

  • trunk/src/recompiler/Makefile.kmk

    r35572 r36140  
    6161 VBoxRemPrimary_DEFS.solaris  += __C99FEATURES__
    6262endif # win
    63 VBoxRemPrimary_DEFS           += IN_REM_R3 REM_INCLUDE_CPU_H
     63VBoxRemPrimary_DEFS           += IN_REM_R3 REM_INCLUDE_CPU_H NEED_CPU_H
    6464#VBoxRemPrimary_DEFS           += REM_PHYS_ADDR_IN_TLB
    6565#VBoxRemPrimary_DEFS           += DEBUG_ALL_LOGGING DEBUG_DISAS DEBUG_PCALL DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB  # Enables huge amounts of debug logging.
  • trunk/src/recompiler/VBoxRecompiler.c

    r36066 r36140  
    21002100        pVM->rem.s.Env.dr[i] = pCtx->dr[i];
    21012101
     2102#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
    21022103    /*
    21032104     * Clear the halted hidden flag (the interrupt waking up the CPU can
     
    21052106     */
    21062107    pVM->rem.s.Env.hflags      &= ~HF_HALTED_MASK;
     2108#endif
    21072109
    21082110    /*
  • trunk/src/recompiler/a.out.h

    r33540 r36140  
    2626  short f_magic;        /* magic number                 */
    2727  short f_nscns;        /* number of sections           */
    28   unsigned long f_timdat;       /* time & date stamp            */
    29   unsigned long f_symptr;       /* file pointer to symtab       */
    30   unsigned long f_nsyms;        /* number of symtab entries     */
     28  host_ulong f_timdat;  /* time & date stamp            */
     29  host_ulong f_symptr;  /* file pointer to symtab       */
     30  host_ulong f_nsyms;   /* number of symtab entries     */
    3131  short f_opthdr;       /* sizeof(optional hdr)         */
    3232  short f_flags;        /* flags                        */
     
    7373  unsigned short magic;         /* type of file                         */
    7474  unsigned short vstamp;        /* version stamp                        */
    75   unsigned long tsize;          /* text size in bytes, padded to FW bdry*/
    76   unsigned long dsize;          /* initialized data "  "                */
    77   unsigned long bsize;          /* uninitialized data "   "             */
    78   unsigned long entry;          /* entry pt.                            */
    79   unsigned long text_start;     /* base of text used for this file */
    80   unsigned long data_start;     /* base of data used for this file=
     75  host_ulong    tsize;          /* text size in bytes, padded to FW bdry*/
     76  host_ulong    dsize;          /* initialized data "  "                */
     77  host_ulong    bsize;          /* uninitialized data "   "             */
     78  host_ulong    entry;          /* entry pt.                            */
     79  host_ulong text_start;        /* base of text used for this file */
     80  host_ulong data_start;        /* base of data used for this file=
    8181 */
    8282}
     
    104104struct external_scnhdr {
    105105  char          s_name[8];      /* section name                 */
    106   unsigned long s_paddr;        /* physical address, offset
     106  host_ulong    s_paddr;        /* physical address, offset
    107107                                   of last addr in scn */
    108   unsigned long s_vaddr;        /* virtual address              */
    109   unsigned long s_size;         /* section size                 */
    110   unsigned long s_scnptr;       /* file ptr to raw data for section */
    111   unsigned long s_relptr;       /* file ptr to relocation       */
    112   unsigned long s_lnnoptr;      /* file ptr to line numbers     */
     108  host_ulong    s_vaddr;        /* virtual address              */
     109  host_ulong    s_size;         /* section size                 */
     110  host_ulong    s_scnptr;       /* file ptr to raw data for section */
     111  host_ulong    s_relptr;       /* file ptr to relocation       */
     112  host_ulong    s_lnnoptr;      /* file ptr to line numbers     */
    113113  unsigned short s_nreloc;      /* number of relocation entries */
    114114  unsigned short s_nlnno;       /* number of line number entries*/
    115   unsigned long s_flags;        /* flags                        */
     115  host_ulong    s_flags;        /* flags                        */
    116116};
    117117
     
    137137struct external_lineno {
    138138  union {
    139     unsigned long l_symndx; /* function name symbol index, iff l_lnno 0 */
    140     unsigned long l_paddr;      /* (physical) address of line number    */
     139    host_ulong l_symndx; /* function name symbol index, iff l_lnno 0 */
     140    host_ulong l_paddr; /* (physical) address of line number    */
    141141  } l_addr;
    142142  unsigned short l_lnno;        /* line number          */
     
    157157    char e_name[E_SYMNMLEN];
    158158    struct {
    159       unsigned long e_zeroes;
    160       unsigned long e_offset;
     159      host_ulong e_zeroes;
     160      host_ulong e_offset;
    161161    } e;
    162162  } e;
    163   unsigned long e_value;
     163  host_ulong e_value;
    164164  unsigned short e_scnum;
    165165  unsigned short e_type;
     
    175175union external_auxent {
    176176  struct {
    177     unsigned long x_tagndx;     /* str, un, or enum tag indx */
     177    host_ulong x_tagndx;        /* str, un, or enum tag indx */
    178178    union {
    179179      struct {
     
    181181        unsigned short  x_size; /* str/union/array size */
    182182      } x_lnsz;
    183       unsigned long x_fsize;    /* size of function */
     183      host_ulong x_fsize;       /* size of function */
    184184    } x_misc;
    185185    union {
    186186      struct {                  /* if ISFCN, tag, or .bb */
    187         unsigned long x_lnnoptr;/* ptr to fcn line # */
    188         unsigned long x_endndx; /* entry ndx past block end */
     187        host_ulong x_lnnoptr;/* ptr to fcn line # */
     188        host_ulong x_endndx;    /* entry ndx past block end */
    189189      } x_fcn;
    190190      struct {                  /* if ISARY, up to 4 dimen. */
     
    198198    char x_fname[E_FILNMLEN];
    199199    struct {
    200       unsigned long x_zeroes;
    201       unsigned long x_offset;
     200      host_ulong x_zeroes;
     201      host_ulong x_offset;
    202202    } x_n;
    203203  } x_file;
    204204
    205205  struct {
    206     unsigned long x_scnlen;     /* section length */
     206    host_ulong x_scnlen;        /* section length */
    207207    unsigned short x_nreloc;    /* # relocation entries */
    208208    unsigned short x_nlinno;    /* # line numbers */
    209     unsigned long x_checksum;   /* section COMDAT checksum */
     209    host_ulong x_checksum;      /* section COMDAT checksum */
    210210    unsigned short x_associated;/* COMDAT associated section index */
    211211    char x_comdat[1];           /* COMDAT selection number */
     
    213213
    214214  struct {
    215     unsigned long x_tvfill;     /* tv fill value */
     215    host_ulong x_tvfill;        /* tv fill value */
    216216    unsigned short x_tvlen;     /* length of .tv */
    217217    char x_tvran[2][2];         /* tv range */
     
    298298
    299299#define IMAGE_SCN_LNK_NRELOC_OVFL            0x01000000  /* Section contains extended relocations. */
    300 #define IMAGE_SCN_MEM_NOT_CACHED             0x04000000  /* Section is not cacheable.              */
     300#define IMAGE_SCN_MEM_NOT_CACHED             0x04000000  /* Section is not cachable.               */
    301301#define IMAGE_SCN_MEM_NOT_PAGED              0x08000000  /* Section is not pageable.               */
    302302#define IMAGE_SCN_MEM_SHARED                 0x10000000  /* Section is shareable.                  */
     
    345345  unsigned short e_oeminfo;     /* OEM information; e_oemid specific, 0x0 */
    346346  char e_res2[10][2];           /* Reserved words, all 0x0 */
    347   unsigned long e_lfanew;       /* File address of new exe header, 0x80 */
     347  host_ulong e_lfanew;  /* File address of new exe header, 0x80 */
    348348  char dos_message[16][4];      /* other stuff, always follow DOS header */
    349349  unsigned int nt_signature;    /* required NT signature, 0x4550 */
     
    353353  unsigned short f_magic;       /* magic number                 */
    354354  unsigned short f_nscns;       /* number of sections           */
    355   unsigned long f_timdat;       /* time & date stamp            */
    356   unsigned long f_symptr;       /* file pointer to symtab       */
    357   unsigned long f_nsyms;        /* number of symtab entries     */
     355  host_ulong f_timdat;  /* time & date stamp            */
     356  host_ulong f_symptr;  /* file pointer to symtab       */
     357  host_ulong f_nsyms;   /* number of symtab entries     */
    358358  unsigned short f_opthdr;      /* sizeof(optional hdr)         */
    359359  unsigned short f_flags;       /* flags                        */
     
    371371  unsigned short magic;         /* type of file                         */
    372372  unsigned short vstamp;        /* version stamp                        */
    373   unsigned long tsize;          /* text size in bytes, padded to FW bdry*/
    374   unsigned long dsize;          /* initialized data "  "                */
    375   unsigned long bsize;          /* uninitialized data "   "             */
    376   unsigned long entry;          /* entry pt.                            */
    377   unsigned long text_start;     /* base of text used for this file */
    378   unsigned long data_start;     /* base of all data used for this file */
     373  host_ulong    tsize;          /* text size in bytes, padded to FW bdry*/
     374  host_ulong    dsize;          /* initialized data "  "                */
     375  host_ulong    bsize;          /* uninitialized data "   "             */
     376  host_ulong    entry;          /* entry pt.                            */
     377  host_ulong text_start;        /* base of text used for this file */
     378  host_ulong data_start;        /* base of all data used for this file */
    379379
    380380  /* NT extra fields; see internal.h for descriptions */
    381   unsigned long  ImageBase;
    382   unsigned long  SectionAlignment;
    383   unsigned long  FileAlignment;
     381  host_ulong  ImageBase;
     382  host_ulong  SectionAlignment;
     383  host_ulong  FileAlignment;
    384384  unsigned short  MajorOperatingSystemVersion;
    385385  unsigned short  MinorOperatingSystemVersion;
     
    389389  unsigned short  MinorSubsystemVersion;
    390390  char  Reserved1[4];
    391   unsigned long  SizeOfImage;
    392   unsigned long  SizeOfHeaders;
    393   unsigned long  CheckSum;
     391  host_ulong  SizeOfImage;
     392  host_ulong  SizeOfHeaders;
     393  host_ulong  CheckSum;
    394394  unsigned short Subsystem;
    395395  unsigned short DllCharacteristics;
    396   unsigned long  SizeOfStackReserve;
    397   unsigned long  SizeOfStackCommit;
    398   unsigned long  SizeOfHeapReserve;
    399   unsigned long  SizeOfHeapCommit;
    400   unsigned long  LoaderFlags;
    401   unsigned long  NumberOfRvaAndSizes;
     396  host_ulong  SizeOfStackReserve;
     397  host_ulong  SizeOfStackCommit;
     398  host_ulong  SizeOfHeapReserve;
     399  host_ulong  SizeOfHeapCommit;
     400  host_ulong  LoaderFlags;
     401  host_ulong  NumberOfRvaAndSizes;
    402402  /* IMAGE_DATA_DIRECTORY DataDirectory[IMAGE_NUMBEROF_DIRECTORY_ENTRIES]; */
    403403  char  DataDirectory[16][2][4]; /* 16 entries, 2 elements/entry, 4 chars */
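The a.out.h hunks replace unsigned long with host_ulong throughout the COFF/PE on-disk structures. The point is that these fields are fixed 32-bit quantities in the file format, while unsigned long is 64 bits on LP64 hosts. A minimal sketch of the effect, assuming host_ulong is meant to be a width-stable 32-bit type (the typedef itself is not shown in this hunk):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed stand-in for the host_ulong typedef: a fixed 32-bit type, so
     * the in-memory struct matches the on-disk COFF header regardless of
     * whether the host's unsigned long is 32 or 64 bits. */
    typedef uint32_t host_ulong;

    struct filehdr_unsigned_long {          /* old layout, breaks on LP64 hosts */
        short f_magic, f_nscns;
        unsigned long f_timdat, f_symptr, f_nsyms;
        short f_opthdr, f_flags;
    };

    struct filehdr_host_ulong {             /* new layout, 20 bytes on typical ABIs */
        short f_magic, f_nscns;
        host_ulong f_timdat, f_symptr, f_nsyms;
        short f_opthdr, f_flags;
    };

    int main(void)
    {
        /* On an LP64 host the first struct no longer matches the 20-byte
         * on-disk COFF file header; the host_ulong version still does. */
        printf("unsigned long: %zu bytes\n", sizeof(struct filehdr_unsigned_long));
        printf("host_ulong:    %zu bytes\n", sizeof(struct filehdr_host_ulong));
        return 0;
    }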
  • trunk/src/recompiler/bswap.h

    r36125 r36140  
    139139#define le16_to_cpupu(p) le16_to_cpup(p)
    140140#define le32_to_cpupu(p) le32_to_cpup(p)
     141#define be32_to_cpupu(p) be32_to_cpup(p)
    141142
    142143#define cpu_to_be16wu(p, v) cpu_to_be16w(p, v)
     
    175176}
    176177
     178static inline uint32_t be32_to_cpupu(const uint32_t *p)
     179{
     180    const uint8_t *p1 = (const uint8_t *)p;
     181    return p1[3] | (p1[2] << 8) | (p1[1] << 16) | (p1[0] << 24);
     182}
     183
    177184static inline void cpu_to_be16wu(uint16_t *p, uint16_t v)
    178185{
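bswap.h gains be32_to_cpupu, an unaligned-safe big-endian 32-bit load built from single-byte reads (the "pu" variants exist so hosts that trap on misaligned accesses stay safe). A small standalone sketch of the same byte-assembly, reading from a deliberately odd offset:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Byte-wise big-endian 32-bit load, same idea as the be32_to_cpupu()
     * added above: it never dereferences a (possibly misaligned) uint32_t *. */
    static uint32_t load_be32_unaligned(const void *p)
    {
        const uint8_t *b = p;
        return  (uint32_t)b[3]
             | ((uint32_t)b[2] << 8)
             | ((uint32_t)b[1] << 16)
             | ((uint32_t)b[0] << 24);
    }

    int main(void)
    {
        uint8_t buf[] = { 0x00, 0x12, 0x34, 0x56, 0x78 };

        /* Reading at buf + 1 exercises the unaligned case. */
        assert(load_be32_unaligned(buf + 1) == 0x12345678u);
        printf("0x%08x\n", (unsigned)load_be32_unaligned(buf + 1));
        return 0;
    }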
  • trunk/src/recompiler/cpu-all.h

    r36125 r36140  
    3939#endif
    4040
    41 #if defined(__arm__) || defined(__sparc__)
     41#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
    4242#define WORDS_ALIGNED
    4343#endif
     
    5757
    5858#include "bswap.h"
     59#include "softfloat.h"
    5960
    6061#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
     
    600601    uint32_t a,b;
    601602    a = ldl_be_p(ptr);
    602     b = ldl_be_p((uint8_t*)ptr+4);
     603    b = ldl_be_p((uint8_t *)ptr + 4);
    603604    return (((uint64_t)a<<32)|b);
    604605}
     
    637638{
    638639    stl_be_p(ptr, v >> 32);
    639     stl_be_p((uint8_t*)ptr + 4, v);
     640    stl_be_p((uint8_t *)ptr + 4, v);
    640641}
    641642
    642643/* float access */
     644
    643645static inline float32 ldfl_be_p(void *ptr)
    644646{
     
    665667    CPU_DoubleU u;
    666668    u.l.upper = ldl_be_p(ptr);
    667     u.l.lower = ldl_be_p((uint8_t*)ptr + 4);
     669    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    668670    return u.d;
    669671}
     
    674676    u.d = v;
    675677    stl_be_p(ptr, u.l.upper);
    676     stl_be_p((uint8_t*)ptr + 4, u.l.lower);
     678    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
    677679}
    678680
     
    776778/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
    777779#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
    778 #define h2g(x) ((target_ulong)(x - GUEST_BASE))
     780#define h2g(x) ((target_ulong)((unsigned long)(x) - GUEST_BASE))
     781
    779782#define saddr(x) g2h(x)
    780783#define laddr(x) g2h(x)
     
    826829#define ldsw_code(p) ldsw_raw(p)
    827830#define ldl_code(p) ldl_raw(p)
     831#define ldq_code(p) ldq_raw(p)
    828832
    829833#define ldub_kernel(p) ldub_raw(p)
     
    832836#define ldsw_kernel(p) ldsw_raw(p)
    833837#define ldl_kernel(p) ldl_raw(p)
     838#define ldq_kernel(p) ldq_raw(p)
    834839#define ldfl_kernel(p) ldfl_raw(p)
    835840#define ldfq_kernel(p) ldfq_raw(p)
     
    874879void page_unprotect_range(target_ulong data, target_ulong data_size);
    875880
     881#if 0 /* bird: Not there in the code I'm looking at. */
    876882#define SINGLE_CPU_DEFINES
    877883#ifdef SINGLE_CPU_DEFINES
     
    937943
    938944#endif /* SINGLE_CPU_DEFINES */
     945#endif /* bird: removed? */
    939946
    940947void cpu_dump_state(CPUState *env, FILE *f,
    941948                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
    942949                    int flags);
    943 
    944 DECLNORETURN(void) cpu_abort(CPUState *env, const char *fmt, ...);
     950void cpu_dump_statistics (CPUState *env, FILE *f,
     951                          int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
     952                          int flags);
     953
     954void cpu_abort(CPUState *env, const char *fmt, ...)
     955    __attribute__ ((__format__ (__printf__, 2, 3)))
     956    __attribute__ ((__noreturn__));
    945957extern CPUState *first_cpu;
    946958extern CPUState *cpu_single_env;
     
    955967#define CPU_INTERRUPT_HALT   0x20 /* CPU halt wanted */
    956968#define CPU_INTERRUPT_SMI    0x40 /* (x86 only) SMI interrupt pending */
    957 #define CPU_INTERRUPT_DEBUG  0x80 /* Debug event occurred.  */
     969#define CPU_INTERRUPT_DEBUG  0x80 /* Debug event occured.  */
    958970#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending.  */
    959971#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */
     
    961973#ifdef VBOX
    962974/** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */
    963 #define CPU_INTERRUPT_SINGLE_INSTR              0x0400
     975# define CPU_INTERRUPT_SINGLE_INSTR             0x0400
    964976/** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */
    965 #define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT    0x0800
     977# define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT   0x0800
    966978/** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */
    967 #define CPU_INTERRUPT_RC                        0x1000
     979# define CPU_INTERRUPT_RC                       0x1000
    968980/** Exit current TB to process an external interrupt request (also in op.c!!) */
    969 #define CPU_INTERRUPT_EXTERNAL_EXIT             0x2000
     981# define CPU_INTERRUPT_EXTERNAL_EXIT            0x2000
    970982/** Exit current TB to process an external interrupt request (also in op.c!!) */
    971 #define CPU_INTERRUPT_EXTERNAL_HARD             0x4000
     983# define CPU_INTERRUPT_EXTERNAL_HARD            0x4000
    972984/** Exit current TB to process an external interrupt request (also in op.c!!) */
    973 #define CPU_INTERRUPT_EXTERNAL_TIMER            0x8000
     985# define CPU_INTERRUPT_EXTERNAL_TIMER           0x8000
    974986/** Exit current TB to process an external interrupt request (also in op.c!!) */
    975 #define CPU_INTERRUPT_EXTERNAL_DMA              0x10000
     987# define CPU_INTERRUPT_EXTERNAL_DMA             0x10000
    976988#endif /* VBOX */
    977989void cpu_interrupt(CPUState *s, int mask);
     
    10141026} CPULogItem;
    10151027
    1016 extern CPULogItem cpu_log_items[];
     1028extern const CPULogItem cpu_log_items[];
    10171029
    10181030void cpu_set_log(int log_flags);
     
    10431055
    10441056#ifndef VBOX
    1045 extern int phys_ram_size;
     1057extern ram_addr_t phys_ram_size;
    10461058extern int phys_ram_fd;
    1047 extern int phys_ram_size;
     1059extern uint8_t *phys_ram_base;
     1060extern uint8_t *phys_ram_dirty;
     1061extern ram_addr_t ram_size;
    10481062#else /* VBOX */
    10491063extern RTGCPHYS phys_ram_size;
    10501064/** This is required for bounds checking the phys_ram_dirty accesses. */
    10511065extern RTGCPHYS phys_ram_dirty_size;
     1066extern uint8_t *phys_ram_dirty;
    10521067#endif /* VBOX */
    1053 #if !defined(VBOX)
    1054 extern uint8_t *phys_ram_base;
    1055 #endif
    1056 extern uint8_t *phys_ram_dirty;
    10571068
    10581069/* physical memory access */
     
    10911102                                  ram_addr_t size,
    10921103                                  ram_addr_t phys_offset);
    1093 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr);
     1104ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
    10941105ram_addr_t qemu_ram_alloc(ram_addr_t);
    10951106void qemu_ram_free(ram_addr_t addr);
     
    11291140                        uint8_t *buf, int len, int is_write);
    11301141
    1131 #define VGA_DIRTY_FLAG  0x01
    1132 #define CODE_DIRTY_FLAG 0x02
     1142#define VGA_DIRTY_FLAG       0x01
     1143#define CODE_DIRTY_FLAG      0x02
    11331144#define KQEMU_DIRTY_FLAG     0x04
    11341145#define MIGRATION_DIRTY_FLAG 0x08
     
    12051216/* host CPU ticks (if available) */
    12061217
    1207 #ifdef VBOX
    1208 # include <iprt/asm-amd64-x86.h>
    1209 
    1210 DECLINLINE(int64_t) cpu_get_real_ticks(void)
    1211 {
    1212     return ASMReadTSC();
    1213 }
    1214 
    1215 #elif defined(__powerpc__)
     1218#if defined(__powerpc__)
    12161219
    12171220static inline uint32_t get_tbl(void)
     
    12631266}
    12641267
     1268#elif defined(__hppa__)
     1269
     1270static inline int64_t cpu_get_real_ticks(void)
     1271{
     1272    int val;
     1273    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
     1274    return val;
     1275}
     1276
    12651277#elif defined(__ia64)
    12661278
     
    12811293}
    12821294
    1283 #elif defined(__sparc_v9__)
     1295#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)
    12841296
    12851297static inline int64_t cpu_get_real_ticks (void)
     
    13021314#endif
    13031315}
     1316
     1317#elif defined(__mips__)
     1318
     1319static inline int64_t cpu_get_real_ticks(void)
     1320{
     1321#if __mips_isa_rev >= 2
     1322    uint32_t count;
     1323    static uint32_t cyc_per_count = 0;
     1324
     1325    if (!cyc_per_count)
     1326        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));
     1327
     1328    __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
     1329    return (int64_t)(count * cyc_per_count);
     1330#else
     1331    /* FIXME */
     1332    static int64_t ticks = 0;
     1333    return ticks++;
     1334#endif
     1335}
     1336
    13041337#else
    13051338/* The host CPU doesn't have an easily accessible cycle counter.
    1306    Just return a monotonically increasing vlue.  This will be totally wrong,
    1307    but hopefully better than nothing.  */
     1339   Just return a monotonically increasing value.  This will be
     1340   totally wrong, but hopefully better than nothing.  */
    13081341static inline int64_t cpu_get_real_ticks (void)
    13091342{
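Among the cpu-all.h changes, h2g() now casts the host pointer to an integer before subtracting GUEST_BASE, so the conversion is plain integer arithmetic rather than pointer arithmetic. A minimal round-trip sketch of the two macros, using placeholder definitions for target_ulong and GUEST_BASE (in the real headers both come from the per-target configuration):

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder stand-ins for illustration only. */
    typedef uint32_t target_ulong;
    static unsigned long GUEST_BASE;

    /* Same shape as the cpu-all.h macros: guest->host adds the base, and
     * host->guest (the hunk's fix) converts the pointer to an integer first
     * so the subtraction is integer, not pointer, arithmetic. */
    #define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
    #define h2g(x) ((target_ulong)((unsigned long)(x) - GUEST_BASE))

    int main(void)
    {
        static uint8_t guest_ram[4096];

        GUEST_BASE = (unsigned long)guest_ram;   /* pretend guest address 0 lives here */
        target_ulong guest_addr = 0x123;

        uint8_t *host_ptr = g2h(guest_addr);
        printf("guest 0x%x -> host %p -> guest 0x%x\n",
               (unsigned)guest_addr, (void *)host_ptr, (unsigned)h2g(host_ptr));
        return 0;
    }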
  • trunk/src/recompiler/cpu-defs.h

    r33656 r36140  
    3131#define CPU_DEFS_H
    3232
     33#ifndef NEED_CPU_H
     34#error cpu.h included from common code
     35#endif
     36
    3337#include "config.h"
    3438#include <setjmp.h>
    35 #ifndef VBOX
    3639#include <inttypes.h>
    37 #endif
    3840#include "osdep.h"
    3941
     
    8789#define HOST_LONG_SIZE (HOST_LONG_BITS / 8)
    8890
    89 #define EXCP_INTERRUPT  0x10000 /* async interruption */
     91#define EXCP_INTERRUPT  0x10000 /* async interruption */
    9092#define EXCP_HLT        0x10001 /* hlt instruction reached */
    9193#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
     
    130132    target_ulong addr_write;
    131133    target_ulong addr_code;
    132       /* Addend to virtual address to get physical address.  IO accesses
    133        use the corresponding iotlb value.  */
     134    /* Addend to virtual address to get physical address.  IO accesses
     135       use the correcponding iotlb value.  */
    134136#if TARGET_PHYS_ADDR_BITS == 64
    135137    /* on i386 Linux make sure it is aligned */
     
    157159#endif
    158160
    159 
    160161#define CPU_TEMP_BUF_NLONGS 128
    161 
    162162#define CPU_COMMON                                                      \
    163163    struct TranslationBlock *current_tb; /* currently executing TB  */  \
     
    204204    int watchpoint_hit;                                                 \
    205205                                                                        \
     206    struct GDBRegisterState *gdb_regs;                                  \
     207                                                                        \
    206208    /* Core interrupt code */                                           \
    207209    jmp_buf jmp_env;                                                    \
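cpu-defs.h now refuses to be pulled into target-independent code: unless NEED_CPU_H is defined it stops with an #error, and the Makefile.kmk hunk earlier in this changeset adds NEED_CPU_H to VBoxRemPrimary_DEFS so the recompiler build keeps compiling. A tiny sketch of the guard pattern, with a hypothetical translation unit:

    /* needs_cpu.c - hypothetical file showing the NEED_CPU_H guard pattern.
     *
     *   cc -DNEED_CPU_H -c needs_cpu.c    -> builds (the "target code" case)
     *   cc -c needs_cpu.c                 -> fails with the #error below
     */
    #ifndef NEED_CPU_H
    #error cpu.h included from common code
    #endif

    int cpu_specific_placeholder;   /* per-target definitions would follow here */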
  • trunk/src/recompiler/cpu-exec.c

    r36125 r36140  
    5858//#define DEBUG_EXEC
    5959//#define DEBUG_SIGNAL
    60 
    6160
    6261void cpu_loop_exit(void)
     
    800799                       CPU tries to execute code at the magic address.
    801800                       This will cause the magic PC value to be pushed to
    802                        the stack if an interrupt occurred at the wrong time.
     801                       the stack if an interrupt occured at the wrong time.
    803802                       We avoid this by disabling interrupts when
    804803                       pc contains a magic address.  */
     
    847846                    }
    848847#endif
    849                    /* Don't use the cached interrupt_request value,
     848                   /* Don't use the cached interupt_request value,
    850849                      do_interrupt may have updated the EXITTB flag. */
    851850                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
     
    10081007    return ret;
    10091008}
     1009
    10101010#endif /* !VBOX */
    10111011
     
    10361036                               (selector << 4), 0xffff, 0);
    10371037    } else {
    1038         load_seg(seg_reg, selector);
     1038        helper_load_seg(seg_reg, selector);
    10391039    }
    10401040    env = saved_env;
    10411041}
    10421042
    1043 void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
     1043void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
    10441044{
    10451045    CPUX86State *saved_env;
     
    10481048    env = s;
    10491049
    1050     helper_fsave((target_ulong)ptr, data32);
     1050    helper_fsave(ptr, data32);
    10511051
    10521052    env = saved_env;
    10531053}
    10541054
    1055 void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
     1055void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
    10561056{
    10571057    CPUX86State *saved_env;
     
    10601060    env = s;
    10611061
    1062     helper_frstor((target_ulong)ptr, data32);
     1062    helper_frstor(ptr, data32);
    10631063
    10641064    env = saved_env;
     
    10941094
    10951095    /* see if it is an MMU fault */
    1096     ret = cpu_x86_handle_mmu_fault(env, address, is_write,
    1097                                    ((env->hflags & HF_CPL_MASK) == 3), 0);
     1096    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    10981097    if (ret < 0)
    10991098        return 0; /* not an MMU fault */
     
    11441143    }
    11451144    /* see if it is an MMU fault */
    1146     ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
     1145    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    11471146    if (ret < 0)
    11481147        return 0; /* not an MMU fault */
     
    11601159    sigprocmask(SIG_SETMASK, old_set, NULL);
    11611160    cpu_loop_exit();
     1161    /* never comes here */
     1162    return 1;
    11621163}
    11631164#elif defined(TARGET_SPARC)
     
    11801181    }
    11811182    /* see if it is an MMU fault */
    1182     ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
     1183    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    11831184    if (ret < 0)
    11841185        return 0; /* not an MMU fault */
     
    11961197    sigprocmask(SIG_SETMASK, old_set, NULL);
    11971198    cpu_loop_exit();
     1199    /* never comes here */
     1200    return 1;
    11981201}
    11991202#elif defined (TARGET_PPC)
     
    12171220
    12181221    /* see if it is an MMU fault */
    1219     ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
     1222    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    12201223    if (ret < 0)
    12211224        return 0; /* not an MMU fault */
     
    12661269    }
    12671270    /* see if it is an MMU fault */
    1268     ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
     1271    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    12691272    if (ret < 0)
    12701273        return 0; /* not an MMU fault */
     
    13061309
    13071310    /* see if it is an MMU fault */
    1308     ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
     1311    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    13091312    if (ret < 0)
    13101313        return 0; /* not an MMU fault */
     
    13211324    if (ret == 1) {
    13221325#if 0
    1323         printf("PF exception: NIP=0x%08x error=0x%x %p\n",
    1324                env->nip, env->error_code, tb);
     1326        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
     1327               env->PC, env->error_code, tb);
    13251328#endif
    13261329    /* we restore the process signal mask as the sigreturn should
     
    13561359
    13571360    /* see if it is an MMU fault */
    1358     ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
     1361    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    13591362    if (ret < 0)
    13601363        return 0; /* not an MMU fault */
     
    13801383    return 1;
    13811384}
     1385
     1386#elif defined (TARGET_ALPHA)
     1387static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     1388                                    int is_write, sigset_t *old_set,
     1389                                    void *puc)
     1390{
     1391    TranslationBlock *tb;
     1392    int ret;
     1393
     1394    if (cpu_single_env)
     1395        env = cpu_single_env; /* XXX: find a correct solution for multithread */
     1396#if defined(DEBUG_SIGNAL)
     1397    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
     1398           pc, address, is_write, *(unsigned long *)old_set);
     1399#endif
     1400    /* XXX: locking issue */
     1401    if (is_write && page_unprotect(h2g(address), pc, puc)) {
     1402        return 1;
     1403    }
     1404
     1405    /* see if it is an MMU fault */
     1406    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     1407    if (ret < 0)
     1408        return 0; /* not an MMU fault */
     1409    if (ret == 0)
     1410        return 1; /* the MMU fault was handled without causing real CPU fault */
     1411
     1412    /* now we have a real cpu fault */
     1413    tb = tb_find_pc(pc);
     1414    if (tb) {
     1415        /* the PC is inside the translated code. It means that we have
     1416           a virtual CPU fault */
     1417        cpu_restore_state(tb, env, pc, puc);
     1418    }
     1419#if 0
     1420        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
     1421               env->nip, env->error_code, tb);
     1422#endif
     1423    /* we restore the process signal mask as the sigreturn should
     1424       do it (XXX: use sigsetjmp) */
     1425    sigprocmask(SIG_SETMASK, old_set, NULL);
     1426    cpu_loop_exit();
     1427    /* never comes here */
     1428    return 1;
     1429}
     1430#elif defined (TARGET_CRIS)
     1431static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     1432                                    int is_write, sigset_t *old_set,
     1433                                    void *puc)
     1434{
     1435    TranslationBlock *tb;
     1436    int ret;
     1437
     1438    if (cpu_single_env)
     1439        env = cpu_single_env; /* XXX: find a correct solution for multithread */
     1440#if defined(DEBUG_SIGNAL)
     1441    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
     1442           pc, address, is_write, *(unsigned long *)old_set);
     1443#endif
     1444    /* XXX: locking issue */
     1445    if (is_write && page_unprotect(h2g(address), pc, puc)) {
     1446        return 1;
     1447    }
     1448
     1449    /* see if it is an MMU fault */
     1450    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     1451    if (ret < 0)
     1452        return 0; /* not an MMU fault */
     1453    if (ret == 0)
     1454        return 1; /* the MMU fault was handled without causing real CPU fault */
     1455
     1456    /* now we have a real cpu fault */
     1457    tb = tb_find_pc(pc);
     1458    if (tb) {
     1459        /* the PC is inside the translated code. It means that we have
     1460           a virtual CPU fault */
     1461        cpu_restore_state(tb, env, pc, puc);
     1462    }
     1463    /* we restore the process signal mask as the sigreturn should
     1464       do it (XXX: use sigsetjmp) */
     1465    sigprocmask(SIG_SETMASK, old_set, NULL);
     1466    cpu_loop_exit();
     1467    /* never comes here */
     1468    return 1;
     1469}
     1470
    13821471#else
    13831472#error unsupported target CPU
     
    14121501#define REG_TRAPNO TRAPNO
    14131502#endif
    1414     pc = uc->uc_mcontext.gregs[REG_EIP];
    1415     trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
    1416 #if defined(TARGET_I386) && defined(USE_CODE_COPY)
    1417     if (trapno == 0x00 || trapno == 0x05) {
    1418         /* send division by zero or bound exception */
    1419         cpu_send_trap(pc, trapno, uc);
    1420         return 1;
    1421     } else
    1422 #endif
    1423         return handle_cpu_signal(pc, (unsigned long)info->si_addr,
    1424                                  trapno == 0xe ?
    1425                                  (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
    1426                                  &uc->uc_sigmask, puc);
     1503    pc = EIP_sig(uc);
     1504    trapno = TRAP_sig(uc);
     1505    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
     1506                             trapno == 0xe ?
     1507                             (ERROR_sig(uc) >> 1) & 1 : 0,
     1508                             &uc->uc_sigmask, puc);
    14271509}
    14281510
     
    15521634{
    15531635    siginfo_t *info = pinfo;
     1636    int is_write;
     1637    uint32_t insn;
     1638#if !defined(__arch64__) || defined(HOST_SOLARIS)
    15541639    uint32_t *regs = (uint32_t *)(info + 1);
    15551640    void *sigmask = (regs + 20);
    1556     unsigned long pc;
    1557     int is_write;
    1558     uint32_t insn;
    1559 
    15601641    /* XXX: is there a standard glibc define ? */
    1561     pc = regs[1];
     1642    unsigned long pc = regs[1];
     1643#else
     1644    struct sigcontext *sc = puc;
     1645    unsigned long pc = sc->sigc_regs.tpc;
     1646    void *sigmask = (void *)sc->sigc_mask;
     1647#endif
     1648
    15621649    /* XXX: need kernel patch to get write flag faster */
    15631650    is_write = 0;
     
    15901677    int is_write;
    15911678
     1679#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    15921680    pc = uc->uc_mcontext.gregs[R15];
     1681#else
     1682    pc = uc->uc_mcontext.arm_pc;
     1683#endif
    15931684    /* XXX: compute is_write */
    15941685    is_write = 0;
     
    16641755    is_write = 0;
    16651756    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
     1757                             is_write, &uc->uc_sigmask, puc);
     1758}
     1759
     1760#elif defined(__mips__)
     1761
     1762int cpu_signal_handler(int host_signum, void *pinfo,
     1763                       void *puc)
     1764{
     1765    siginfo_t *info = pinfo;
     1766    struct ucontext *uc = puc;
     1767    greg_t pc = uc->uc_mcontext.pc;
     1768    int is_write;
     1769
     1770    /* XXX: compute is_write */
     1771    is_write = 0;
     1772    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
     1773                             is_write, &uc->uc_sigmask, puc);
     1774}
     1775
     1776#elif defined(__hppa__)
     1777
     1778int cpu_signal_handler(int host_signum, void *pinfo,
     1779                       void *puc)
     1780{
     1781    struct siginfo *info = pinfo;
     1782    struct ucontext *uc = puc;
     1783    unsigned long pc;
     1784    int is_write;
     1785
     1786    pc = uc->uc_mcontext.sc_iaoq[0];
     1787    /* FIXME: compute is_write */
     1788    is_write = 0;
     1789    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
    16661790                             is_write,
    16671791                             &uc->uc_sigmask, puc);
  • trunk/src/recompiler/cutils.c

    r26499 r36140  
    514514}
    515515
    516 #endif
     516#endif /* VBOX */
     517
    517518void pstrcpy(char *buf, int buf_size, const char *str)
    518519{
     
    588589    return t;
    589590}
    590 #endif
     591#endif /* VBOX */
  • trunk/src/recompiler/dyngen-exec.h

    r33656 r36140  
    4747#ifndef VBOX
    4848
     49#ifdef __OpenBSD__
     50#include <sys/types.h>
     51#else
    4952typedef unsigned char uint8_t;
    5053typedef unsigned short uint16_t;
    5154typedef unsigned int uint32_t;
    52 /* Linux/Sparc64 defines uint64_t */
    53 #if !(defined (__sparc_v9__) && defined(__linux__))
     55// Linux/Sparc64 defines uint64_t
     56#if !(defined (__sparc_v9__) && defined(__linux__)) && !(defined(__APPLE__) && defined(__x86_64__))
    5457/* XXX may be done for all 64 bits targets ? */
    55 #if defined (__x86_64__) || defined(__ia64)
     58#if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__) || defined(__powerpc64__)
    5659typedef unsigned long uint64_t;
    5760#else
     
    6164
    6265/* if Solaris/__sun__, don't typedef int8_t, as it will be typedef'd
    63    prior to this and will cause an error in compilation, conflicting
     66   prior to this and will cause an error in compliation, conflicting
    6467   with /usr/include/sys/int_types.h, line 75 */
    6568#ifndef __sun__
     
    6972typedef signed int int32_t;
    7073// Linux/Sparc64 defines int64_t
    71 #if !(defined (__sparc_v9__) && defined(__linux__))
    72 #if defined (__x86_64__) || defined(__ia64)
     74#if !(defined (__sparc_v9__) && defined(__linux__)) && !(defined(__APPLE__) && defined(__x86_64__))
     75#if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__) || defined(__powerpc64__)
    7376typedef signed long int64_t;
    7477#else
    7578typedef signed long long int64_t;
     79#endif
    7680#endif
    7781#endif
     
    114118#endif /* VBOX */
    115119
    116 #ifdef __i386__
     120#if defined(__i386__)
    117121#ifndef VBOX
    118122#define AREG0 "ebp"
     
    120124#define AREG2 "esi"
    121125#define AREG3 "edi"
    122 #else
    123 #define AREG0 "esi"
    124 #define AREG1 "edi"
    125 #endif
    126 #endif
    127 #ifdef __x86_64__
    128 #if defined(VBOX)
    129 /* Must be in sync with TCG register notion, see tcg-target.h */
    130 #endif
     126#else  /* VBOX - why are we different? */
     127# define AREG0 "esi"
     128# define AREG1 "edi"
     129#endif /* VBOX */
     130#elif defined(__x86_64__)
    131131#define AREG0 "r14"
    132132#define AREG1 "r15"
    133133#define AREG2 "r12"
    134134#define AREG3 "r13"
    135 #endif
    136 #ifdef __powerpc__
     135//#define AREG4 "rbp"
     136//#define AREG5 "rbx"
     137#elif defined(__powerpc__)
    137138#define AREG0 "r27"
    138139#define AREG1 "r24"
     
    150151#define AREG11 "r23"
    151152#endif
    152 #define USE_INT_TO_FLOAT_HELPERS
    153 #define BUGGY_GCC_DIV64
    154 #endif
    155 #ifdef __arm__
     153#elif defined(__arm__)
    156154#define AREG0 "r7"
    157155#define AREG1 "r4"
    158156#define AREG2 "r5"
    159157#define AREG3 "r6"
    160 #endif
    161 #ifdef __mips__
    162 #define AREG0 "s3"
     158#elif defined(__hppa__)
     159#define AREG0 "r17"
     160#define AREG1 "r14"
     161#define AREG2 "r15"
     162#define AREG3 "r16"
     163#elif defined(__mips__)
     164#define AREG0 "fp"
    163165#define AREG1 "s0"
    164166#define AREG2 "s1"
    165167#define AREG3 "s2"
    166 #endif
    167 #ifdef __sparc__
     168#define AREG4 "s3"
     169#define AREG5 "s4"
     170#define AREG6 "s5"
     171#define AREG7 "s6"
     172#define AREG8 "s7"
     173#elif defined(__sparc__)
    168174#ifdef HOST_SOLARIS
    169175#define AREG0 "g2"
     
    174180#else
    175181#ifdef __sparc_v9__
    176 #define AREG0 "g1"
    177 #define AREG1 "g4"
    178 #define AREG2 "g5"
    179 #define AREG3 "g7"
     182#define AREG0 "g5"
     183#define AREG1 "g6"
     184#define AREG2 "g7"
    180185#else
    181186#define AREG0 "g6"
     
    193198#endif
    194199#endif
    195 #define USE_FP_CONVERT
    196 #endif
    197 #ifdef __s390__
     200#elif defined(__s390__)
    198201#define AREG0 "r10"
    199202#define AREG1 "r7"
    200203#define AREG2 "r8"
    201204#define AREG3 "r9"
    202 #endif
    203 #ifdef __alpha__
     205#elif defined(__alpha__)
    204206/* Note $15 is the frame pointer, so anything in op-i386.c that would
    205207   require a frame pointer, like alloca, would probably loose.  */
     
    211213#define AREG5 "$13"
    212214#define AREG6 "$14"
    213 #endif
    214 #ifdef __mc68000
     215#elif defined(__mc68000)
    215216#define AREG0 "%a5"
    216217#define AREG1 "%a4"
     
    218219#define AREG3 "%d6"
    219220#define AREG4 "%d5"
    220 #endif
    221 #ifdef __ia64__
     221#elif defined(__ia64__)
    222222#define AREG0 "r7"
    223223#define AREG1 "r4"
    224224#define AREG2 "r5"
    225225#define AREG3 "r6"
    226 #endif
    227 
    228 #ifndef VBOX
     226#else
     227#error unsupported CPU
     228#endif
     229
     230#ifndef VBOX /* WHY DO WE UNSUBSCRIBE TO THIS MACRO? */
    229231/* force GCC to generate only one epilog at the end of the function */
    230232#define FORCE_RET() __asm__ __volatile__("" : : : "memory");
     
    259261#define PARAM2 ({ int _r; asm("" : "=r"(_r) : "0" (&__op_param2)); _r; })
    260262#define PARAM3 ({ int _r; asm("" : "=r"(_r) : "0" (&__op_param3)); _r; })
     263#elif defined(__s390__)
     264extern int __op_param1 __hidden;
     265extern int __op_param2 __hidden;
     266extern int __op_param3 __hidden;
     267#define PARAM1 ({ int _r; asm("bras %0,8; .long " ASM_NAME(__op_param1) "; l %0,0(%0)" : "=r"(_r) : ); _r; })
     268#define PARAM2 ({ int _r; asm("bras %0,8; .long " ASM_NAME(__op_param2) "; l %0,0(%0)" : "=r"(_r) : ); _r; })
     269#define PARAM3 ({ int _r; asm("bras %0,8; .long " ASM_NAME(__op_param3) "; l %0,0(%0)" : "=r"(_r) : ); _r; })
    261270#else
    262271#if defined(__APPLE__)
     
    278287#endif
    279288
    280 #ifdef VBOX
    281 #define GETPC() ASMReturnAddress()
     289#if defined(__i386__)
     290#define EXIT_TB() asm volatile ("ret")
     291#define GOTO_LABEL_PARAM(n) asm volatile ("jmp " ASM_NAME(__op_gen_label) #n)
     292#elif defined(__x86_64__)
     293#define EXIT_TB() asm volatile ("ret")
     294#define GOTO_LABEL_PARAM(n) asm volatile ("jmp " ASM_NAME(__op_gen_label) #n)
     295#elif defined(__powerpc__)
     296#define EXIT_TB() asm volatile ("blr")
     297#define GOTO_LABEL_PARAM(n) asm volatile ("b " ASM_NAME(__op_gen_label) #n)
    282298#elif defined(__s390__)
     299#define EXIT_TB() asm volatile ("br %r14")
     300#define GOTO_LABEL_PARAM(n) asm volatile ("larl %r7,12; l %r7,0(%r7); br %r7; .long " ASM_NAME(__op_gen_label) #n)
     301#elif defined(__alpha__)
     302#define EXIT_TB() asm volatile ("ret")
     303#elif defined(__ia64__)
     304#define EXIT_TB() asm volatile ("br.ret.sptk.many b0;;")
     305#define GOTO_LABEL_PARAM(n) asm volatile ("br.sptk.many " \
     306                                          ASM_NAME(__op_gen_label) #n)
     307#elif defined(__sparc__)
     308#define EXIT_TB() asm volatile ("jmpl %i0 + 8, %g0; nop")
     309#define GOTO_LABEL_PARAM(n) asm volatile ("ba " ASM_NAME(__op_gen_label) #n ";nop")
     310#elif defined(__arm__)
     311#define EXIT_TB() asm volatile ("b exec_loop")
     312#define GOTO_LABEL_PARAM(n) asm volatile ("b " ASM_NAME(__op_gen_label) #n)
     313#elif defined(__mc68000)
     314#define EXIT_TB() asm volatile ("rts")
     315#elif defined(__mips__)
     316#define EXIT_TB() asm volatile ("jr $ra")
     317#define GOTO_LABEL_PARAM(n) asm volatile (".set noat; la $1, " ASM_NAME(__op_gen_label) #n "; jr $1; .set at")
     318#elif defined(__hppa__)
     319#define GOTO_LABEL_PARAM(n) asm volatile ("b,n " ASM_NAME(__op_gen_label) #n)
     320#else
     321#error unsupported CPU
     322#endif
     323
    283324/* The return address may point to the start of the next instruction.
    284325   Subtracting one gets us the call instruction itself.  */
     326#if defined(__s390__)
    285327# define GETPC() ((void*)(((unsigned long)__builtin_return_address(0) & 0x7fffffffUL) - 1))
    286328#elif defined(__arm__)
     
    291333# define GETPC() ((void *)((unsigned long)__builtin_return_address(0) - 1))
    292334#endif
     335
    293336#endif /* !defined(__DYNGEN_EXEC_H__) */
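The AREGn strings in dyngen-exec.h name host registers that the recompiler reserves for fixed globals (most importantly the env pointer); this changeset reshuffles them per architecture and adds hppa and mips choices. A minimal sketch of how such a definition is typically consumed, using the x86-64 AREG0 = "r14" from the hunk and a placeholder CPU-state struct (CPUPlaceholderState is not a real type):

    /* Global-register-variable pattern behind AREG0 (a GCC extension): the
     * CPU-state pointer is pinned to a host register for the whole
     * compilation unit. */
    typedef struct CPUPlaceholderState { int interrupt_request; } CPUPlaceholderState;

    #if defined(__x86_64__) && defined(__GNUC__)
    register CPUPlaceholderState *env asm("r14");   /* AREG0 on x86-64 */
    #else
    static CPUPlaceholderState *env;                /* fallback so the sketch compiles elsewhere */
    #endif

    int env_has_pending_interrupt(void)
    {
        /* On x86-64 this read goes straight through the pinned register. */
        return env->interrupt_request != 0;
    }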
  • trunk/src/recompiler/elf.h

    r33540 r36140  
    11#ifndef _QEMU_ELF_H
    22#define _QEMU_ELF_H
     3
     4#include <inttypes.h>
    35
    46/* 32-bit ELF base types. */
     
    327329#define R_SPARC_64              32
    328330#define R_SPARC_OLO10           33
     331#define R_SPARC_HH22            34
     332#define R_SPARC_HM10            35
     333#define R_SPARC_LM22            36
    329334#define R_SPARC_WDISP16         40
    330335#define R_SPARC_WDISP19         41
     
    639644#define EFA_PARISC_2_0              0x0214 /* PA-RISC 2.0 big-endian.  */
    640645
    641 /* Additional section indices.  */
    642 
    643 #define SHN_PARISC_ANSI_COMMON  0xff00     /* Section for tentatively declared
     646/* Additional section indeces.  */
     647
     648#define SHN_PARISC_ANSI_COMMON  0xff00     /* Section for tenatively declared
    644649                                              symbols in ANSI C.  */
    645650#define SHN_PARISC_HUGE_COMMON  0xff01     /* Common blocks in huge model.  */
     
    11191124} Elf64_Nhdr;
    11201125
     1126#ifdef ELF_CLASS
    11211127#if ELF_CLASS == ELFCLASS32
    11221128
     
    11261132#define elf_shdr        elf32_shdr
    11271133#define elf_sym         elf32_sym
     1134#define elf_addr_t      Elf32_Off
    11281135
    11291136#ifdef ELF_USES_RELOCA
     
    11401147#define elf_shdr        elf64_shdr
    11411148#define elf_sym         elf64_sym
     1149#define elf_addr_t      Elf64_Off
    11421150
    11431151#ifdef ELF_USES_RELOCA
     
    11591167#endif
    11601168
     1169#endif /* ELF_CLASS */
     1170
    11611171
    11621172#endif /* _QEMU_ELF_H */
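elf.h now includes <inttypes.h> itself and only defines the generic elf_* aliases (plus the new elf_addr_t) when the includer has picked an ELF_CLASS. A tiny sketch of that selection pattern, with placeholder typedefs standing in for the real Elf32_Off/Elf64_Off definitions:

    #include <stdint.h>

    /* Placeholders for the real ELF base types. */
    typedef uint32_t Elf32_Off;
    typedef uint64_t Elf64_Off;

    #define ELFCLASS32 1
    #define ELFCLASS64 2
    #define ELF_CLASS  ELFCLASS32     /* normally chosen by the includer */

    /* Same shape as the new elf.h block: map the generic name only when a
     * class has been selected. */
    #ifdef ELF_CLASS
    # if ELF_CLASS == ELFCLASS32
    typedef Elf32_Off elf_addr_t;
    # else
    typedef Elf64_Off elf_addr_t;
    # endif
    #endif

    elf_addr_t example_offset;        /* 32 bits wide because ELF_CLASS == ELFCLASS32 */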
  • trunk/src/recompiler/exec-all.h

    r36125 r36140  
    6060
    6161/* Maximum size a TCG op can expand to.  This is complicated because a
    62    single op may require several host instructions and register reloads.
     62   single op may require several host instructions and regirster reloads.
    6363   For now take a wild guess at 128 bytes, which should allow at least
    6464   a couple of fixup instructions per argument.  */
     
    191191    target_ulong tmp;
    192192    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    193     return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
     193    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
    194194}
    195195
     
    198198    target_ulong tmp;
    199199    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    200     return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
    201             (tmp & TB_JMP_ADDR_MASK));
     200    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
     201            | (tmp & TB_JMP_ADDR_MASK));
    202202}
    203203
     
    215215
    216216extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    217 
    218217extern uint8_t *code_gen_ptr;
    219218extern int code_gen_max_blocks;
     
    222221
    223222#if defined(__powerpc__)
    224 static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
    225 {
    226     uint32_t val, *ptr;
    227 
    228     /* patch the branch destination */
    229     ptr = (uint32_t *)jmp_addr;
    230     val = *ptr;
    231     val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    232     *ptr = val;
    233     /* flush icache */
    234     asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    235     asm volatile ("sync" : : : "memory");
    236     asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    237     asm volatile ("sync" : : : "memory");
    238     asm volatile ("isync" : : : "memory");
    239 }
    240 #elif defined(__i386__)
     223extern void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
     224#define tb_set_jmp_target1 ppc_tb_set_jmp_target
     225#elif defined(__i386__) || defined(__x86_64__)
    241226static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
    242227{
    243228    /* patch the branch destination */
    244229    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    245     /* no need to flush icache explicitely */
     230    /* no need to flush icache explicitly */
     231}
     232#elif defined(__arm__)
     233static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
     234{
     235    register unsigned long _beg __asm ("a1");
     236    register unsigned long _end __asm ("a2");
     237    register unsigned long _flg __asm ("a3");
     238
     239    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
     240    *(uint32_t *)jmp_addr |= ((addr - (jmp_addr + 8)) >> 2) & 0xffffff;
     241
     242    /* flush icache */
     243    _beg = jmp_addr;
     244    _end = jmp_addr + 4;
     245    _flg = 0;
     246    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
    246247}
    247248#endif
     
    285286
    286287TranslationBlock *tb_find_pc(unsigned long pc_ptr);
    287 
    288 #ifndef offsetof
    289 #define offsetof(type, field) ((size_t) &((type *)0)->field)
    290 #endif
    291288
    292289#if defined(_WIN32)
     
    344341
    345342#if defined(CONFIG_USER_ONLY)
    346 static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
     343static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
    347344{
    348345    return addr;
     
    389386}
    390387
    391 
    392388/* Deterministic execution requires that IO only be performed on the last
    393389   instruction of a TB so that interrupts take effect immediately.  */
     
    405401#endif
    406402
    407 
    408403#ifdef USE_KQEMU
    409404#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
     405
     406#define MSR_QPI_COMMBASE 0xfabe0010
    410407
    411408int kqemu_init(CPUState *env);
     
    415412void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
    416413void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
     414void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
     415                        ram_addr_t phys_offset);
    417416void kqemu_cpu_interrupt(CPUState *env);
    418417void kqemu_record_dump(void);
     418
     419extern uint32_t kqemu_comm_base;
    419420
    420421static inline int kqemu_is_ok(CPUState *env)
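For i386/x86-64, the tb_set_jmp_target1() shown above patches a direct jump between translated blocks by overwriting the 32-bit displacement of a jmp rel32; the stored value is the target minus the address of the byte following the displacement field. A small arithmetic sketch with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    /* rel32 operand written by the x86 tb_set_jmp_target1(): the CPU adds the
     * displacement to the address of the next instruction, which starts right
     * after the 4-byte field at jmp_addr. */
    static uint32_t jmp_rel32(unsigned long jmp_addr, unsigned long target)
    {
        return (uint32_t)(target - (jmp_addr + 4));
    }

    int main(void)
    {
        unsigned long jmp_addr = 0x1000;   /* made-up address of the displacement field */
        unsigned long target   = 0x2000;   /* made-up entry point of the next block */
        uint32_t rel = jmp_rel32(jmp_addr, target);

        printf("rel32    = 0x%08x\n", (unsigned)rel);
        printf("resolves = 0x%lx\n", jmp_addr + 4 + (long)(int32_t)rel);  /* 0x2000 again */
        return 0;
    }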
  • trunk/src/recompiler/exec.c

    r36125 r36140  
    3131#ifndef VBOX
    3232#ifdef _WIN32
     33#define WIN32_LEAN_AND_MEAN
    3334#include <windows.h>
    3435#else
     
    5455#include "cpu.h"
    5556#include "exec-all.h"
     57#include "qemu-common.h"
     58#include "tcg.h"
     59#ifndef VBOX
     60#include "hw/hw.h"
     61#endif
     62#include "osdep.h"
    5663#if defined(CONFIG_USER_ONLY)
    5764#include <qemu.h>
     
    6673//#define DEBUG_TB_CHECK
    6774//#define DEBUG_TLB_CHECK
     75
     76//#define DEBUG_IOPORT
     77//#define DEBUG_SUBPAGE
    6878
    6979#if !defined(CONFIG_USER_ONLY)
     
    114124    __attribute__((aligned (32)))
    115125#endif
     126
    116127uint8_t code_gen_prologue[1024] code_gen_section;
    117 
    118128#else /* VBOX */
    119129extern uint8_t* code_gen_prologue;
    120130#endif /* VBOX */
    121 
    122131static uint8_t *code_gen_buffer;
    123132static unsigned long code_gen_buffer_size;
     
    171180
    172181typedef struct PhysPageDesc {
    173     /* offset in host memory of the page + io_index in the low 12 bits */
     182    /* offset in host memory of the page + io_index in the low bits */
    174183    ram_addr_t phys_offset;
    175184} PhysPageDesc;
     
    194203#define L1_SIZE (1 << L1_BITS)
    195204#define L2_SIZE (1 << L2_BITS)
    196 
    197 static void io_mem_init(void);
    198205
    199206unsigned long qemu_real_host_page_size;
     
    252259} subpage_t;
    253260
    254 
    255261#ifndef VBOX
    256262#ifdef _WIN32
     
    279285}
    280286#endif
    281 #else // VBOX
     287#else /* VBOX */
    282288static void map_exec(void *addr, long size)
    283289{
     
    285291                 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    286292}
    287 #endif
     293#endif /* VBOX */
    288294
    289295static void page_init(void)
     
    325331    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
    326332#endif
     333
    327334#ifdef VBOX
    328335    /* We use other means to set reserved bit on our pages */
    329 #else
     336#else  /* !VBOX */
    330337#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    331338    {
     
    355362    }
    356363#endif
    357 #endif
     364#endif /* !VBOX */
    358365}
    359366
     
    498505#endif
    499506
    500 #ifdef VBOX
    501 /*
    502  * We don't need such huge codegen buffer size, as execute most of the code
    503  * in raw or hwacc mode
    504  */
     507#ifdef VBOX /*  We don't need such huge codegen buffer size, as execute
     508                most of the code  in raw or hwacc mode. */
    505509#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
    506 #else
     510#else  /* !VBOX */
    507511#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
    508 #endif
     512#endif /* !VBOX */
    509513
    510514#if defined(CONFIG_USER_ONLY)
    511 /* Currently it is not recommended to allocate big chunks of data in
     515/* Currently it is not recommanded to allocate big chunks of data in
    512516   user mode. It will change when a dedicated libc will be used */
    513517#define USE_STATIC_CODE_GEN_BUFFER
     
    829833    }
    830834}
    831 #endif // DEBUG_TB_CHECK
     835
     836#endif
    832837
    833838/* invalidate one TB */
     
    956961
    957962#ifdef VBOX
     963
    958964void tb_invalidate_virt(CPUState *env, uint32_t eip)
    959965{
     
    9981004}
    9991005# endif /* VBOX_STRICT */
     1006
    10001007#endif /* VBOX */
    10011008
     
    10321039    TranslationBlock *tb;
    10331040
    1034     p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
     1041    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    10351042    if (!p->code_bitmap)
    10361043        return;
    1037     memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
    10381044
    10391045    tb = p->first_tb;
     
    15161522static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    15171523{
    1518     target_ulong addr, pd;
     1524    target_phys_addr_t addr;
     1525    target_ulong pd;
    15191526    ram_addr_t ram_addr;
    15201527    PhysPageDesc *p;
     
    16441651    if (env->singlestep_enabled != enabled) {
    16451652        env->singlestep_enabled = enabled;
    1646         /* must flush all the translated code to avoid inconsistencies */
     1653        /* must flush all the translated code to avoid inconsistancies */
    16471654        /* XXX: only flush what is necessary */
    16481655        tb_flush(env);
     
    16571664    loglevel = log_flags;
    16581665    if (loglevel && !logfile) {
    1659         logfile = fopen(logfilename, "w");
     1666        logfile = fopen(logfilename, log_append ? "a" : "w");
    16601667        if (!logfile) {
    16611668            perror(logfilename);
     
    16651672        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
    16661673        {
    1667             static uint8_t logfile_buf[4096];
     1674            static char logfile_buf[4096];
    16681675            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
    16691676        }
     
    16711678        setvbuf(logfile, NULL, _IOLBF, 0);
    16721679#endif
     1680        log_append = 1;
     1681    }
     1682    if (!loglevel && logfile) {
     1683        fclose(logfile);
     1684        logfile = NULL;
    16731685    }
    16741686}
     
    16771689{
    16781690    logfilename = strdup(filename);
     1691    if (logfile) {
     1692        fclose(logfile);
     1693        logfile = NULL;
     1694    }
     1695    cpu_set_log(loglevel);
    16791696}
    16801697#endif /* !VBOX */
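With these hunks cpu_set_log() honours a log_append flag (reopening the file in append mode on later calls) and closes the file when logging is disabled, while cpu_set_log_filename() drops any already-open file before re-applying the current level. A hedged usage sketch; the CPU_LOG_* masks are the ones listed in cpu_log_items below, and the call order shown is only an assumption:

    /* pick a log file first, then enable some categories */
    cpu_set_log_filename("/tmp/qemu.log");
    cpu_set_log(CPU_LOG_TB_IN_ASM | CPU_LOG_INT);
    /* ... */
    cpu_set_log(0);    /* no categories left: the file is closed again */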
     
    17471764      "show target assembly code for each compiled TB" },
    17481765    { CPU_LOG_TB_OP, "op",
    1749       "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
     1766      "show micro ops for each compiled TB" },
     1767    { CPU_LOG_TB_OP_OPT, "op_opt",
     1768      "show micro ops "
    17501769#ifdef TARGET_I386
    1751     { CPU_LOG_TB_OP_OPT, "op_opt",
    1752       "show micro ops after optimization for each compiled TB" },
    1753 #endif
     1770      "before eflags optimization and "
     1771#endif
     1772      "after liveness analysis" },
    17541773    { CPU_LOG_INT, "int",
    17551774      "show interrupts/exceptions in short format" },
     
    17571776      "show trace before each executed TB (lots of logs)" },
    17581777    { CPU_LOG_TB_CPU, "cpu",
    1759       "show CPU state before bloc translation" },
     1778      "show CPU state before block translation" },
    17601779#ifdef TARGET_I386
    17611780    { CPU_LOG_PCALL, "pcall",
     
    17791798int cpu_str_to_log_mask(const char *str)
    17801799{
    1781     CPULogItem *item;
     1800    const CPULogItem *item;
    17821801    int mask;
    17831802    const char *p, *p1;
     
    18141833{
    18151834    va_list ap;
     1835    va_list ap2;
    18161836
    18171837    va_start(ap, fmt);
     1838    va_copy(ap2, ap);
    18181839    fprintf(stderr, "qemu: fatal: ");
    18191840    vfprintf(stderr, fmt, ap);
     
    18241845    cpu_dump_state(env, stderr, fprintf, 0);
    18251846#endif
     1847    if (logfile) {
     1848        fprintf(logfile, "qemu: fatal: ");
     1849        vfprintf(logfile, fmt, ap2);
     1850        fprintf(logfile, "\n");
     1851#ifdef TARGET_I386
     1852        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
     1853#else
     1854        cpu_dump_state(env, logfile, fprintf, 0);
     1855#endif
     1856        fflush(logfile);
     1857        fclose(logfile);
     1858    }
     1859    va_end(ap2);
    18261860    va_end(ap);
    18271861    abort();
     
    18411875    return new_env;
    18421876}
    1843 #endif
     1877#endif /* !VBOX */
    18441878
    18451879#if !defined(CONFIG_USER_ONLY)
     
    18651899}
    18661900
     1901#ifdef VBOX
    18671902static CPUTLBEntry s_cputlb_empty_entry = {
    18681903    .addr_read  = -1,
     
    18711906    .addend     = -1,
    18721907};
     1908#endif /* VBOX */
    18731909
    18741910/* NOTE: if flush_global is true, also flush global entries (not
     
    18861922
    18871923    for(i = 0; i < CPU_TLB_SIZE; i++) {
     1924#ifdef VBOX
    18881925        int mmu_idx;
    18891926        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
    18901927            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
    18911928        }
     1929#else  /* !VBOX */
     1930        env->tlb_table[0][i].addr_read = -1;
     1931        env->tlb_table[0][i].addr_write = -1;
     1932        env->tlb_table[0][i].addr_code = -1;
     1933        env->tlb_table[1][i].addr_read = -1;
     1934        env->tlb_table[1][i].addr_write = -1;
     1935        env->tlb_table[1][i].addr_code = -1;
     1936#if (NB_MMU_MODES >= 3)
     1937        env->tlb_table[2][i].addr_read = -1;
     1938        env->tlb_table[2][i].addr_write = -1;
     1939        env->tlb_table[2][i].addr_code = -1;
     1940#if (NB_MMU_MODES == 4)
     1941        env->tlb_table[3][i].addr_read = -1;
     1942        env->tlb_table[3][i].addr_write = -1;
     1943        env->tlb_table[3][i].addr_code = -1;
     1944#endif
     1945#endif
     1946#endif /* !VBOX */
    18921947    }
    18931948
     
    19872042        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
    19882043        if ((addr - start) < length) {
    1989             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
     2044            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
    19902045        }
    19912046    }
     
    21292184}
    21302185
    2131 
    2132 /* update the TLB corresponding to virtual page vaddr and phys addr
    2133    addr so that it is no longer dirty */
    2134 static inline void tlb_set_dirty(CPUState *env,
    2135                                  unsigned long addr, target_ulong vaddr)
     2186/* update the TLB corresponding to virtual page vaddr
     2187   so that it is no longer dirty */
     2188static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
    21362189{
    21372190    int i;
    21382191
    2139     addr &= TARGET_PAGE_MASK;
     2192    vaddr &= TARGET_PAGE_MASK;
    21402193    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    2141     tlb_set_dirty1(&env->tlb_table[0][i], addr);
    2142     tlb_set_dirty1(&env->tlb_table[1][i], addr);
     2194    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
     2195    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
    21432196#if (NB_MMU_MODES >= 3)
    21442197    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
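tlb_set_dirty() now keys purely on the guest-virtual address. With the TLB_NOTDIRTY scheme used in the dirty-tracking hunk above, the per-entry helper only needs to compare and rewrite addr_write; roughly (a sketch of tlb_set_dirty1() for context, not part of this hunk):

    /* re-arm the fast write path for one entry once its page is fully dirty */
    static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
    {
        if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
            tlb_entry->addr_write = vaddr;   /* drop the TLB_NOTDIRTY slow-path bit */
    }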
     
    23022355    return ret;
    23032356}
    2304 #if 0
    2305 /* called from signal handler: invalidate the code and unprotect the
    2306    page. Return TRUE if the fault was successfully handled. */
    2307 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
    2308 {
    2309 #if !defined(CONFIG_SOFTMMU)
    2310     VirtPageDesc *vp;
    2311 
    2312 #if defined(DEBUG_TLB)
    2313     printf("page_unprotect: addr=0x%08x\n", addr);
    2314 #endif
    2315     addr &= TARGET_PAGE_MASK;
    2316 
    2317     /* if it is not mapped, no need to worry here */
    2318     if (addr >= MMAP_AREA_END)
    2319         return 0;
    2320     vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    2321     if (!vp)
    2322         return 0;
    2323     /* NOTE: in this case, validate_tag is _not_ tested as it
    2324        validates only the code TLB */
    2325     if (vp->valid_tag != virt_valid_tag)
    2326         return 0;
    2327     if (!(vp->prot & PAGE_WRITE))
    2328         return 0;
    2329 #if defined(DEBUG_TLB)
    2330     printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
    2331            addr, vp->phys_addr, vp->prot);
    2332 #endif
    2333     if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
    2334         cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
    2335                   (unsigned long)addr, vp->prot);
    2336     /* set the dirty bit */
    2337     phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    2338     /* flush the code inside */
    2339     tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    2340     return 1;
    2341 #elif defined(VBOX)
    2342     addr &= TARGET_PAGE_MASK;
    2343 
    2344     /* if it is not mapped, no need to worry here */
    2345     if (addr >= MMAP_AREA_END)
    2346         return 0;
    2347     return 1;
    2348 #else
    2349     return 0;
    2350 #endif
    2351 }
    2352 #endif /* 0 */
    23532357
    23542358#else
     
    24252429
    24262430/* modify the flags of a page and invalidate the code if
    2427    necessary. The flag PAGE_WRITE_ORG is positioned automatically
     2431   necessary. The flag PAGE_WRITE_ORG is positionned automatically
    24282432   depending on PAGE_WRITE */
    24292433void page_set_flags(target_ulong start, target_ulong end, int flags)
     
    24322436    target_ulong addr;
    24332437
     2438    /* mmap_lock should already be held.  */
    24342439    start = start & TARGET_PAGE_MASK;
    24352440    end = TARGET_PAGE_ALIGN(end);
     
    24392444    AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
    24402445#endif
    2441     spin_lock(&tb_lock);
    24422446    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
    24432447        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
     2448        /* We may be called for host regions that are outside guest
     2449           address space.  */
     2450        if (!p)
     2451            return;
    24442452        /* if the write protection is set, then we invalidate the code
    24452453           inside */
     
    24512459        p->flags = flags;
    24522460    }
    2453     spin_unlock(&tb_lock);
    24542461}
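page_set_flags() no longer takes tb_lock itself; the added comment makes the caller responsible for holding mmap_lock, and host-only regions outside the guest address space are now simply skipped. A hedged sketch of the expected caller-side pattern in user-mode emulation (mmap_lock/mmap_unlock and the PAGE_* flags come from QEMU's user-mode code, not from this hunk):

    /* caller serializes page-table updates itself */
    mmap_lock();
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
    mmap_unlock();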
    24552462
     
    24912498
    24922499/* called from signal handler: invalidate the code and unprotect the
    2493    page. Return TRUE if the fault was successfully handled. */
     2500   page. Return TRUE if the fault was succesfully handled. */
    24942501int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    24952502{
     
    25702577    } while (0)
    25712578
    2572 
    25732579/* register physical memory. 'size' must be a multiple of the target
    25742580   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
    25752581   io memory page */
    25762582void cpu_register_physical_memory(target_phys_addr_t start_addr,
    2577                                   unsigned long size,
    2578                                   unsigned long phys_offset)
     2583                                  ram_addr_t size,
     2584                                  ram_addr_t phys_offset)
    25792585{
    25802586    target_phys_addr_t addr, end_addr;
     
    26132619            } else {
    26142620                p->phys_offset = phys_offset;
    2615         if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
    2616             (phys_offset & IO_MEM_ROMD))
     2621                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
     2622                    (phys_offset & IO_MEM_ROMD))
    26172623                    phys_offset += TARGET_PAGE_SIZE;
    26182624            }
     
    26202626            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
    26212627            p->phys_offset = phys_offset;
    2622         if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
    2623             (phys_offset & IO_MEM_ROMD))
     2628            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
     2629                (phys_offset & IO_MEM_ROMD))
    26242630                phys_offset += TARGET_PAGE_SIZE;
    26252631            else {
     
    26392645        }
    26402646    }
     2647
    26412648    /* since each CPU stores ram addresses in its TLB cache, we must
    26422649       reset the modified entries */
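The size and phys_offset parameters now use ram_addr_t, the type used elsewhere for offsets into guest RAM, instead of unsigned long. A hedged usage sketch; ram_offset, ram_size and mmio_index are placeholder names, with the io index assumed to come from cpu_register_io_memory():

    /* plain RAM: phys_offset is an offset into phys_ram_base */
    cpu_register_physical_memory(0x00000000, ram_size, ram_offset | IO_MEM_RAM);

    /* MMIO: phys_offset carries the io index returned by cpu_register_io_memory() */
    cpu_register_physical_memory(0xfee00000, 0x1000, mmio_index);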
     
    26482655
    26492656/* XXX: temporary until new memory mapping API */
    2650 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
     2657ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
    26512658{
    26522659    PhysPageDesc *p;
     
    26762683{
    26772684}
    2678 #endif
    2679 
     2685#endif /* !VBOX */
    26802686
    26812687static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
    26822688{
    26832689#ifdef DEBUG_UNASSIGNED
    2684     printf("Unassigned mem read  0x%08x\n", (int)addr);
     2690    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
    26852691#endif
    26862692#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
     
    27152721{
    27162722#ifdef DEBUG_UNASSIGNED
    2717     printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
     2723    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
     2724#endif
     2725#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
     2726    do_unassigned_access(addr, 1, 0, 0, 1);
    27182727#endif
    27192728}
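The debug printfs now use TARGET_FMT_plx, the printf conversion that matches target_phys_addr_t for the current build, instead of casting the address to int. A small sketch:

    #ifdef DEBUG_UNASSIGNED
        /* TARGET_FMT_plx expands to the right "%0..x" form for target_phys_addr_t */
        printf("unassigned access at " TARGET_FMT_plx "\n", addr);
    #endif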
     
    27382747#endif
    27392748}
     2749
    27402750static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    27412751    unassigned_mem_readb,
     
    27502760};
    27512761
    2752 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
    2753 {
    2754     unsigned long ram_addr;
     2762static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
     2763                                uint32_t val)
     2764{
    27552765    int dirty_flags;
    2756 #if defined(VBOX)
    2757     ram_addr = addr;
    2758 #else
    2759     ram_addr = addr - (unsigned long)phys_ram_base;
    2760 #endif
    27612766#ifdef VBOX
    27622767    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
     
    27772782    }
    27782783#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    2779     remR3PhysWriteU8(addr, val);
    2780 #else
    2781     stb_p((uint8_t *)(long)addr, val);
     2784    remR3PhysWriteU8(ram_addr, val);
     2785#else
     2786    stb_p(phys_ram_base + ram_addr, val);
    27822787#endif
    27832788#ifdef USE_KQEMU
     
    27942799       flushed */
    27952800    if (dirty_flags == 0xff)
    2796         tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
    2797 }
    2798 
    2799 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
    2800 {
    2801     unsigned long ram_addr;
     2801        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
     2802}
     2803
     2804static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
     2805                                uint32_t val)
     2806{
    28022807    int dirty_flags;
    2803 #if defined(VBOX)
    2804     ram_addr = addr;
    2805 #else
    2806     ram_addr = addr - (unsigned long)phys_ram_base;
    2807 #endif
    28082808#ifdef VBOX
    28092809    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
     
    28242824    }
    28252825#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    2826     remR3PhysWriteU16(addr, val);
    2827 #else
    2828     stw_p((uint8_t *)(long)addr, val);
     2826    remR3PhysWriteU16(ram_addr, val);
     2827#else
     2828    stw_p(phys_ram_base + ram_addr, val);
    28292829#endif
    28302830
     
    28422842       flushed */
    28432843    if (dirty_flags == 0xff)
    2844         tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
    2845 }
    2846 
    2847 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
    2848 {
    2849     unsigned long ram_addr;
     2844        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
     2845}
     2846
     2847static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
     2848                                uint32_t val)
     2849{
    28502850    int dirty_flags;
    2851 #if defined(VBOX)
    2852     ram_addr = addr;
    2853 #else
    2854     ram_addr = addr - (unsigned long)phys_ram_base;
    2855 #endif
    28562851#ifdef VBOX
    28572852    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
     
    28722867    }
    28732868#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    2874     remR3PhysWriteU32(addr, val);
    2875 #else
    2876     stl_p((uint8_t *)(long)addr, val);
     2869    remR3PhysWriteU32(ram_addr, val);
     2870#else
     2871    stl_p(phys_ram_base + ram_addr, val);
    28772872#endif
    28782873#ifdef USE_KQEMU
     
    28892884       flushed */
    28902885    if (dirty_flags == 0xff)
    2891         tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
     2886        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    28922887}
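All three notdirty writers now receive the RAM offset directly, so the per-handler conversion of the incoming address to a RAM offset disappears and the store goes through phys_ram_base + ram_addr. A condensed sketch of the shared shape of these handlers (tb_invalidate_phys_page_fast is the code-invalidation helper in this file, not shown in the hunk):

    static void notdirty_write_sketch(ram_addr_t ram_addr, uint32_t val)
    {
        int dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
        if (!(dirty_flags & CODE_DIRTY_FLAG)) {
            tb_invalidate_phys_page_fast(ram_addr, 1);    /* flush translated code */
            dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
        }
        stb_p(phys_ram_base + ram_addr, val);             /* the actual store */
        dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
        phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
        if (dirty_flags == 0xff)                          /* page fully dirty again */
            tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }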
    28932888
     
    29032898    notdirty_mem_writel,
    29042899};
    2905 
    29062900
    29072901/* Generate a debug exception if a watchpoint has been hit.  */
     
    31843178    return io_mem_read[io_index >> IO_MEM_SHIFT];
    31853179}
     3180
    31863181#endif /* !defined(CONFIG_USER_ONLY) */
    31873182
     
    32103205                /* FIXME - should this return an error rather than just fail? */
    32113206                return;
    3212             memcpy(p, buf, len);
    3213             unlock_user(p, addr, len);
     3207            memcpy(p, buf, l);
     3208            unlock_user(p, addr, l);
    32143209        } else {
    32153210            if (!(flags & PAGE_READ))
    32163211                return;
     3212            /* XXX: this code should not depend on lock_user */
    32173213            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
    32183214                /* FIXME - should this return an error rather than just fail? */
    32193215                return;
    3220             memcpy(buf, p, len);
     3216            memcpy(buf, p, l);
    32213217            unlock_user(p, addr, 0);
    32223218        }
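The two copies above now use l, the number of bytes that fit in the current guest page, rather than the full remaining len, which could otherwise run past the region locked by lock_user(). The surrounding loop computes l per iteration; a self-contained sketch of that chunking:

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE_SK 4096u
    #define PAGE_MASK_SK (~(PAGE_SIZE_SK - 1))

    /* visit a buffer one guest page at a time; 'l' is what each copy must use */
    static void for_each_page_chunk(uint32_t addr, size_t len)
    {
        while (len > 0) {
            uint32_t page = addr & PAGE_MASK_SK;
            size_t l = (page + PAGE_SIZE_SK) - addr;   /* bytes left in this page */
            if (l > len)
                l = len;
            /* ... copy exactly 'l' bytes here ... */
            len  -= l;
            addr += l;
        }
    }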
     
    35083504    } else {
    35093505#ifndef VBOX
    3510         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
    3511             (addr & ~TARGET_PAGE_MASK);
     3506        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
     3507        ptr = phys_ram_base + addr1;
    35123508        stl_p(ptr, val);
    35133509#else
     
    35213517                /* set dirty bit */
    35223518                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
    3523                         (0xff & ~CODE_DIRTY_FLAG);
     3519                    (0xff & ~CODE_DIRTY_FLAG);
    35243520            }
    35253521        }
    3526 #endif
     3522#endif /* !VBOX */
    35273523    }
    35283524}
     
    35613557    }
    35623558}
    3563 
    35643559
    35653560/* warning: addr must be aligned */
     
    36323627{
    36333628    int l;
    3634     target_ulong page, phys_addr;
     3629    target_phys_addr_t phys_addr;
     3630    target_ulong page;
    36353631
    36363632    while (len > 0) {
  • trunk/src/recompiler/fpu/softfloat-macros.h

    r21292 r36140  
    718718
    719719}
     720
  • trunk/src/recompiler/fpu/softfloat-native.c

    r21292 r36140  
    77{
    88    STATUS(float_rounding_mode) = val;
    9 #if defined(_BSD) && !defined(__APPLE__) || (defined(HOST_SOLARIS) && (HOST_SOLARIS < 10 || HOST_SOLARIS == 11))
     9#if defined(_BSD) && !defined(__APPLE__) || (defined(HOST_SOLARIS) && (HOST_SOLARIS < 10 || HOST_SOLARIS == 11)) /* VBOX adds sol 11 */
    1010    fpsetround(val);
    1111#elif defined(__arm__)
     
    6161#endif
    6262
    63 #if defined(_ARCH_PPC)
     63#if defined(__powerpc__)
    6464
    6565/* correct (but slow) PowerPC rint() (glibc version is incorrect) */
    66 static double qemu_rint(double x)
     66double qemu_rint(double x)
    6767{
    6868    double y = 4503599627370496.0;
     
    230230{
    231231    if (a < b) {
    232         return float_relation_less;
     232        return -1;
    233233    } else if (a == b) {
    234         return float_relation_equal;
     234        return 0;
    235235    } else if (a > b) {
    236         return float_relation_greater;
    237     } else {
    238         return float_relation_unordered;
     236        return 1;
     237    } else {
     238        return 2;
    239239    }
    240240}
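The re-sync replaces the symbolic comparison results with their literal values; behaviour is unchanged, since the names on the removed side correspond to these values in the newer softfloat headers (shown here for reference only, the definition is not part of this changeset):

    enum {
        float_relation_less      = -1,
        float_relation_equal     =  0,
        float_relation_greater   =  1,
        float_relation_unordered =  2,
    };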
     
    242242{
    243243    if (isless(a, b)) {
    244         return float_relation_less;
     244        return -1;
    245245    } else if (a == b) {
    246         return float_relation_equal;
     246        return 0;
    247247    } else if (isgreater(a, b)) {
    248         return float_relation_greater;
    249     } else {
    250         return float_relation_unordered;
     248        return 1;
     249    } else {
     250        return 2;
    251251    }
    252252}
     
    258258    a = u.i;
    259259    return ( ( ( a>>22 ) & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF );
    260 }
    261 
    262 int float32_is_nan( float32 a1 )
    263 {
    264     float32u u;
    265     uint64_t a;
    266     u.f = a1;
    267     a = u.i;
    268     return ( 0xFF800000 < ( a<<1 ) );
    269260}
    270261
     
    401392{
    402393    if (a < b) {
    403         return float_relation_less;
     394        return -1;
    404395    } else if (a == b) {
    405         return float_relation_equal;
     396        return 0;
    406397    } else if (a > b) {
    407         return float_relation_greater;
    408     } else {
    409         return float_relation_unordered;
     398        return 1;
     399    } else {
     400        return 2;
    410401    }
    411402}
     
    413404{
    414405    if (isless(a, b)) {
    415         return float_relation_less;
     406        return -1;
    416407    } else if (a == b) {
    417         return float_relation_equal;
     408        return 0;
    418409    } else if (isgreater(a, b)) {
    419         return float_relation_greater;
    420     } else {
    421         return float_relation_unordered;
     410        return 1;
     411    } else {
     412        return 2;
    422413    }
    423414}
     
    441432    a = u.i;
    442433
    443     return ( LIT64( 0xFFF0000000000000 ) < (bits64) ( a<<1 ) );
     434    return ( LIT64( 0xFFE0000000000000 ) < (bits64) ( a<<1 ) );
    444435
    445436}
     
    493484{
    494485    if (a < b) {
    495         return float_relation_less;
     486        return -1;
    496487    } else if (a == b) {
    497         return float_relation_equal;
     488        return 0;
    498489    } else if (a > b) {
    499         return float_relation_greater;
    500     } else {
    501         return float_relation_unordered;
     490        return 1;
     491    } else {
     492        return 2;
    502493    }
    503494}
     
    505496{
    506497    if (isless(a, b)) {
    507         return float_relation_less;
     498        return -1;
    508499    } else if (a == b) {
    509         return float_relation_equal;
     500        return 0;
    510501    } else if (isgreater(a, b)) {
    511         return float_relation_greater;
    512     } else {
    513         return float_relation_unordered;
     502        return 1;
     503    } else {
     504        return 2;
    514505    }
    515506}
    516507int floatx80_is_signaling_nan( floatx80 a1)
    517 {
    518     floatx80u u;
    519     uint64_t aLow;
    520     u.f = a1;
    521 
    522     aLow = u.i.low & ~ LIT64( 0x4000000000000000 );
    523     return
    524            ( ( u.i.high & 0x7FFF ) == 0x7FFF )
    525         && (bits64) ( aLow<<1 )
    526         && ( u.i.low == aLow );
    527 }
    528 
    529 int floatx80_is_nan( floatx80 a1 )
    530508{
    531509    floatx80u u;
  • trunk/src/recompiler/fpu/softfloat-native.h

    r36125 r36140  
    44#if (defined(_BSD) && !defined(__APPLE__)) || defined(HOST_SOLARIS)
    55#include <ieeefp.h>
    6 #elif defined(_MSC_VER)
    7 # include <fpieee.h>
    8 # ifndef fabsf
    9 #  define fabsf(f) ((float)fabs(f))
    10 # endif
     6#define fabsf(f) ((float)fabs(f))
    117#else
    128#include <fenv.h>
    139#endif
    1410
    15 #if defined(__OpenBSD__) || defined(__NetBSD__)
     11#ifdef __OpenBSD__
     12/* Get OpenBSD version number */
    1613#include <sys/param.h>
    1714#endif
     
    3936#endif
    4037
    41 #ifdef __NetBSD__
    42 #ifndef isgreater
    43 #define isgreater(x, y)         __builtin_isgreater(x, y)
    44 #endif
    45 #ifndef isgreaterequal
    46 #define isgreaterequal(x, y)    __builtin_isgreaterequal(x, y)
    47 #endif
    48 #ifndef isless
    49 #define isless(x, y)            __builtin_isless(x, y)
    50 #endif
    51 #ifndef islessequal
    52 #define islessequal(x, y)       __builtin_islessequal(x, y)
    53 #endif
    54 #ifndef isunordered
    55 #define isunordered(x, y)       __builtin_isunordered(x, y)
    56 #endif
    57 #endif
    58 
    59 
    6038#define isnormal(x)             (fpclass(x) >= FP_NZERO)
    6139#define isgreater(x, y)         ((!unordered(x, y)) && ((x) > (y)))
     
    145123
    146124typedef struct float_status {
    147     int float_rounding_mode;
    148 #ifdef FLOATX80
    149     int floatx80_rounding_precision;
     125    signed char float_rounding_mode;
     126#ifdef FLOATX80
     127    signed char floatx80_rounding_precision;
    150128#endif
    151129} float_status;
     
    251229int float32_compare_quiet( float32, float32 STATUS_PARAM );
    252230int float32_is_signaling_nan( float32 );
    253 int float32_is_nan( float32 );
    254231
    255232INLINE float32 float32_abs(float32 a)
     
    261238{
    262239    return -a;
    263 }
    264 
    265 INLINE float32 float32_is_infinity(float32 a)
    266 {
    267     return fpclassify(a) == FP_INFINITE;
    268 }
    269 
    270 INLINE float32 float32_is_neg(float32 a)
    271 {
    272     float32u u;
    273     u.f = a;
    274     return u.i >> 31;
    275 }
    276 
    277 INLINE float32 float32_is_zero(float32 a)
    278 {
    279     return fpclassify(a) == FP_ZERO;
    280240}
    281241
     
    372332}
    373333
    374 INLINE float64 float64_is_infinity(float64 a)
    375 {
    376     return fpclassify(a) == FP_INFINITE;
    377 }
    378 
    379 INLINE float64 float64_is_neg(float64 a)
    380 {
    381     float64u u;
    382     u.f = a;
    383     return u.i >> 63;
    384 }
    385 
    386 INLINE float64 float64_is_zero(float64 a)
    387 {
    388     return fpclassify(a) == FP_ZERO;
    389 }
    390 
    391334INLINE float64 float64_scalbn(float64 a, int n)
    392335{
     
    464407int floatx80_compare_quiet( floatx80, floatx80 STATUS_PARAM );
    465408int floatx80_is_signaling_nan( floatx80 );
    466 int floatx80_is_nan( floatx80 );
    467409
    468410INLINE floatx80 floatx80_abs(floatx80 a)
     
    476418}
    477419
    478 INLINE floatx80 floatx80_is_infinity(floatx80 a)
    479 {
    480     return fpclassify(a) == FP_INFINITE;
    481 }
    482 
    483 INLINE floatx80 floatx80_is_neg(floatx80 a)
    484 {
    485     floatx80u u;
    486     u.f = a;
    487     return u.i.high >> 15;
    488 }
    489 
    490 INLINE floatx80 floatx80_is_zero(floatx80 a)
    491 {
    492     return fpclassify(a) == FP_ZERO;
    493 }
    494 
    495420INLINE floatx80 floatx80_scalbn(floatx80 a, int n)
    496421{
  • trunk/src/recompiler/fpu/softfloat-specialize.h

    r21292 r36140  
    3838
    3939/*----------------------------------------------------------------------------
     40| Underflow tininess-detection mode, statically initialized to default value.
     41| (The declaration in `softfloat.h' must match the `int8' type here.)
     42*----------------------------------------------------------------------------*/
     43int8 float_detect_tininess = float_tininess_after_rounding;
     44
     45/*----------------------------------------------------------------------------
    4046| Raises the exceptions specified by `flags'.  Floating-point traps can be
    4147| defined here if desired.  It is currently not possible for such a trap
     
    6268#if defined(TARGET_SPARC)
    6369#define float32_default_nan make_float32(0x7FFFFFFF)
    64 #elif defined(TARGET_POWERPC) || defined(TARGET_ARM)
     70#elif defined(TARGET_POWERPC)
    6571#define float32_default_nan make_float32(0x7FC00000)
    6672#elif defined(TARGET_HPPA)
     
    144150    flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
    145151    bits32 av, bv, res;
    146 
    147     if ( STATUS(default_nan_mode) )
    148         return float32_default_nan;
    149152
    150153    aIsNaN = float32_is_nan( a );
     
    190193#if defined(TARGET_SPARC)
    191194#define float64_default_nan make_float64(LIT64( 0x7FFFFFFFFFFFFFFF ))
    192 #elif defined(TARGET_POWERPC) || defined(TARGET_ARM)
     195#elif defined(TARGET_POWERPC)
    193196#define float64_default_nan make_float64(LIT64( 0x7FF8000000000000 ))
    194197#elif defined(TARGET_HPPA)
     
    279282    flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
    280283    bits64 av, bv, res;
    281 
    282     if ( STATUS(default_nan_mode) )
    283         return float64_default_nan;
    284284
    285285    aIsNaN = float64_is_nan( a );
     
    419419    flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
    420420
    421     if ( STATUS(default_nan_mode) ) {
    422         a.low = floatx80_default_nan_low;
    423         a.high = floatx80_default_nan_high;
    424         return a;
    425     }
    426 
    427421    aIsNaN = floatx80_is_nan( a );
    428422    aIsSignalingNaN = floatx80_is_signaling_nan( a );
     
    545539    flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
    546540
    547     if ( STATUS(default_nan_mode) ) {
    548         a.low = float128_default_nan_low;
    549         a.high = float128_default_nan_high;
    550         return a;
    551     }
    552 
    553541    aIsNaN = float128_is_nan( a );
    554542    aIsSignalingNaN = float128_is_signaling_nan( a );
  • trunk/src/recompiler/fpu/softfloat.c

    r21292 r36140  
    3131=============================================================================*/
    3232
    33 /* FIXME: Flush-To-Zero only effects results.  Denormal inputs should also
    34    be flushed to zero.  */
    3533#include "softfloat.h"
    3634
     
    297295        }
    298296        if ( zExp < 0 ) {
    299             if ( STATUS(flush_to_zero) ) return packFloat32( zSign, 0, 0 );
    300297            isTiny =
    301298                   ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
     
    461458        }
    462459        if ( zExp < 0 ) {
    463             if ( STATUS(flush_to_zero) ) return packFloat64( zSign, 0, 0 );
    464460            isTiny =
    465461                   ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
     
    640636        }
    641637        if ( zExp <= 0 ) {
    642             if ( STATUS(flush_to_zero) ) return packFloatx80( zSign, 0, 0 );
    643638            isTiny =
    644639                   ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
     
    971966        }
    972967        if ( zExp < 0 ) {
    973             if ( STATUS(flush_to_zero) ) return packFloat128( zSign, 0, 0, 0 );
    974968            isTiny =
    975969                   ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
     
    16441638            return a;
    16451639        }
    1646         if ( aExp == 0 ) {
    1647             if ( STATUS(flush_to_zero) ) return packFloat32( zSign, 0, 0 );
    1648             return packFloat32( zSign, 0, ( aSig + bSig )>>6 );
    1649         }
     1640        if ( aExp == 0 ) return packFloat32( zSign, 0, ( aSig + bSig )>>6 );
    16501641        zSig = 0x40000000 + aSig + bSig;
    16511642        zExp = aExp;
     
    20552046    return roundAndPackFloat32( 0, zExp, zSig STATUS_VAR );
    20562047
    2057 }
    2058 
    2059 /*----------------------------------------------------------------------------
    2060 | Returns the binary log of the single-precision floating-point value `a'.
    2061 | The operation is performed according to the IEC/IEEE Standard for Binary
    2062 | Floating-Point Arithmetic.
    2063 *----------------------------------------------------------------------------*/
    2064 float32 float32_log2( float32 a STATUS_PARAM )
    2065 {
    2066     flag aSign, zSign;
    2067     int16 aExp;
    2068     bits32 aSig, zSig, i;
    2069 
    2070     aSig = extractFloat32Frac( a );
    2071     aExp = extractFloat32Exp( a );
    2072     aSign = extractFloat32Sign( a );
    2073 
    2074     if ( aExp == 0 ) {
    2075         if ( aSig == 0 ) return packFloat32( 1, 0xFF, 0 );
    2076         normalizeFloat32Subnormal( aSig, &aExp, &aSig );
    2077     }
    2078     if ( aSign ) {
    2079         float_raise( float_flag_invalid STATUS_VAR);
    2080         return float32_default_nan;
    2081     }
    2082     if ( aExp == 0xFF ) {
    2083         if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR );
    2084         return a;
    2085     }
    2086 
    2087     aExp -= 0x7F;
    2088     aSig |= 0x00800000;
    2089     zSign = aExp < 0;
    2090     zSig = aExp << 23;
    2091 
    2092     for (i = 1 << 22; i > 0; i >>= 1) {
    2093         aSig = ( (bits64)aSig * aSig ) >> 23;
    2094         if ( aSig & 0x01000000 ) {
    2095             aSig >>= 1;
    2096             zSig |= i;
    2097         }
    2098     }
    2099 
    2100     if ( zSign )
    2101         zSig = -zSig;
    2102 
    2103     return normalizeRoundAndPackFloat32( zSign, 0x85, zSig STATUS_VAR );
    21042048}
    21052049
     
    26522596            return a;
    26532597        }
    2654         if ( aExp == 0 ) {
    2655             if ( STATUS(flush_to_zero) ) return packFloat64( zSign, 0, 0 );
    2656             return packFloat64( zSign, 0, ( aSig + bSig )>>9 );
    2657         }
     2598        if ( aExp == 0 ) return packFloat64( zSign, 0, ( aSig + bSig )>>9 );
    26582599        zSig = LIT64( 0x4000000000000000 ) + aSig + bSig;
    26592600        zExp = aExp;
     
    30512992    return roundAndPackFloat64( 0, zExp, zSig STATUS_VAR );
    30522993
    3053 }
    3054 
    3055 /*----------------------------------------------------------------------------
    3056 | Returns the binary log of the double-precision floating-point value `a'.
    3057 | The operation is performed according to the IEC/IEEE Standard for Binary
    3058 | Floating-Point Arithmetic.
    3059 *----------------------------------------------------------------------------*/
    3060 float64 float64_log2( float64 a STATUS_PARAM )
    3061 {
    3062     flag aSign, zSign;
    3063     int16 aExp;
    3064     bits64 aSig, aSig0, aSig1, zSig, i;
    3065 
    3066     aSig = extractFloat64Frac( a );
    3067     aExp = extractFloat64Exp( a );
    3068     aSign = extractFloat64Sign( a );
    3069 
    3070     if ( aExp == 0 ) {
    3071         if ( aSig == 0 ) return packFloat64( 1, 0x7FF, 0 );
    3072         normalizeFloat64Subnormal( aSig, &aExp, &aSig );
    3073     }
    3074     if ( aSign ) {
    3075         float_raise( float_flag_invalid STATUS_VAR);
    3076         return float64_default_nan;
    3077     }
    3078     if ( aExp == 0x7FF ) {
    3079         if ( aSig ) return propagateFloat64NaN( a, float64_zero STATUS_VAR );
    3080         return a;
    3081     }
    3082 
    3083     aExp -= 0x3FF;
    3084     aSig |= LIT64( 0x0010000000000000 );
    3085     zSign = aExp < 0;
    3086     zSig = (bits64)aExp << 52;
    3087     for (i = 1LL << 51; i > 0; i >>= 1) {
    3088         mul64To128( aSig, aSig, &aSig0, &aSig1 );
    3089         aSig = ( aSig0 << 12 ) | ( aSig1 >> 52 );
    3090         if ( aSig & LIT64( 0x0020000000000000 ) ) {
    3091             aSig >>= 1;
    3092             zSig |= i;
    3093         }
    3094     }
    3095 
    3096     if ( zSign )
    3097         zSig = -zSig;
    3098     return normalizeRoundAndPackFloat64( zSign, 0x408, zSig STATUS_VAR );
    30992994}
    31002995
     
    47034598        }
    47044599        add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 );
    4705         if ( aExp == 0 ) {
    4706             if ( STATUS(flush_to_zero) ) return packFloat128( zSign, 0, 0, 0 );
    4707             return packFloat128( zSign, 0, zSig0, zSig1 );
    4708         }
     4600        if ( aExp == 0 ) return packFloat128( zSign, 0, zSig0, zSig1 );
    47094601        zSig2 = 0;
    47104602        zSig0 |= LIT64( 0x0002000000000000 );
     
    55885480        return a;
    55895481    }
    5590     if ( aExp != 0 )
    5591         aSig |= 0x00800000;
    5592     else if ( aSig == 0 )
    5593         return a;
    5594 
    5595     aExp += n - 1;
    5596     aSig <<= 7;
    5597     return normalizeRoundAndPackFloat32( aSign, aExp, aSig STATUS_VAR );
     5482    aExp += n;
     5483    return roundAndPackFloat32( aSign, aExp, aSig STATUS_VAR );
    55985484}
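float32_scalbn(a, n) scales a by 2^n. The reverted version only bumps the exponent and rounds; the removed lines additionally set the implicit integer bit and renormalized, which is how subnormal inputs were handled. A tiny reference model of the intended arithmetic using host math, ignoring rounding-mode and NaN details:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* reference semantics: scalbn(a, n) == a * 2^n */
        printf("%g\n", scalbnf(1.5f, 4));   /* 24 */
        printf("%g\n", scalbnf(6.0f, -1));  /* 3  */
        return 0;
    }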
    55995485
     
    56115497        return a;
    56125498    }
    5613     if ( aExp != 0 )
    5614         aSig |= LIT64( 0x0010000000000000 );
    5615     else if ( aSig == 0 )
    5616         return a;
    5617 
    5618     aExp += n - 1;
    5619     aSig <<= 10;
    5620     return normalizeRoundAndPackFloat64( aSign, aExp, aSig STATUS_VAR );
     5499    aExp += n;
     5500    return roundAndPackFloat64( aSign, aExp, aSig STATUS_VAR );
    56215501}
    56225502
     
    56355515        return a;
    56365516    }
    5637     if (aExp == 0 && aSig == 0)
    5638         return a;
    5639 
    56405517    aExp += n;
    5641     return normalizeRoundAndPackFloatx80( STATUS(floatx80_rounding_precision),
    5642                                           aSign, aExp, aSig, 0 STATUS_VAR );
     5518    return roundAndPackFloatx80( STATUS(floatx80_rounding_precision),
     5519                                 aSign, aExp, aSig, 0 STATUS_VAR );
    56435520}
    56445521#endif
     
    56585535        return a;
    56595536    }
    5660     if ( aExp != 0 )
    5661         aSig0 |= LIT64( 0x0001000000000000 );
    5662     else if ( aSig0 == 0 && aSig1 == 0 )
    5663         return a;
    5664 
    5665     aExp += n - 1;
    5666     return normalizeRoundAndPackFloat128( aSign, aExp, aSig0, aSig1
    5667                                           STATUS_VAR );
     5537    aExp += n;
     5538    return roundAndPackFloat128( aSign, aExp, aSig0, aSig1, 0 STATUS_VAR );
    56685539
    56695540}
  • trunk/src/recompiler/fpu/softfloat.h

    r26499 r36140  
    5555typedef uint8_t uint8;
    5656typedef int8_t int8;
    57 #ifndef _AIX
    5857typedef int uint16;
    5958typedef int int16;
    60 #endif
    6159typedef unsigned int uint32;
    6260typedef signed int int32;
     
    9593#else
    9694/* native float support */
    97 #if (defined(__i386__) || defined(__x86_64__)) && (!defined(_BSD) || defined(VBOX))
     95#if (defined(__i386__) || defined(__x86_64__)) && (!defined(_BSD) || defined(VBOX)) /** @todo VBOX: not correct on windows */
    9896#define FLOATX80
    9997#endif
     
    199197    signed char floatx80_rounding_precision;
    200198#endif
    201     flag flush_to_zero;
    202     flag default_nan_mode;
    203199} float_status;
    204200
    205201void set_float_rounding_mode(int val STATUS_PARAM);
    206202void set_float_exception_flags(int val STATUS_PARAM);
    207 INLINE void set_flush_to_zero(flag val STATUS_PARAM)
    208 {
    209     STATUS(flush_to_zero) = val;
    210 }
    211 INLINE void set_default_nan_mode(flag val STATUS_PARAM)
    212 {
    213     STATUS(default_nan_mode) = val;
    214 }
    215203INLINE int get_float_exception_flags(float_status *status)
    216204{
     
    278266float32 float32_rem( float32, float32 STATUS_PARAM );
    279267float32 float32_sqrt( float32 STATUS_PARAM );
    280 float32 float32_log2( float32 STATUS_PARAM );
    281268int float32_eq( float32, float32 STATUS_PARAM );
    282269int float32_le( float32, float32 STATUS_PARAM );
     
    301288}
    302289
    303 INLINE int float32_is_infinity(float32 a)
    304 {
    305     return (float32_val(a) & 0x7fffffff) == 0x7f800000;
    306 }
    307 
    308 INLINE int float32_is_neg(float32 a)
    309 {
    310     return float32_val(a) >> 31;
    311 }
    312 
    313 INLINE int float32_is_zero(float32 a)
    314 {
    315     return (float32_val(a) & 0x7fffffff) == 0;
    316 }
    317 
    318290#define float32_zero make_float32(0)
    319 #define float32_one make_float32(0x3f800000)
    320291
    321292/*----------------------------------------------------------------------------
     
    349320float64 float64_rem( float64, float64 STATUS_PARAM );
    350321float64 float64_sqrt( float64 STATUS_PARAM );
    351 float64 float64_log2( float64 STATUS_PARAM );
    352322int float64_eq( float64, float64 STATUS_PARAM );
    353323int float64_le( float64, float64 STATUS_PARAM );
     
    372342}
    373343
    374 INLINE int float64_is_infinity(float64 a)
    375 {
    376     return (float64_val(a) & 0x7fffffffffffffffLL ) == 0x7ff0000000000000LL;
    377 }
    378 
    379 INLINE int float64_is_neg(float64 a)
    380 {
    381     return float64_val(a) >> 63;
    382 }
    383 
    384 INLINE int float64_is_zero(float64 a)
    385 {
    386     return (float64_val(a) & 0x7fffffffffffffffLL) == 0;
    387 }
    388 
    389344#define float64_zero make_float64(0)
    390 #define float64_one make_float64(0x3ff0000000000000LL)
    391345
    392346#ifdef FLOATX80
     
    435389    a.high ^= 0x8000;
    436390    return a;
    437 }
    438 
    439 INLINE int floatx80_is_infinity(floatx80 a)
    440 {
    441     return (a.high & 0x7fff) == 0x7fff && a.low == 0;
    442 }
    443 
    444 INLINE int floatx80_is_neg(floatx80 a)
    445 {
    446     return a.high >> 15;
    447 }
    448 
    449 INLINE int floatx80_is_zero(floatx80 a)
    450 {
    451     return (a.high & 0x7fff) == 0 && a.low == 0;
    452391}
    453392
     
    503442}
    504443
    505 INLINE int float128_is_infinity(float128 a)
    506 {
    507     return (a.high & 0x7fffffffffffffffLL) == 0x7fff000000000000LL && a.low == 0;
    508 }
    509 
    510 INLINE int float128_is_neg(float128 a)
    511 {
    512     return a.high >> 63;
    513 }
    514 
    515 INLINE int float128_is_zero(float128 a)
    516 {
    517     return (a.high & 0x7fffffffffffffffLL) == 0 && a.low == 0;
    518 }
    519 
    520444#endif
    521445
  • trunk/src/recompiler/hostregs_helper.h

    r36125 r36140  
    2828 */
    2929
    30 /* The GCC global register variable extension is used to reserve some
     30/* The GCC global register vairable extension is used to reserve some
    3131   host registers for use by dyngen.  However only the core parts of the
    3232   translation engine are compiled with these settings.  We must manually
  • trunk/src/recompiler/osdep.h

    r36125 r36140  
    22#define QEMU_OSDEP_H
    33
    4 #ifdef VBOX
     4#ifdef VBOX /** @todo clean up this, it's not fully synched. */
    55
    66#include <iprt/alloc.h>
     
    8181
    8282#ifndef likely
    83 #ifndef VBOX
    8483#if __GNUC__ < 3
    8584#define __builtin_expect(x, n) (x)
     
    8887#define likely(x)   __builtin_expect(!!(x), 1)
    8988#define unlikely(x)   __builtin_expect(!!(x), 0)
    90 #else /* VBOX */
    91 #define likely(cond)        RT_LIKELY(cond)
    92 #define unlikely(cond)      RT_UNLIKELY(cond)
    93 #endif
    9489#endif /* !likely */
    9590
     
    131126#endif
    132127
     128#ifndef VBOX
     129#define qemu_printf printf
     130#endif
     131
    133132#if defined (__GNUC__) && defined (__GNUC_MINOR_)
    134133# define QEMU_GNUC_PREREQ(maj, min) \
  • trunk/src/recompiler/qemu-lock.h

    r36125 r36140  
    3030   pthread mutexes, and non-NPTL userspace isn't threadsafe anyway.
    3131   In either case a spinlock is probably the wrong kind of lock.
    32    Spinlocks are only good if you know another CPU has the lock and is
     32   Spinlocks are only good if you know annother CPU has the lock and is
    3333   likely to release it soon.  In environments where you have more threads
    3434   than physical CPUs (the extreme case being a single CPU host) a spinlock
  • trunk/src/recompiler/softmmu_exec.h

    r17040 r36140  
    11/* Common softmmu definitions and inline routines.  */
    22
    3 #define ldul_user ldl_user
    4 #define ldul_kernel ldl_kernel
     3#define ldul_user       ldl_user
     4#define ldul_kernel     ldl_kernel
     5#define ldul_hypv       ldl_hypv
     6#define ldul_executive  ldl_executive
     7#define ldul_supervisor ldl_supervisor
     8
     9#include "softmmu_defs.h"
    510
    611#define ACCESS_TYPE 0
    7 #define MEMSUFFIX _kernel
     12#define MEMSUFFIX MMU_MODE0_SUFFIX
    813#define DATA_SIZE 1
    914#include "softmmu_header.h"
     
    2126
    2227#define ACCESS_TYPE 1
    23 #define MEMSUFFIX _user
     28#define MEMSUFFIX MMU_MODE1_SUFFIX
    2429#define DATA_SIZE 1
    2530#include "softmmu_header.h"
     
    3641#undef MEMSUFFIX
    3742
     43#if (NB_MMU_MODES >= 3)
     44
     45#define ACCESS_TYPE 2
     46#define MEMSUFFIX MMU_MODE2_SUFFIX
     47#define DATA_SIZE 1
     48#include "softmmu_header.h"
     49
     50#define DATA_SIZE 2
     51#include "softmmu_header.h"
     52
     53#define DATA_SIZE 4
     54#include "softmmu_header.h"
     55
     56#define DATA_SIZE 8
     57#include "softmmu_header.h"
     58#undef ACCESS_TYPE
     59#undef MEMSUFFIX
     60
     61#if (NB_MMU_MODES >= 4)
     62
     63#define ACCESS_TYPE 3
     64#define MEMSUFFIX MMU_MODE3_SUFFIX
     65#define DATA_SIZE 1
     66#include "softmmu_header.h"
     67
     68#define DATA_SIZE 2
     69#include "softmmu_header.h"
     70
     71#define DATA_SIZE 4
     72#include "softmmu_header.h"
     73
     74#define DATA_SIZE 8
     75#include "softmmu_header.h"
     76#undef ACCESS_TYPE
     77#undef MEMSUFFIX
     78
     79#if (NB_MMU_MODES > 4)
     80#error "NB_MMU_MODES > 4 is not supported for now"
     81#endif /* (NB_MMU_MODES > 4) */
     82#endif /* (NB_MMU_MODES == 4) */
     83#endif /* (NB_MMU_MODES >= 3) */
     84
    3885/* these access are slower, they must be as rare as possible */
    39 #define ACCESS_TYPE 2
     86#define ACCESS_TYPE (NB_MMU_MODES)
    4087#define MEMSUFFIX _data
    4188#define DATA_SIZE 1
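The accessors are now generated once per MMU mode: ACCESS_TYPE 0..NB_MMU_MODES-1 use the MMU_MODEn_SUFFIX names supplied by the target, and the slow _data set uses ACCESS_TYPE (NB_MMU_MODES). A hedged sketch of what a two-mode target provides in its cpu.h (believed to match target-i386 of this era, shown for illustration):

    /* target side: how many softmmu modes exist and how to name their accessors */
    #define NB_MMU_MODES     2
    #define MMU_MODE0_SUFFIX _kernel   /* generates ldub_kernel, ldl_kernel, ... */
    #define MMU_MODE1_SUFFIX _user     /* generates ldub_user,  ldl_user,  ... */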
  • trunk/src/recompiler/softmmu_header.h

    r36125 r36140  
    222222                  : "%eax", "%ecx", "%edx", "memory", "cc");
    223223}
     224
    224225#else
    225226
     
    228229static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
    229230{
    230 
    231231    int page_index;
    232232    RES_TYPE res;
     
    273273
    274274/* generic store macro */
     275
    275276static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
    276277{
  • trunk/src/recompiler/target-i386/cpu.h

    r36125 r36140  
    100100#define DESC_P_MASK     (1 << 15)
    101101#define DESC_DPL_SHIFT  13
     102#define DESC_DPL_MASK   (1 << DESC_DPL_SHIFT)
    102103#define DESC_S_MASK     (1 << 12)
    103104#define DESC_TYPE_SHIFT 8
     
    114115
    115116/* eflags masks */
    116 #define CC_C    0x0001
    117 #define CC_P    0x0004
     117#define CC_C    0x0001
     118#define CC_P    0x0004
    118119#define CC_A    0x0010
    119120#define CC_Z    0x0040
     
    125126#define VM_SHIFT   17
    126127
    127 #define TF_MASK                 0x00000100
    128 #define IF_MASK                 0x00000200
    129 #define DF_MASK                 0x00000400
     128#define TF_MASK                 0x00000100
     129#define IF_MASK                 0x00000200
     130#define DF_MASK                 0x00000400
    130131#define IOPL_MASK               0x00003000
    131 #define NT_MASK                 0x00004000
     132#define NT_MASK                 0x00004000
    132133#define RF_MASK                 0x00010000
    133134#define VM_MASK                 0x00020000
     
    138139
    139140/* hidden flags - used internally by qemu to represent additional cpu
    140    states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not redundant. We avoid
    141    using the IOPL_MASK, TF_MASK and VM_MASK bit position to ease oring
    142    with eflags. */
     141   states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
     142   redundant. We avoid using the IOPL_MASK, TF_MASK and VM_MASK bit
     143   position to ease oring with eflags. */
    143144/* current cpl */
    144145#define HF_CPL_SHIFT         0
     
    163164#define HF_OSFXSR_SHIFT     16 /* CR4.OSFXSR */
    164165#define HF_VM_SHIFT         17 /* must be same as eflags */
    165 #define HF_HALTED_SHIFT     18 /* CPU halted */
    166166#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
    167167#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
     
    182182#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
    183183#define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
    184 #define HF_HALTED_MASK       (1 << HF_HALTED_SHIFT)
    185184#define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
    186185#define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
     
    255254#define MSR_IA32_APICBASE_BASE          (0xfffff<<12)
    256255
    257 #ifndef MSR_IA32_SYSENTER_CS /* VBox x86.h kludge */
    258 #define MSR_IA32_SYSENTER_CS            0x174
    259 #define MSR_IA32_SYSENTER_ESP           0x175
    260 #define MSR_IA32_SYSENTER_EIP           0x176
    261 #endif
    262 
    263256#define MSR_IA32_SYSENTER_CS            0x174
    264257#define MSR_IA32_SYSENTER_ESP           0x175
     
    283276
    284277#ifdef VBOX
    285 #define MSR_APIC_RANGE_START            0x800
    286 #define MSR_APIC_RANGE_END              0x900
     278# define MSR_APIC_RANGE_START           0x800
     279# define MSR_APIC_RANGE_END             0x900
    287280#endif
    288281
     
    315308#define CPUID_PAT  (1 << 16)
    316309#define CPUID_PSE36   (1 << 17)
     310#define CPUID_PN   (1 << 18)
    317311#define CPUID_CLFLUSH (1 << 19)
    318312#define CPUID_DTS (1 << 21)
     
    322316#define CPUID_SSE  (1 << 25)
    323317#define CPUID_SSE2 (1 << 26)
    324 #define CPUID_SS   (1 << 27)
    325 #define CPUID_HT   (1 << 28)
    326 #define CPUID_TM   (1 << 29)
     318#define CPUID_SS (1 << 27)
     319#define CPUID_HT (1 << 28)
     320#define CPUID_TM (1 << 29)
    327321#define CPUID_IA64 (1 << 30)
    328 #define CPUID_PBE  (1 << 31)
     322#define CPUID_PBE (1 << 31)
    329323
    330324#define CPUID_EXT_SSE3     (1 << 0)
     
    409403enum {
    410404    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    411     CC_OP_EFLAGS,  /* all cc are explicitely computed, CC_SRC = flags */
     405    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */
    412406
    413407    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
     
    461455    CC_OP_SARQ,
    462456
    463     CC_OP_NB
     457    CC_OP_NB,
    464458};
    465459
     
    538532
    539533typedef struct CPUX86State {
    540   /* standard registers */
     534    /* standard registers */
    541535    target_ulong regs[CPU_NB_REGS];
    542536    target_ulong eip;
     
    550544    uint32_t cc_op;
    551545    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    552     uint32_t hflags;  /* TB flags, see HF_xxx constants. These flags
    553                          are known at translation time. */
     546    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
     547                        are known at translation time. */
    554548    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */
    555549
     
    601595    uint32_t alignment0;
    602596#endif
    603     uint64_t sysenter_esp;
    604     uint64_t sysenter_eip;
     597    target_ulong sysenter_esp;
     598    target_ulong sysenter_eip;
    605599    uint64_t efer;
    606600    uint64_t star;
     
    675669       user */
    676670    struct APICState *apic_state;
    677 #else
     671#else  /* VBOX */
    678672    uint32_t alignment2[3];
    679673    /** Profiling tb_flush. */
    680674    STAMPROFILE StatTbFlush;
    681 #endif
     675#endif /* VBOX */
    682676} CPUX86State;
    683677
     
    694688} SegmentCache_Ver16;
    695689
    696 #define CPU_NB_REGS_VER16 8
     690# define CPU_NB_REGS_VER16 8
    697691
    698692/* Version 1.6 structure; just for loading the old saved state */
    699693typedef struct CPUX86State_Ver16 {
    700 #if TARGET_LONG_BITS > HOST_LONG_BITS
     694# if TARGET_LONG_BITS > HOST_LONG_BITS
    701695    /* temporaries if we cannot store them in host registers */
    702696    uint32_t t0, t1, t2;
    703 #endif
     697# endif
    704698
    705699    /* standard registers */
     
    733727    uint8_t fptags[8];   /* 0 = valid, 1 = empty */
    734728    union {
    735 #ifdef USE_X86LDOUBLE
     729# ifdef USE_X86LDOUBLE
    736730        CPU86_LDouble d __attribute__((aligned(16)));
    737 #else
     731# else
    738732        CPU86_LDouble d;
    739 #endif
     733# endif
    740734        MMXReg mmx;
    741735    } fpregs[8];
     
    743737    /* emulator internal variables */
    744738    float_status fp_status;
    745 #ifdef VBOX
     739# ifdef VBOX
    746740    uint32_t alignment3[3]; /* force the long double to start a 16 byte line. */
    747 #endif
     741# endif
    748742    CPU86_LDouble ft0;
    749 #if defined(VBOX) && defined(RT_ARCH_X86) && !defined(RT_OS_DARWIN)
     743# if defined(VBOX) && defined(RT_ARCH_X86) && !defined(RT_OS_DARWIN)
    750744    uint32_t alignment4; /* long double is 12 byte, pad it to 16. */
    751 #endif
     745# endif
    752746    union {
    753747        float f;
     
    767761    uint32_t sysenter_esp;
    768762    uint32_t sysenter_eip;
    769 #ifdef VBOX
     763# ifdef VBOX
    770764    uint32_t alignment0;
    771 #endif
     765# endif
    772766    uint64_t efer;
    773767    uint64_t star;
     
    776770
    777771    /* temporary data for USE_CODE_COPY mode */
    778 #ifdef USE_CODE_COPY
     772# ifdef USE_CODE_COPY
    779773    uint32_t tmp0;
    780774    uint32_t saved_esp;
    781775    int native_fp_regs; /* if true, the FPU state is in the native CPU regs */
    782 #endif
     776# endif
    783777
    784778    /* exception/interrupt handling */
     
    788782/** CPUX86State state flags
    789783 * @{ */
    790 #define CPU_RAW_RING0            0x0002 /* Set after first time RawR0 is executed, never cleared. */
    791 #define CPU_EMULATE_SINGLE_INSTR 0x0040 /* Execute a single instruction in emulation mode */
    792 #define CPU_EMULATE_SINGLE_STEP  0x0080 /* go into single step mode */
    793 #define CPU_RAW_HWACC            0x0100 /* Set after first time HWACC is executed, never cleared. */
     784# define CPU_RAW_RING0            0x0002 /* Set after first time RawR0 is executed, never cleared. */
     785# define CPU_EMULATE_SINGLE_INSTR 0x0040 /* Execute a single instruction in emulation mode */
     786# define CPU_EMULATE_SINGLE_STEP  0x0080 /* go into single step mode */
     787# define CPU_RAW_HWACC            0x0100 /* Set after first time HWACC is executed, never cleared. */
    794788/** @} */
    795789#endif /* !VBOX */
     
    887881   they can trigger unexpected exceptions */
    888882void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
    889 void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
    890 void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);
     883void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
     884void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
    891885
    892886/* you can call this signal handler from your SIGBUS and SIGSEGV
     
    943937void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr);
    944938void save_raw_fp_state(CPUX86State *env, uint8_t *ptr);
    945 
    946 #endif
     939#endif /* VBOX */
    947940
    948941#define TARGET_PAGE_BITS 12
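DESC_DPL_MASK is added alongside the existing DESC_DPL_SHIFT. For orientation, the usual way the x86 segment helpers pull the privilege level out of the high descriptor dword looks like the following sketch (e2 and cpl are placeholder names, not part of this hunk):

    /* e2 = high 32 bits of a segment descriptor */
    unsigned dpl = (e2 >> DESC_DPL_SHIFT) & 3;   /* descriptor privilege level, 0..3 */
    if (dpl < cpl) {
        /* descriptor is more privileged than the current privilege level */
    }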
  • trunk/src/recompiler/target-i386/exec.h

    r36125 r36140  
    4040#include "cpu-defs.h"
    4141
    42 /* at least 4 register variables are defined */
    4342register struct CPUX86State *env asm(AREG0);
    4443
    4544#include "qemu-log.h"
    4645
    47 #ifndef reg_EAX
    4846#define EAX (env->regs[R_EAX])
    49 #endif
    50 #ifndef reg_ECX
    5147#define ECX (env->regs[R_ECX])
    52 #endif
    53 #ifndef reg_EDX
    5448#define EDX (env->regs[R_EDX])
    55 #endif
    56 #ifndef reg_EBX
    5749#define EBX (env->regs[R_EBX])
    58 #endif
    59 #ifndef reg_ESP
    6050#define ESP (env->regs[R_ESP])
    61 #endif
    62 #ifndef reg_EBP
    6351#define EBP (env->regs[R_EBP])
    64 #endif
    65 #ifndef reg_ESI
    6652#define ESI (env->regs[R_ESI])
    67 #endif
    68 #ifndef reg_EDI
    6953#define EDI (env->regs[R_EDI])
    70 #endif
    71 #define EIP  (env->eip)
     54#define EIP (env->eip)
    7255#define DF  (env->df)
    7356
     
    127110}
    128111
    129 void check_iob_T0(void);
    130 void check_iow_T0(void);
    131 void check_iol_T0(void);
    132 void check_iob_DX(void);
    133 void check_iow_DX(void);
    134 void check_iol_DX(void);
    135 
    136112#if !defined(CONFIG_USER_ONLY)
    137113
    138114#include "softmmu_exec.h"
    139 
    140 static inline double ldfq(target_ulong ptr)
    141 {
    142     union {
    143         double d;
    144         uint64_t i;
    145     } u;
    146     u.i = ldq(ptr);
    147     return u.d;
    148 }
    149 
    150 static inline void stfq(target_ulong ptr, double v)
    151 {
    152     union {
    153         double d;
    154         uint64_t i;
    155     } u;
    156     u.d = v;
    157     stq(ptr, u.i);
    158 }
    159 
    160 static inline float ldfl(target_ulong ptr)
    161 {
    162     union {
    163         float f;
    164         uint32_t i;
    165     } u;
    166     u.i = ldl(ptr);
    167     return u.f;
    168 }
    169 
    170 static inline void stfl(target_ulong ptr, float v)
    171 {
    172     union {
    173         float f;
    174         uint32_t i;
    175     } u;
    176     u.f = v;
    177     stl(ptr, u.i);
    178 }
    179115
    180116#endif /* !defined(CONFIG_USER_ONLY) */
     
    252188
    253189#define RC_MASK         0xc00
    254 #ifndef RC_NEAR
    255190#define RC_NEAR         0x000
    256 #endif
    257 #ifndef RC_DOWN
    258191#define RC_DOWN         0x400
    259 #endif
    260 #ifndef RC_UP
    261192#define RC_UP           0x800
    262 #endif
    263 #ifndef RC_CHOP
    264193#define RC_CHOP         0xc00
    265 #endif
    266194
    267195#define MAXTAN 9223372036854775808.0
     
    328256static inline void fpop(void)
    329257{
    330     env->fptags[env->fpstt] = 1; /* invalidate stack entry */
     258    env->fptags[env->fpstt] = 1; /* invvalidate stack entry */
    331259    env->fpstt = (env->fpstt + 1) & 7;
    332260}
     
    369297#else
    370298
    371 /* XXX: same endianness assumed */
    372 
    373 #ifdef CONFIG_USER_ONLY
    374 
    375 static inline CPU86_LDouble helper_fldt(target_ulong ptr)
    376 {
    377     return *(CPU86_LDouble *)ptr;
    378 }
    379 
    380 static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr)
    381 {
    382     *(CPU86_LDouble *)ptr = f;
    383 }
    384 
    385 #else
    386 
    387299/* we use memory access macros */
    388300
     
    404316    stw(ptr + 8, temp.l.upper);
    405317}
    406 
    407 #endif /* !CONFIG_USER_ONLY */
    408318
    409319#endif /* USE_X86LDOUBLE */
     
    442352    DF = 1 - (2 * ((eflags >> 10) & 1));
    443353    env->eflags = (env->eflags & ~update_mask) |
    444         (eflags & update_mask);
     354        (eflags & update_mask) | 0x2;
    445355}
    446356
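
Note: the exec.h hunk above drops the ldfq()/stfq()/ldfl()/stfl() wrappers that converted between raw guest memory words and host floating-point values through a union. For reference, a minimal standalone sketch of that union trick; the load_u64()/store_u64() accessors and fake_guest_mem below are invented placeholders for the real ldq()/stq() memory helpers, not part of the source.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the real guest-memory accessors. */
    static uint64_t fake_guest_mem;
    static uint64_t load_u64(void)        { return fake_guest_mem; }
    static void     store_u64(uint64_t v) { fake_guest_mem = v; }

    static double ldfq_sketch(void)
    {
        union { double d; uint64_t i; } u;
        u.i = load_u64();      /* raw 64-bit pattern from memory */
        return u.d;            /* reinterpreted as an IEEE double */
    }

    static void stfq_sketch(double v)
    {
        union { double d; uint64_t i; } u;
        u.d = v;
        store_u64(u.i);        /* store the raw bit pattern back */
    }

    int main(void)
    {
        stfq_sketch(3.5);
        printf("bits=%016llx value=%f\n",
               (unsigned long long)fake_guest_mem, ldfq_sketch());
        return 0;
    }
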
  • trunk/src/recompiler/target-i386/helper.c

    r33656 r36140  
    3333#include <string.h>
    3434#ifndef VBOX
    35 #include <inttypes.h>
    36 #include <signal.h>
    37 #include <assert.h>
     35# include <inttypes.h>
     36# include <signal.h>
     37# include <assert.h>
    3838#endif
    3939
     
    105105}
    106106#endif /* !VBOX */
     107
    107108#ifndef VBOX
    108109CPUX86State *cpu_x86_init(const char *cpu_model)
    109 {
     110#else
     111CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)
     112#endif
     113{
     114#ifndef VBOX
    110115    CPUX86State *env;
     116#endif
    111117    static int inited;
    112118
     119#ifndef VBOX
    113120    env = qemu_mallocz(sizeof(CPUX86State));
    114121    if (!env)
    115122        return NULL;
    116 #else
    117 CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)
    118 {
    119     static int inited;
    120123#endif
    121124    cpu_exec_init(env);
     
    302305            /* Some CPUs got no CPUID_SEP */
    303306        .ext_features = CPUID_EXT_MONITOR |
    304             CPUID_EXT_SSE3 /* PNI */,
     307            CPUID_EXT_SSE3 /* PNI */, CPUID_EXT_SSSE3,
    305308            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
    306309             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
     
    457460        }
    458461    }
    459 #endif // !VBOX
     462#endif /* !VBOX */
    460463    return 0;
    461464}
     
    854857    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
    855858        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    856 
    857859#ifdef VBOX
     860
    858861    remR3ChangeCpuMode(env);
    859862#endif
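
Note: the cpu_x86_init() hunk above folds the VBOX and non-VBOX variants into one function body, guarding only the prototype and the allocation with #ifndef VBOX instead of duplicating the whole function. A hedged sketch of that pattern with invented names (my_state_t, my_init), not the real CPUX86State code:

    #include <stdlib.h>

    typedef struct { int inited; } my_state_t;

    #ifndef VBOX
    my_state_t *my_init(void)                  /* QEMU build allocates itself */
    #else
    my_state_t *my_init(my_state_t *s)         /* VBOX build passes the state in */
    #endif
    {
    #ifndef VBOX
        my_state_t *s = calloc(1, sizeof(*s));
        if (!s)
            return NULL;
    #endif
        s->inited = 1;                         /* shared initialisation path */
        return s;
    }
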
  • trunk/src/recompiler/target-i386/helper.h

    r17040 r36140  
    143143DEF_HELPER(int32_t, helper_fisttl_ST0, (void))
    144144DEF_HELPER(int64_t, helper_fisttll_ST0, (void))
    145 #else
     145#else  /* VBOX */
    146146DEF_HELPER(RTCCUINTREG, helper_fsts_ST0, (void))
    147147DEF_HELPER(uint64_t, helper_fstl_ST0, (void))
     
    152152DEF_HELPER(RTCCINTREG, helper_fisttl_ST0, (void))
    153153DEF_HELPER(int64_t, helper_fisttll_ST0, (void))
    154 #endif
     154#endif /* VBOX */
    155155DEF_HELPER(void, helper_fldt_ST0, (target_ulong ptr))
    156156DEF_HELPER(void, helper_fstt_ST0, (target_ulong ptr))
     
    195195DEF_HELPER(uint32_t, helper_fnstsw, (void))
    196196DEF_HELPER(uint32_t, helper_fnstcw, (void))
    197 #else
     197#else  /* VBOX */
    198198DEF_HELPER(RTCCUINTREG, helper_fnstsw, (void))
    199199DEF_HELPER(RTCCUINTREG, helper_fnstcw, (void))
    200 #endif
     200#endif /* VBOX */
    201201DEF_HELPER(void, helper_fldcw, (uint32_t val))
    202202DEF_HELPER(void, helper_fclex, (void))
     
    257257void sync_seg(CPUX86State *env1, int seg_reg, int selector);
    258258void sync_ldtr(CPUX86State *env1, int selector);
    259 
    260 #endif
     259#endif /* VBOX */
    261260
    262261#undef DEF_HELPER
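
Note: helper.h ends with an #undef DEF_HELPER, the usual X-macro arrangement — the same DEF_HELPER(...) list can be expanded into prototypes in one place and into other per-helper data elsewhere before the macro is undefined. A self-contained toy version; HELPER_LIST and the helper_add/helper_neg names are invented, the real file lists the FPU/SSE helpers shown above.

    #include <stdio.h>

    #define HELPER_LIST \
        DEF_HELPER(int, helper_add, (int a, int b)) \
        DEF_HELPER(int, helper_neg, (int a))

    /* First expansion: prototypes. */
    #define DEF_HELPER(ret, name, params) ret name params;
    HELPER_LIST
    #undef DEF_HELPER

    /* Second expansion: a name table built from the same list. */
    #define DEF_HELPER(ret, name, params) #name,
    static const char *helper_names[] = { HELPER_LIST };
    #undef DEF_HELPER

    int helper_add(int a, int b) { return a + b; }
    int helper_neg(int a)        { return -a; }

    int main(void)
    {
        printf("%s -> %d\n", helper_names[0], helper_add(2, 3));
        printf("%s -> %d\n", helper_names[1], helper_neg(7));
        return 0;
    }
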
  • trunk/src/recompiler/target-i386/op_helper.c

    r36125 r36140  
    3333
    3434#ifdef VBOX
    35 #include "qemu-common.h"
    36 #include <math.h>
    37 #include "tcg.h"
    38 #endif
     35# include "qemu-common.h"
     36# include <math.h>
     37# include "tcg.h"
     38#endif /* VBOX */
    3939//#define DEBUG_PCALL
    4040
     
    139139
    140140#ifdef VBOX
     141
    141142void helper_write_eflags_vme(target_ulong t0)
    142143{
     
    190191            (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
    191192}
    192 #endif
     193
     194#endif /* VBOX */
    193195
    194196/* return non zero if error */
     
    207209        selector = selector & 0xfffc;
    208210    }
    209 #endif
     211#endif /* VBOX */
    210212
    211213    if (selector & 0x4)
     
    254256    cpu_x86_load_seg_cache(env, seg, selector,
    255257                           (selector << 4), 0xffff, flags);
    256 #else
     258#else  /* VBOX */
    257259    cpu_x86_load_seg_cache(env, seg, selector,
    258260                           (selector << 4), 0xffff, 0);
    259 #endif
     261#endif /* VBOX */
    260262}
    261263
     
    314316        selector = selector & 0xfffc;
    315317    }
    316 #endif
     318#endif /* VBOX */
    317319
    318320    if ((selector & 0xfffc) != 0) {
     
    358360            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    359361#ifdef VBOX
    360 #if 0
     362# if 0
    361363        /** @todo: now we ignore loading 0 selectors, need to check what is correct once */
    362364        cpu_x86_load_seg_cache(env, seg_reg, selector,
    363365                               0, 0, 0);
    364 #endif
    365 #endif
     366# endif
     367#endif /* VBOX */
    366368    }
    367369}
     
    637639    unsigned int io_offset;
    638640#endif /* VBOX */
     641
    639642    /* TSS must be a valid 32 bit one */
    640643    if (!(env->tr.flags & DESC_P_MASK) ||
     
    678681        sync_seg(env, reg, env->segs[reg].newselector);
    679682}
    680 #endif
     683#endif /* VBOX */
    681684
    682685void helper_check_iob(uint32_t t0)
     
    12711274#ifndef VBOX
    12721275    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
    1273 #else
     1276#else  /* VBOX */
    12741277    /*
    12751278     * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
     
    12781281     */
    12791282    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    1280 #endif
     1283#endif /* VBOX */
    12811284}
    12821285#endif
     
    15431546                  target_ulong next_eip, int is_hw)
    15441547{
     1548#ifdef VBOX
    15451549    if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
    15461550        if (is_int) {
     
    15521556        }
    15531557    }
     1558#endif
    15541559
    15551560    if (loglevel & CPU_LOG_INT) {
     
    26482653    selector &= 0xffff;
    26492654    cpl = env->hflags & HF_CPL_MASK;
    2650 
    26512655#ifdef VBOX
     2656
    26522657    /* Trying to load a selector with CPL=1? */
    26532658    if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
     
    26562661        selector = selector & 0xfffc;
    26572662    }
    2658 #endif
     2663#endif /* VBOX */
    26592664    if ((selector & 0xfffc) == 0) {
    26602665        /* null selector case */
     
    36653670        ECX = 0;
    36663671}
    3667 #endif
     3672#endif /* VBOX */
    36683673
    36693674void helper_rdpmc(void)
     
    36773682    EAX = 0;
    36783683    EDX = 0;
    3679 #else
     3684#else  /* !VBOX */
    36803685    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
    36813686        raise_exception(EXCP0D_GPF);
     
    36853690    /* currently unimplemented */
    36863691    raise_exception_err(EXCP06_ILLOP, 0);
    3687 #endif
     3692#endif /* !VBOX */
    36883693}
    36893694
     
    38183823        val = 1000ULL;
    38193824        /* CPU multiplier */
    3820         val |= ((uint64_t)4ULL << 40);
    3821         break;
    3822 #endif
     3825        val |= (((uint64_t)4ULL) << 40);
     3826        break;
     3827#endif /* !VBOX */
    38233828#ifdef TARGET_X86_64
    38243829    case MSR_LSTAR:
     
    38603865            val = 0;
    38613866        }
    3862 #endif
     3867#endif /* VBOX */
    38633868        break;
    38643869    }
     
    41484153    return u.i;
    41494154}
     4155
    41504156#ifndef VBOX
    41514157int32_t helper_fist_ST0(void)
     
    53185324    if ((uint32_t)ECX > 1)
    53195325        raise_exception(EXCP0D_GPF);
    5320 #else
     5326#else  /* !VBOX */
    53215327    if ((uint32_t)ECX != 0)
    53225328        raise_exception(EXCP0D_GPF);
    5323 #endif
     5329#endif /* !VBOX */
    53245330    /* XXX: store address ? */
    53255331    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
     
    53325338#ifdef VBOX
    53335339    helper_hlt(next_eip_addend);
    5334 #else
     5340#else /* !VBOX */
    53355341    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    53365342    EIP += next_eip_addend;
     
    53435349        do_hlt();
    53445350    }
    5345 #endif
     5351#endif /* !VBOX */
    53465352}
    53475353
     
    53865392    env->eflags |= VIF_MASK;
    53875393}
    5388 #endif
     5394#endif /* VBOX */
    53895395
    53905396#if 0
     
    55125518    remR3PhysWriteU64(addr, val);
    55135519}
    5514 #endif
     5520#endif /* VBOX */
    55155521
    55165522/* try to fill the TLB and return an exception if error. If retaddr is
     
    64286434#ifndef VBOX
    64296435    switch(type) {
    6430 #ifndef VBOX
    64316436    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
    6432 #else
    6433     case SVM_EXIT_READ_CR0:     case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
    6434     case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
    6435     case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
    6436 #endif
    64376437        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
    64386438            helper_vmexit(type, param);
    64396439        }
    64406440        break;
    6441 #ifndef VBOX
    64426441    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
    6443 #else
    6444     case SVM_EXIT_WRITE_CR0:     case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
    6445     case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
    6446     case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
    6447 #endif
    64486442        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
    64496443            helper_vmexit(type, param);
     
    65016495        break;
    65026496    }
    6503 #else
     6497#else  /* VBOX */
    65046498     AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
    6505 #endif
     6499#endif /* VBOX */
    65066500}
    65076501
     
    67466740}
    67476741
    6748 #ifndef VBOX
    67496742CCTable cc_table[CC_OP_NB] = {
    67506743    [CC_OP_DYNAMIC] = { /* should never happen */ },
     
    68146807#endif
    68156808};
    6816 #else /* VBOX */
    6817 /* Sync carefully with cpu.h */
    6818 CCTable cc_table[CC_OP_NB] = {
    6819     /* CC_OP_DYNAMIC */ { 0, 0 },
    6820 
    6821     /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
    6822 
    6823     /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
    6824     /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
    6825     /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
    6826 #ifdef TARGET_X86_64
    6827     /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
    6828 #else
    6829     /* CC_OP_MULQ */ { 0, 0 },
    6830 #endif
    6831 
    6832     /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
    6833     /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw  },
    6834     /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl  },
    6835 #ifdef TARGET_X86_64
    6836     /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq  },
    6837 #else
    6838     /* CC_OP_ADDQ */ { 0, 0 },
    6839 #endif
    6840 
    6841     /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
    6842     /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw  },
    6843     /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl  },
    6844 #ifdef TARGET_X86_64
    6845     /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
    6846 #else
    6847     /* CC_OP_ADCQ */ { 0, 0 },
    6848 #endif
    6849 
    6850     /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb  },
    6851     /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw  },
    6852     /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl  },
    6853 #ifdef TARGET_X86_64
    6854     /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq  },
    6855 #else
    6856     /* CC_OP_SUBQ */ { 0, 0 },
    6857 #endif
    6858 
    6859     /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb  },
    6860     /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw  },
    6861     /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl  },
    6862 #ifdef TARGET_X86_64
    6863     /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq  },
    6864 #else
    6865     /* CC_OP_SBBQ */ { 0, 0 },
    6866 #endif
    6867 
    6868     /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
    6869     /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
    6870     /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
    6871 #ifdef TARGET_X86_64
    6872     /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq  },
    6873 #else
    6874     /* CC_OP_LOGICQ */ { 0, 0 },
    6875 #endif
    6876 
    6877     /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
    6878     /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
    6879     /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
    6880 #ifdef TARGET_X86_64
    6881     /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl  },
    6882 #else
    6883     /* CC_OP_INCQ */ { 0, 0 },
    6884 #endif
    6885 
    6886     /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
    6887     /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
    6888     /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
    6889 #ifdef TARGET_X86_64
    6890     /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl  },
    6891 #else
    6892     /* CC_OP_DECQ */ { 0, 0 },
    6893 #endif
    6894 
    6895     /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
    6896     /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
    6897     /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
    6898 #ifdef TARGET_X86_64
    6899     /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq  },
    6900 #else
    6901     /* CC_OP_SHLQ */ { 0, 0 },
    6902 #endif
    6903 
    6904     /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
    6905     /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
    6906     /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
    6907 #ifdef TARGET_X86_64
    6908     /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
    6909 #else
    6910     /* CC_OP_SARQ */ { 0, 0 },
    6911 #endif
    6912 };
    6913 #endif /* VBOX */
     6809
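
Note: the large block removed above was a hand-ordered duplicate of cc_table kept "in sync with cpu.h" for compilers without C99 designated initializers (the i386 tcg-target.c hunk further down drops a similar _MSC_VER workaround). A small sketch of why the designated form needs no manual synchronisation; the enum and struct names are invented stand-ins for CC_OP_* / CCTable.

    #include <stdio.h>

    typedef enum { OP_A, OP_B, OP_C, OP_COUNT } op_t;

    typedef struct {
        const char *name;
        int flags;
    } op_info_t;

    static const op_info_t op_table[OP_COUNT] = {
        [OP_B] = { "b", 2 },   /* order in the initializer does not matter */
        [OP_A] = { "a", 1 },
        [OP_C] = { "c", 3 },
    };

    int main(void)
    {
        for (int i = 0; i < OP_COUNT; i++)
            printf("%d: %s flags=%d\n", i, op_table[i].name, op_table[i].flags);
        return 0;
    }
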
  • trunk/src/recompiler/target-i386/opreg_template.h

    r33656 r36140  
    2929 */
    3030
     31#error "VBOX: obsolete file?"
     32
    3133void OPPROTO glue(op_movl_A0,REGNAME)(void)
    3234{
  • trunk/src/recompiler/target-i386/ops_mem.h

    r1 r36140  
     1#error "VBOX: obsolete file?"
    12void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T0_A0)(void)
    23{
  • trunk/src/recompiler/target-i386/ops_sse.h

    r36125 r36140  
    3131#if SHIFT == 0
    3232#define Reg MMXReg
    33 #ifndef VBOX
    3433#define XMM_ONLY(x...)
    35 #else
    36 #define XMM_ONLY(x)
    37 #endif
    3834#define B(n) MMX_B(n)
    3935#define W(n) MMX_W(n)
     
    4339#else
    4440#define Reg XMMReg
    45 #ifndef VBOX
    4641#define XMM_ONLY(x...) x
    47 #else
    48 #define XMM_ONLY(x) x
    49 #endif
    5042#define B(n) XMM_B(n)
    5143#define W(n) XMM_W(n)
     
    7769#endif
    7870    }
     71    FORCE_RET();
    7972}
    8073
     
    122115#endif
    123116    }
     117    FORCE_RET();
    124118}
    125119
     
    142136#endif
    143137    }
     138    FORCE_RET();
    144139}
    145140
     
    179174#endif
    180175    }
     176    FORCE_RET();
    181177}
    182178
     
    197193#endif
    198194    }
     195    FORCE_RET();
    199196}
    200197
     
    215212#endif
    216213    }
     214    FORCE_RET();
    217215}
    218216
     
    229227    for(i = 16 - shift; i < 16; i++)
    230228        d->B(i) = 0;
     229    FORCE_RET();
    231230}
    232231
     
    242241    for(i = 0; i < shift; i++)
    243242        d->B(i) = 0;
     243    FORCE_RET();
    244244}
    245245#endif
     
    443443            (int16_t)s->W(2*i+1) * (int16_t)d->W(2*i+1);
    444444    }
     445    FORCE_RET();
    445446}
    446447
     
    489490            stb(a0 + i, d->B(i));
    490491    }
     492    FORCE_RET();
    491493}
    492494
     
    926928    ret = float32_compare_quiet(s0, s1, &env->sse_status);
    927929    CC_SRC = comis_eflags[ret + 1];
     930    FORCE_RET();
    928931}
    929932
     
    937940    ret = float32_compare(s0, s1, &env->sse_status);
    938941    CC_SRC = comis_eflags[ret + 1];
     942    FORCE_RET();
    939943}
    940944
     
    948952    ret = float64_compare_quiet(d0, d1, &env->sse_status);
    949953    CC_SRC = comis_eflags[ret + 1];
     954    FORCE_RET();
    950955}
    951956
     
    959964    ret = float64_compare(d0, d1, &env->sse_status);
    960965    CC_SRC = comis_eflags[ret + 1];
     966    FORCE_RET();
    961967}
    962968
     
    15041510    d->elem(0) = F(0);\
    15051511    d->elem(1) = F(1);\
    1506     if (num > 2) {\
    1507         d->elem(2) = F(2);\
    1508         d->elem(3) = F(3);\
    1509         if (num > 4) {\
    1510             d->elem(4) = F(4);\
    1511             d->elem(5) = F(5);\
     1512    d->elem(2) = F(2);\
     1513    d->elem(3) = F(3);\
     1514    if (num > 3) {\
     1515        d->elem(4) = F(4);\
     1516        d->elem(5) = F(5);\
     1517        if (num > 5) {\
    15121518            d->elem(6) = F(6);\
    15131519            d->elem(7) = F(7);\
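
Note: ops_sse.h is expanded twice, once with SHIFT == 0 for the 64-bit MMX register layout and once for the 128-bit SSE layout, with Reg and the B()/W() accessors redefined between expansions — which is what the XMM_ONLY() cleanup above touches. A hedged miniature of the idea that collapses the re-included template into a single macro for brevity; every name below is invented.

    #include <stdint.h>
    #include <stdio.h>

    typedef union { uint8_t b[8];  } MMXRegSketch;
    typedef union { uint8_t b[16]; } XMMRegSketch;

    #define GLUE(a, b) a##b

    /* One operation body, instantiated for both register widths. */
    #define DEFINE_PADDB(suffix, regtype, nbytes)                        \
        static void GLUE(paddb_, suffix)(regtype *d, const regtype *s)   \
        {                                                                \
            for (int i = 0; i < (nbytes); i++)                           \
                d->b[i] = (uint8_t)(d->b[i] + s->b[i]); /* wraps mod 256 */ \
        }

    DEFINE_PADDB(mmx, MMXRegSketch, 8)    /* 64-bit variant  */
    DEFINE_PADDB(xmm, XMMRegSketch, 16)   /* 128-bit variant */

    int main(void)
    {
        MMXRegSketch a = {{ 250, 1 }}, b = {{ 10, 2 }};
        paddb_mmx(&a, &b);
        printf("a.b[0]=%u a.b[1]=%u\n", a.b[0], a.b[1]);  /* 4 and 3 */
        return 0;
    }
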
  • trunk/src/recompiler/target-i386/ops_template.h

    r33656 r36140  
    2929 */
    3030
     31#error "VBOX: obsolete file?"
    3132#define DATA_BITS (1 << (3 + SHIFT))
    3233#define SHIFT_MASK (DATA_BITS - 1)
  • trunk/src/recompiler/target-i386/ops_template_mem.h

    r33656 r36140  
    2929 */
    3030
     31#error "VBOX: Obsolete file?"
    3132#ifdef MEM_WRITE
    3233
  • trunk/src/recompiler/target-i386/translate.c

    r36125 r36140  
    3333#include <string.h>
    3434#ifndef VBOX
    35 #include <inttypes.h>
    36 #include <signal.h>
    37 #include <assert.h>
     35# include <inttypes.h>
     36# include <signal.h>
     37# include <assert.h>
    3838#endif /* !VBOX */
    3939
     
    5252#ifdef TARGET_X86_64
    5353#define X86_64_ONLY(x) x
    54 #ifndef VBOX
    5554#define X86_64_DEF(x...) x
    56 #else
    57 #define X86_64_DEF(x...) x
    58 #endif
    5955#define CODE64(s) ((s)->code64)
    6056#define REX_X(s) ((s)->rex_x)
     
    6662#else
    6763#define X86_64_ONLY(x) NULL
    68 #ifndef VBOX
    6964#define X86_64_DEF(x...)
    70 #else
    71 #define X86_64_DEF(x)
    72 #endif
    7365#define CODE64(s) 0
    7466#define REX_X(s) 0
     
    120112
    121113#endif /* VBOX */
    122 
    123114
    124115typedef struct DisasContext {
     
    170161
    171162#ifdef VBOX
    172 static void gen_check_external_event();
     163static void gen_check_external_event(void);
    173164#endif
    174165
     
    719710
    720711#ifdef VBOX
    721 static void gen_check_external_event()
    722 {
    723 #if 1
     712
     713static void gen_check_external_event(void)
     714{
     715# if 1
    724716    /** @todo: once TCG codegen improves, we may want to use version
    725717        from else version */
    726718    tcg_gen_helper_0_0(helper_check_external_event);
    727 #else
     719# else
    728720    int skip_label;
    729721    TCGv t0;
     
    747739
    748740   gen_set_label(skip_label);
    749 #endif
    750 }
    751 
    752 #if 0 /* unused code? */
     741# endif
     742}
     743
     744# if 0 /* unused code? */
    753745static void gen_check_external_event2()
    754746{
    755747    tcg_gen_helper_0_0(helper_check_external_event);
    756748}
    757 #endif
    758 
    759 #endif
     749# endif
     750
     751#endif /* VBOX */
    760752
    761753static inline void gen_jmp_im(target_ulong pc)
     
    770762    gen_jmp_im(pc);
    771763# ifdef VBOX_DUMP_STATE
    772      tcg_gen_helper_0_0(helper_dump_state);
     764    tcg_gen_helper_0_0(helper_dump_state);
    773765# endif
    774766}
     
    11351127
    11361128/* generate a conditional jump to label 'l1' according to jump opcode
    1137    value 'b'. In the fast case, T0 is guaranteed not to be used. */
     1129   value 'b'. In the fast case, T0 is guaranted not to be used. */
    11381130static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
    11391131{
     
    14321424                                 target_ulong cur_eip, target_ulong next_eip) \
    14331425{                                                                             \
    1434     int l2;                                                                   \
     1426    int l2;\
    14351427    gen_update_cc_op(s);                                                      \
    14361428    l2 = gen_jz_ecx_string(s, next_eip);                                      \
     
    14501442                                   int nz)                                    \
    14511443{                                                                             \
    1452     int l2;                                                                   \
     1444    int l2;\
    14531445    gen_update_cc_op(s);                                                      \
    14541446    l2 = gen_jz_ecx_string(s, next_eip);                                      \
     
    21532145            }
    21542146        }
    2155         /* index == 4 means no index */
    2156         if (havesib && (index != 4)) {
     2147        /* XXX: index == 4 is always invalid */
     2148        if (havesib && (index != 4 || scale != 0)) {
    21572149#ifdef TARGET_X86_64
    21582150            if (s->aflag == 2) {
     
    24012393        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))  {
    24022394#ifdef VBOX
    2403         gen_check_external_event(s);
     2395        gen_check_external_event();
    24042396#endif /* VBOX */
    24052397        /* jump to same page: we can use a direct jump */
     
    24672459    } else {
    24682460        /* slow case: it is more efficient not to generate a jump,
    2469            although it is questionable whether this optimization is
     2461           although it is questionnable whether this optimization is
    24702462           worth to */
    24712463        inv = b & 1;
     
    28322824
    28332825#ifdef VBOX
    2834     gen_check_external_event(s);
     2826    gen_check_external_event();
    28352827#endif /* VBOX */
    28362828
     
    32933285        case 0x02b: /* movntps */
    32943286        case 0x12b: /* movntps */
     3287        case 0x3f0: /* lddqu */
    32953288            if (mod == 3)
    32963289                goto illegal_op;
    32973290            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
    32983291            gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
    3299             break;
    3300         case 0x3f0: /* lddqu */
    3301             if (mod == 3)
    3302                 goto illegal_op;
    3303             gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
    3304             gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
    33053292            break;
    33063293        case 0x6e: /* movd mm, ea */
     
    38173804                    case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
    38183805                    case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
    3819                         tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
     3806                        tcg_gen_qemu_ld32u(cpu_tmp2_i32, cpu_A0,
    38203807                                          (s->mem_index >> 2) - 1);
    3821                         tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
    38223808                        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
    38233809                                        offsetof(XMMReg, XMM_L(0)));
     
    43024288#endif /* VBOX */
    43034289
    4304 
    43054290/* convert one instruction. s->is_jmp is set if the translation must
    43064291   be stopped. Return the next pc value */
     
    43154300    if (unlikely(loglevel & CPU_LOG_TB_OP))
    43164301        tcg_gen_debug_insn_start(pc_start);
    4317 
    43184302    s->pc = pc_start;
    43194303    prefixes = 0;
     
    43364320    gen_update_eip(pc_start - s->cs_base);
    43374321# endif
    4338 #endif
     4322#endif /* VBOX */
    43394323
    43404324 next_byte:
     
    63826366        gen_check_io(s, ot, pc_start - s->cs_base,
    63836367                     svm_is_rep(prefixes));
    6384 #ifdef VBOX /* bird: linux is writing to this port for delaying I/O. */
     6368#ifdef VBOX /* bird: linux is writing to this port for delaying I/O. */ /** @todo this breaks AIX, remove. */
    63856369        if (val == 0x80)
    63866370            break;
     
    73507334        op = (modrm >> 3) & 7;
    73517335        rm = modrm & 7;
    7352 
    73537336#ifdef VBOX
    73547337        /* 0f 01 f9 */
     
    73617344            break;
    73627345        }
    7363 #endif
     7346#endif /* VBOX */
    73647347        switch(op) {
    73657348        case 0: /* sgdt */
     
    76257608        {
    76267609            int label1;
    7627             TCGv t0, t1, t2, a0;
     7610            TCGv t0, t1, t2;
     7611#ifdef VBOX
     7612            TCGv a0;
     7613#endif
    76287614
    76297615            if (!s->pe || s->vm86)
    76307616                goto illegal_op;
    7631 
    76327617            t0 = tcg_temp_local_new(TCG_TYPE_TL);
    76337618            t1 = tcg_temp_local_new(TCG_TYPE_TL);
     
    80358020    dc->vme = !!(env->cr[4] & CR4_VME_MASK);
    80368021    dc->pvi = !!(env->cr[4] & CR4_PVI_MASK);
    8037 #ifdef VBOX_WITH_CALL_RECORD
     8022# ifdef VBOX_WITH_CALL_RECORD
    80388023    if (    !(env->state & CPU_RAW_RING0)
    80398024        &&  (env->cr[0] & CR0_PG_MASK)
     
    80438028    else
    80448029        dc->record_call = 0;
    8045 #endif
     8030# endif
    80468031#endif
    80478032    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
     
    81388123            break;
    81398124#ifdef VBOX
    8140 #ifdef DEBUG
     8125# ifdef DEBUG
    81418126/*
    81428127        if(cpu_check_code_raw(env, pc_ptr, env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK))) == ERROR_SUCCESS)
     
    81468131        }
    81478132*/
    8148 #endif
     8133# endif /* DEBUG */
    81498134        if (env->state & CPU_EMULATE_SINGLE_INSTR)
    81508135        {
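
Note: one functional change in the translate.c hunk above adjusts how an SIB byte with index field 4 is handled; that encoding normally means "no index register", which is why the disassembler special-cases it. For orientation, a standalone toy decoder of the SIB fields (not the VBox/QEMU code, and ignoring REX prefixes):

    #include <stdint.h>
    #include <stdio.h>

    static void decode_sib(uint8_t sib)
    {
        int scale = (sib >> 6) & 3;   /* multiplier is 1 << scale        */
        int index = (sib >> 3) & 7;   /* 4 encodes "no index register"   */
        int base  = sib & 7;

        if (index == 4)
            printf("sib=%02x -> base=r%d, no scaled index\n", sib, base);
        else
            printf("sib=%02x -> base=r%d + r%d*%d\n",
                   sib, base, index, 1 << scale);
    }

    int main(void)
    {
        decode_sib(0x24);  /* scale=0 index=4 base=4: [esp], no index */
        decode_sib(0x88);  /* scale=2 index=1 base=0: [eax + ecx*4]   */
        return 0;
    }
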
  • trunk/src/recompiler/tcg/README

    r33540 r36140  
    55TCG (Tiny Code Generator) began as a generic backend for a C
    66compiler. It was simplified to be used in QEMU. It also has its roots
    7 in the QOP code generator written by Paul Brook. 
     7in the QOP code generator written by Paul Brook.
    88
    992) Definitions
     
    3131
    3232A TCG "basic block" corresponds to a list of instructions terminated
    33 by a branch instruction. 
     33by a branch instruction.
    3434
    35353) Intermediate representation
     
    9898
    9999   and_i32 t0, t0, $0xffffffff
    100    
     100
    101101  is suppressed.
    102102
     
    285285
    286286t0 = read(t1 + offset)
    287 Load 8, 16, 32 or 64 bits with or without sign extension from host memory. 
     287Load 8, 16, 32 or 64 bits with or without sign extension from host memory.
    288288offset must be a constant.
    289289
     
    386386- The first N parameters are passed in registers.
    387387- The next parameters are passed on the stack by storing them as words.
    388 - Some registers are clobbered during the call. 
     388- Some registers are clobbered during the call.
    389389- The function can return 0 or 1 value in registers. On a 32 bit
    390390  target, functions must be able to return 2 values in registers for
     
    425425
    426426- Don't hesitate to use helpers for complicated or seldom used target
    427   instructions. There is little performance advantage in using TCG to
     427  intructions. There is little performance advantage in using TCG to
    428428  implement target instructions taking more than about twenty TCG
    429429  instructions.
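
Note: the README mentions that an and_i32 of a value with $0xffffffff is suppressed. A toy illustration of that kind of no-op elimination over an invented op-list representation (nothing below is TCG's actual data structure):

    #include <stdint.h>
    #include <stdio.h>

    typedef enum { OP_AND_I32, OP_ADD_I32 } opcode_t;

    typedef struct {
        opcode_t op;
        int      dst, src;     /* pseudo temp indices */
        uint32_t imm;
    } op_t;

    /* Drop ops that cannot change their destination; returns the new count. */
    static int drop_noops(op_t *ops, int n)
    {
        int out = 0;
        for (int i = 0; i < n; i++) {
            if (ops[i].op == OP_AND_I32 && ops[i].dst == ops[i].src &&
                ops[i].imm == 0xffffffffu)
                continue;                     /* t = t & 0xffffffff: drop */
            ops[out++] = ops[i];
        }
        return out;
    }

    int main(void)
    {
        op_t ops[] = {
            { OP_ADD_I32, 0, 1, 5 },
            { OP_AND_I32, 0, 0, 0xffffffffu },   /* redundant */
        };
        printf("ops kept: %d\n", drop_noops(ops, 2));   /* prints 1 */
        return 0;
    }
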
  • trunk/src/recompiler/tcg/i386/tcg-target.c

    r36125 r36140  
    199199#define P_EXT   0x100 /* 0x0f opcode prefix */
    200200
    201 #if !defined(VBOX) || !defined(_MSC_VER)
    202201static const uint8_t tcg_cond_to_jcc[10] = {
    203202    [TCG_COND_EQ] = JCC_JE,
     
    212211    [TCG_COND_GTU] = JCC_JA,
    213212};
    214 #else
    215 /* Fortunately, ordering is right */
    216 static const uint8_t tcg_cond_to_jcc[10] = {
    217     JCC_JE,
    218     JCC_JNE,
    219     JCC_JL,
    220     JCC_JGE,
    221     JCC_JLE,
    222     JCC_JG,
    223     JCC_JB,
    224     JCC_JAE,
    225     JCC_JBE,
    226     JCC_JA,
    227 };
    228 #endif
    229213
    230214static inline void tcg_out_opc(TCGContext *s, int opc)
     
    291275        tcg_out32(s, arg);
    292276    }
    293 }
    294 
    295 static inline void tcg_out_push(TCGContext *s, int reg)
    296 {
    297     tcg_out_opc(s, 0x50 + reg);
    298 }
    299 
    300 static inline void tcg_out_pop(TCGContext *s, int reg)
    301 {
    302     tcg_out_opc(s, 0x58 + reg);
    303277}
    304278
     
    10651039        tcg_abort();
    10661040    }
    1067 #else
     1041#else  /* VBOX && REM_PHYS_ADDR_IN_TLB */
    10681042    tcg_out_vbox_phys_write(s, opc, r0, data_reg, data_reg2);
    1069 #endif
     1043#endif /* VBOX && REM_PHYS_ADDR_IN_TLB */
    10701044
    10711045#if defined(CONFIG_SOFTMMU)
     
    13761350};
    13771351
     1352static inline void tcg_out_push(TCGContext *s, int reg)
     1353{
     1354    tcg_out_opc(s, 0x50 + reg);
     1355}
     1356
     1357static inline void tcg_out_pop(TCGContext *s, int reg)
     1358{
     1359    tcg_out_opc(s, 0x58 + reg);
     1360}
     1361
    13781362/* Generate global QEMU prologue and epilogue code */
    13791363void tcg_target_qemu_prologue(TCGContext *s)
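
Note: the hunk above drops the positional fallback for tcg_cond_to_jcc now that designated initializers are assumed to work everywhere. The table maps TCG comparison conditions to x86 condition-code nibbles; architecturally, a short Jcc is encoded as 0x70 plus that nibble followed by a rel8 displacement. A hedged sketch with an invented condition enum and code buffer:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented names; the values are the x86 condition-code nibbles
     * (JE = 4, JNE = 5, JB = 2, JA = 7, JL = 0xC, JGE = 0xD). */
    enum { CC_E = 0x4, CC_NE = 0x5, CC_B = 0x2, CC_A = 0x7, CC_L = 0xC, CC_GE = 0xD };

    typedef struct { uint8_t buf[16]; int len; } codebuf_t;

    static void emit_jcc_short(codebuf_t *cb, int cc, int8_t rel)
    {
        cb->buf[cb->len++] = (uint8_t)(0x70 + cc);   /* e.g. 0x74 = je */
        cb->buf[cb->len++] = (uint8_t)rel;
    }

    int main(void)
    {
        codebuf_t cb = { {0}, 0 };
        emit_jcc_short(&cb, CC_E, 0x05);   /* je +5 -> 74 05 */
        emit_jcc_short(&cb, CC_B, -2);     /* jb -2 -> 72 fe */
        for (int i = 0; i < cb.len; i++)
            printf("%02x ", cb.buf[i]);
        printf("\n");
        return 0;
    }
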
  • trunk/src/recompiler/tcg/i386/tcg-target.h

    r36125 r36140  
    5353#define TCG_AREG3 TCG_REG_EDI
    5454#else
    55 #define TCG_AREG0 TCG_REG_ESI
    56 #define TCG_AREG1 TCG_REG_EDI
     55# define TCG_AREG0 TCG_REG_ESI
     56# define TCG_AREG1 TCG_REG_EDI
    5757#endif
    5858
  • trunk/src/recompiler/tcg/tcg-dyngen.c

    r29520 r36140  
    3131#include <inttypes.h>
    3232#else
    33 #include <stdio.h>
    34 #include "osdep.h"
     33# include <stdio.h>
     34# include "osdep.h"
    3535#endif
    3636
  • trunk/src/recompiler/tcg/tcg-runtime.c

    r29520 r36140  
    2727#include <stdio.h>
    2828#include <string.h>
    29 #ifndef VBOX
    3029#include <inttypes.h>
    31 #endif
    3230
    3331#include "config.h"
  • trunk/src/recompiler/tcg/tcg.c

    r36125 r36140  
    3636#include <string.h>
    3737#include <inttypes.h>
    38 #else
    39 #include <stdio.h>
    40 #include "osdep.h"
    41 #endif
     38#else  /* VBOX */
     39# include <stdio.h>
     40# include "osdep.h"
     41#endif /* VBOX */
    4242#ifdef _WIN32
    4343#include <malloc.h>
     
    4747#include "qemu-common.h"
    4848
    49 /* Note: the long term plan is to reduce the dependencies on the QEMU
     49/* Note: the long term plan is to reduce the dependancies on the QEMU
    5050   CPU definitions. Currently they are used for qemu_ld/st
    5151   instructions */
     
    6565 * @todo: fix it in compiler
    6666 */
    67 #if defined(TARGET_X86_64) && (TCG_TARGET_REG_BITS == 32)
    68 #undef USE_LIVENESS_ANALYSIS
    69 #endif
    70 #endif
     67# if defined(TARGET_X86_64) && (TCG_TARGET_REG_BITS == 32)
     68#  undef USE_LIVENESS_ANALYSIS
     69# endif
     70#endif /* VBOX */
    7171
    7272static void patch_reloc(uint8_t *code_ptr, int type,
     
    7777#ifndef VBOX
    7878#define DEF2(s, iargs, oargs, cargs, flags) { #s, iargs, oargs, cargs, iargs + oargs + cargs, flags, 0 },
    79 #else
    80 #define DEF2(s, iargs, oargs, cargs, flags) { #s, iargs, oargs, cargs, iargs + oargs + cargs, flags, 0, 0, 0 },
    81 #endif
     79#else  /* VBOX */
     80# define DEF2(s, iargs, oargs, cargs, flags) { #s, iargs, oargs, cargs, iargs + oargs + cargs, flags, 0, 0, 0 },
     81#endif /* VBOX */
    8282#include "tcg-opc.h"
    8383#undef DEF
     
    508508            n *= 2;
    509509        }
    510 
    511510#ifdef VBOX
    512511        s->helpers = qemu_realloc(s->helpers, n * sizeof(TCGHelperInfo));
     
    766765}
    767766
    768 #ifndef VBOX
    769767static const char * const cond_name[] =
    770768{
     
    780778    [TCG_COND_GTU] = "gtu"
    781779};
    782 #else
    783 static const char * const cond_name[] =
    784 {
    785     "eq",
    786     "ne",
    787     "lt",
    788     "ge",
    789     "le",
    790     "gt",
    791     "ltu",
    792     "geu",
    793     "leu",
    794     "gtu"
    795 };
    796 #endif
    797780
    798781void tcg_dump_ops(TCGContext *s, FILE *outfile)
     
    14331416}
    14341417
    1435 /* save globals to their canonical location and assume they can be
     1418/* save globals to their cannonical location and assume they can be
    14361419   modified be the following code. 'allocated_regs' is used in case a
    14371420   temporary registers needs to be allocated to store a constant. */
     
    19431926        tcg_dump_ops(s, logfile);
    19441927        fprintf(logfile, "\n");
    1945         fflush(logfile);
    19461928    }
    19471929#endif
  • trunk/src/recompiler/tcg/tcg.h

    r36125 r36140  
    2222 * THE SOFTWARE.
    2323 */
    24 
    2524#include "tcg-target.h"
    2625
     
    110109typedef tcg_target_ulong TCGArg;
    111110
    112 /* Define a type and accessor macros for variables.  Using a struct is
     111/* Define a type and accessor macros for varables.  Using a struct is
    113112   nice because it gives some level of type safely.  Ideally the compiler
    114113   be able to see through all this.  However in practice this is not true,
     
    194193    unsigned int mem_coherent:1;
    195194    unsigned int mem_allocated:1;
    196     unsigned int temp_local:1; /* If true, the temp is saved across
     195    unsigned int temp_local:1; /* If true, the temp is saved accross
    197196                                  basic blocks. Otherwise, it is not
    198                                   preserved across basic blocks. */
     197                                  preserved accross basic blocks. */
    199198    unsigned int temp_allocated:1; /* never used for code gen */
    200199    /* index of next free temp of same base type, -1 if end */
     
    370369    abort();\
    371370} while (0)
    372 #else
    373 #define VBOX_STR(x) #x
    374 #define VBOX_XSTR(x) VBOX_STR(x)
    375 #define tcg_abort() \
    376 do {\
    377     remAbort(-1, "TCG fatal error: "__FILE__":"VBOX_XSTR(__LINE__));     \
    378 } while (0)
     371#else  /* VBOX */
     372# define tcg_abort() \
     373    do {\
     374        remAbort(-1, "TCG fatal error: "__FILE__":" RT_XSTR(__LINE__));     \
     375    } while (0)
    379376extern void qemu_qsort(void* base, size_t nmemb, size_t size,
    380377                       int(*compar)(const void*, const void*));
    381378#define tcg_exit(status) \
    382 do {\
    383     remAbort(-1, "TCG exit: "__FILE__":"VBOX_XSTR(__LINE__));\
    384 } while (0)
    385 #endif
     379    do {\
     380        remAbort(-1, "TCG exit: "__FILE__":" RT_XSTR(__LINE__));\
     381    } while (0)
     382#endif /* VBOX */
    386383
    387384void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
     
    440437#else
    441438
    442 #if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    443 #define tcg_qemu_tb_exec(tb_ptr, ret)        \
     439# if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
     440#  define tcg_qemu_tb_exec(tb_ptr, ret)        \
    444441    __asm__ __volatile__("call *%%ecx" : "=a"(ret) : "a"(tb_ptr), "c" (&code_gen_prologue[0]) : "memory", "%edx", "cc")
    445 #else
     442# else  /* !VBOX || !GCC_WITH_BUGGY_REG_PARAM */
    446443#define tcg_qemu_tb_exec(tb_ptr) ((long REGPARM (*)(void *))code_gen_prologue)(tb_ptr)
    447 #endif
    448 
    449 #endif
     444# endif /* !VBOX || !GCC_WITH_BUGGY_REG_PARAM */
     445
     446#endif
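
Note: the tcg_abort()/tcg_exit() hunk above replaces the local VBOX_STR/VBOX_XSTR pair with IPRT's RT_XSTR. Both rely on the standard two-level stringification idiom, sketched standalone below; STR/XSTR are generic names here, not the VirtualBox macros.

    #include <stdio.h>

    #define STR(x)  #x          /* stringizes the literal argument   */
    #define XSTR(x) STR(x)      /* forces macro expansion first      */

    int main(void)
    {
        /* Without the extra layer we get the literal text "__LINE__". */
        printf("STR:  %s\n", STR(__LINE__));
        /* With it, __LINE__ expands to the actual line number first. */
        printf("XSTR: %s\n", XSTR(__LINE__));
        /* The same trick builds "file:line" strings at compile time. */
        printf("msg:  %s\n", "fatal at " __FILE__ ":" XSTR(__LINE__));
        return 0;
    }
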
  • trunk/src/recompiler/tcg/x86_64/tcg-target.c

    r36125 r36140  
    253253}
    254254
    255 static inline void tcg_out_push(TCGContext *s, int reg)
    256 {
    257     tcg_out_opc(s, (0x50 + (reg & 7)), 0, reg, 0);
    258 }
    259 
    260 static inline void tcg_out_pop(TCGContext *s, int reg)
    261 {
    262     tcg_out_opc(s, (0x58 + (reg & 7)), 0, reg, 0);
    263 }
    264 
    265 
    266255/* rm < 0 means no register index plus (-rm - 1 immediate bytes) */
    267256static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
     
    633622}
    634623
    635 #endif
     624#endif /* VBOX && REM_PHYS_ADDR_IN_TLB */
    636625
    637626static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
     
    812801        tcg_abort();
    813802    }
    814 #else /* VBOX */
     803#else  /* VBOX && REM_PHYS_ADDR_IN_TLB */
    815804    tcg_out_vbox_phys_read(s, opc, r0, data_reg);
    816 #endif /* VBOX */
     805#endif /* VBOX && REM_PHYS_ADDR_IN_TLB */
    817806
    818807#if defined(CONFIG_SOFTMMU)
     
    964953        tcg_abort();
    965954    }
    966 #else /* VBOX */
     955#else  /* VBOX && REM_PHYS_ADDR_IN_TLB */
    967956    tcg_out_vbox_phys_write(s, opc, r0, data_reg);
    968 #endif /* VBOX */
     957#endif /* VBOX && REM_PHYS_ADDR_IN_TLB */
    969958
    970959#if defined(CONFIG_SOFTMMU)
     
    1003992                                                   args[0]));
    1004993#else
    1005             /* @todo: can we clobber RAX here? */
     994            /** @todo: can we clobber RAX here? */
    1006995            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RAX,
    1007996                         (tcg_target_long)&(s->tb_next[args[0]]));
     
    13191308};
    13201309
     1310static inline void tcg_out_push(TCGContext *s, int reg)
     1311{
     1312    tcg_out_opc(s, (0x50 + (reg & 7)), 0, reg, 0);
     1313}
     1314
     1315static inline void tcg_out_pop(TCGContext *s, int reg)
     1316{
     1317    tcg_out_opc(s, (0x58 + (reg & 7)), 0, reg, 0);
     1318}
     1319
    13211320/* Generate global QEMU prologue and epilogue code */
    13221321void tcg_target_qemu_prologue(TCGContext *s)
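
Note: in the x86_64 backend the relocated tcg_out_push()/tcg_out_pop() mask the register with (reg & 7) because r8-r15 reuse the same 0x50+reg / 0x58+reg opcodes behind a REX.B prefix, which the real tcg_out_opc() presumably derives from its extra register arguments. A hedged standalone sketch that inlines the prefix for clarity; the buffer type and names are invented.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint8_t buf[16]; int len; } codebuf_t;

    static void emit_push64(codebuf_t *cb, int reg)
    {
        if (reg >= 8)
            cb->buf[cb->len++] = 0x41;            /* REX.B selects r8..r15 */
        cb->buf[cb->len++] = 0x50 + (reg & 7);    /* same base opcode      */
    }

    int main(void)
    {
        codebuf_t cb = { {0}, 0 };
        emit_push64(&cb, 5);    /* push %rbp -> 55    */
        emit_push64(&cb, 14);   /* push %r14 -> 41 56 */
        for (int i = 0; i < cb.len; i++)
            printf("%02x ", cb.buf[i]);
        printf("\n");
        return 0;
    }
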
  • trunk/src/recompiler/tcg/x86_64/tcg-target.h

    r29520 r36140  
    6868#define TCG_TARGET_HAS_ext32s_i64
    6969
    70 /* Must be in sync with dyngen register notion, see dyngen-exec.h */
     70/* Note: must be synced with dyngen-exec.h */
    7171#define TCG_AREG0 TCG_REG_R14
    7272#define TCG_AREG1 TCG_REG_R15
  • trunk/src/recompiler/tests/Makefile

    r2426 r36140  
    11-include ../config-host.mak
     2VPATH=$(SRC_PATH)/tests
    23
    3 CFLAGS=-Wall -O2 -g
     4CFLAGS=-Wall -O2 -g -fno-strict-aliasing
    45#CFLAGS+=-msse2
    56LDFLAGS=
    67
    78ifeq ($(ARCH),i386)
    8 TESTS=linux-test testthread sha1-i386 test-i386 runcom
     9TESTS=linux-test testthread sha1-i386 test-i386
    910endif
    1011ifeq ($(ARCH),x86_64)
     
    1314TESTS+=sha1# test_path
    1415#TESTS+=test_path
     16#TESTS+=runcom
    1517
    16 QEMU=../i386-user/qemu-i386
     18QEMU=../i386-linux-user/qemu-i386
    1719
    1820all: $(TESTS)
     
    3234test-i386: test-i386.c test-i386-code16.S test-i386-vm86.S \
    3335           test-i386.h test-i386-shift.h test-i386-muldiv.h
    34         $(CC) $(CFLAGS) $(LDFLAGS) -static -o $@ \
    35               test-i386.c test-i386-code16.S test-i386-vm86.S -lm
     36        $(CC) -m32 $(CFLAGS) $(LDFLAGS) -static -o $@ \
     37              $(<D)/test-i386.c $(<D)/test-i386-code16.S $(<D)/test-i386-vm86.S -lm
    3638
    3739test-x86_64: test-i386.c \
    3840           test-i386.h test-i386-shift.h test-i386-muldiv.h
    39         $(CC) $(CFLAGS) $(LDFLAGS) -static -o $@ test-i386.c -lm
     41        $(CC) -m64 $(CFLAGS) $(LDFLAGS) -static -o $@ $(<D)/test-i386.c -lm
    4042
    4143ifeq ($(ARCH),i386)
     
    4749        $(QEMU) test-i386 > test-i386.out
    4850        @if diff -u test-i386.ref test-i386.out ; then echo "Auto Test OK"; fi
    49 ifeq ($(ARCH),i386)
    50         $(QEMU) -no-code-copy test-i386 > test-i386.out
    51         @if diff -u test-i386.ref test-i386.out ; then echo "Auto Test OK (no code copy)"; fi
    52 endif
     51
     52.PHONY: test-mmap
     53test-mmap: test-mmap.c
     54        $(CC) $(CFLAGS) -Wall -static -O2 $(LDFLAGS) -o $@ $<
     55        -./test-mmap
     56        -$(QEMU) ./test-mmap
     57        -$(QEMU) -p 8192 ./test-mmap 8192
     58        -$(QEMU) -p 16384 ./test-mmap 16384
     59        -$(QEMU) -p 32768 ./test-mmap 32768
    5360
    5461# generic Linux and CPU test
     
    8390        arm-linux-gcc -Wall -g -O2 -c -o $@ $<
    8491
     92test-arm-iwmmxt: test-arm-iwmmxt.s
     93        cpp < $< | arm-linux-gnu-gcc -Wall -static -march=iwmmxt -mabi=aapcs -x assembler - -o $@
     94
    8595# MIPS test
    8696hello-mips: hello-mips.c
     
    90100        mipsel-linux-gnu-gcc -nostdlib -static -mno-abicalls -fno-PIC -mabi=32 -Wall -Wextra -g -O2 -o $@ $<
    91101
    92 # XXX: find a way to compile easily a test for each arch
    93 test2:
    94         @for arch in i386 arm armeb sparc ppc mips mipsel; do \
    95            ../$${arch}-user/qemu-$${arch} $${arch}/ls -l linux-test.c ; \
    96         done
     102# testsuite for the CRIS port.
     103test-cris:
     104        $(MAKE) -C cris check
    97105
    98106clean:
  • trunk/src/recompiler/tests/qruncom.c

    r33540 r36140  
    6060}
    6161
    62 static void set_gate(void *ptr, unsigned int type, unsigned int dpl, 
     62static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
    6363                     unsigned long addr, unsigned int sel)
    6464{
     
    142142}
    143143
    144 static void host_segv_handler(int host_signum, siginfo_t *info, 
     144static void host_segv_handler(int host_signum, siginfo_t *info,
    145145                              void *puc)
    146146{
     
    161161        usage();
    162162    filename = argv[1];
    163    
    164     vm86_mem = mmap((void *)0x00000000, 0x110000, 
    165                     PROT_WRITE | PROT_READ | PROT_EXEC, 
     163
     164    vm86_mem = mmap((void *)0x00000000, 0x110000,
     165                    PROT_WRITE | PROT_READ | PROT_EXEC,
    166166                    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    167167    if (vm86_mem == MAP_FAILED) {
     
    186186    {
    187187        struct sigaction act;
    188        
     188
    189189        sigfillset(&act.sa_mask);
    190190        act.sa_flags = SA_SIGINFO;
     
    194194        sigaction(SIGSEGV, &act, NULL);
    195195        sigaction(SIGBUS, &act, NULL);
    196 #if defined (TARGET_I386) && defined(USE_CODE_COPY)
    197         sigaction(SIGFPE, &act, NULL);
    198 #endif
    199196    }
    200197
    201198    //    cpu_set_log(CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OUT_ASM | CPU_LOG_EXEC);
    202199
    203     env = cpu_init();
    204 
    205     /* disable code copy to simplify debugging */
    206     code_copy_enabled = 0;
     200    env = cpu_init("qemu32");
    207201
    208202    /* set user mode state (XXX: should be done automatically by
     
    219213       mode. We also activate the VM86 flag to run DOS code */
    220214    env->eflags |= IF_MASK | VM_MASK;
    221    
     215
    222216    /* init basic registers */
    223217    env->eip = 0x100;
     
    225219    seg = (COM_BASE_ADDR - 0x100) >> 4;
    226220
    227     cpu_x86_load_seg_cache(env, R_CS, seg, 
    228                            (seg << 4), 0xffff, 0);
    229     cpu_x86_load_seg_cache(env, R_SS, seg, 
    230                            (seg << 4), 0xffff, 0);
    231     cpu_x86_load_seg_cache(env, R_DS, seg, 
    232                            (seg << 4), 0xffff, 0);
    233     cpu_x86_load_seg_cache(env, R_ES, seg, 
    234                            (seg << 4), 0xffff, 0);
    235     cpu_x86_load_seg_cache(env, R_FS, seg, 
    236                            (seg << 4), 0xffff, 0);
    237     cpu_x86_load_seg_cache(env, R_GS, seg, 
     221    cpu_x86_load_seg_cache(env, R_CS, seg,
     222                           (seg << 4), 0xffff, 0);
     223    cpu_x86_load_seg_cache(env, R_SS, seg,
     224                           (seg << 4), 0xffff, 0);
     225    cpu_x86_load_seg_cache(env, R_DS, seg,
     226                           (seg << 4), 0xffff, 0);
     227    cpu_x86_load_seg_cache(env, R_ES, seg,
     228                           (seg << 4), 0xffff, 0);
     229    cpu_x86_load_seg_cache(env, R_FS, seg,
     230                           (seg << 4), 0xffff, 0);
     231    cpu_x86_load_seg_cache(env, R_GS, seg,
    238232                           (seg << 4), 0xffff, 0);
    239233
     
    261255    set_idt(18, 0);
    262256    set_idt(19, 0);
    263        
     257
    264258    /* put return code */
    265259    *seg_to_linear(env->segs[R_CS].selector, 0) = 0xb4; /* mov ah, $0 */
     
    275269    env->regs[R_EDI] = 0xfffe;
    276270
    277     /* inform the emulator of the mapped memory */
    278     page_set_flags(0x00000000, 0x110000, 
     271    /* inform the emulator of the mmaped memory */
     272    page_set_flags(0x00000000, 0x110000,
    279273                   PAGE_WRITE | PAGE_READ | PAGE_EXEC | PAGE_VALID);
    280274
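
Note: qruncom.c installs host_segv_handler() for SIGSEGV/SIGBUS with SA_SIGINFO so the handler can see the faulting address. A self-contained sketch of that setup; the handler body here only reports and exits, the real handler is considerably more involved.

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void segv_handler(int signum, siginfo_t *info, void *puc)
    {
        (void)puc;
        fprintf(stderr, "signal %d at address %p\n", signum, info->si_addr);
        _exit(1);
    }

    int main(void)
    {
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);        /* block other signals in the handler */
        act.sa_flags = SA_SIGINFO;       /* deliver siginfo_t to the handler   */
        act.sa_sigaction = segv_handler;
        sigaction(SIGSEGV, &act, NULL);
        sigaction(SIGBUS, &act, NULL);

        printf("handler installed\n");
        return 0;
    }
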
  • trunk/src/recompiler/tests/test-i386-code16.S

    r2426 r36140  
    88
    99        .globl code16_func1
    10        
     10
    1111        /* basic test */
    1212code16_func1 = . - code16_start
     
    2525        data32 lret
    2626
    27 /* test various jmp opcodes */       
     27/* test various jmp opcodes */
    2828        .globl code16_func3
    2929code16_func3 = . - code16_start
     
    3737        add $2, %ax
    38382:
    39        
     39
    4040        call myfunc
    41        
     41
    4242        lcall $CS_SEG, $(myfunc2 - code16_start)
    4343
     
    4545myjmp1_next:
    4646
    47         cs lcall myfunc2_addr - code16_start
     47        cs lcall *myfunc2_addr - code16_start
    4848
    49         cs ljmp myjmp2_addr - code16_start
     49        cs ljmp *myjmp2_addr - code16_start
    5050myjmp2_next:
    5151
    5252        data32 lret
    53        
     53
    5454myfunc2_addr:
    5555        .short myfunc2 - code16_start
  • trunk/src/recompiler/tests/test-i386-shift.h

    r2426 r36140  
    146146    exec_opl(s2, s0, s1, 0);
    147147#ifdef OP_SHIFTD
    148     if (s1 <= 15)
    149         exec_opw(s2, s0, s1, 0);
     148    exec_opw(s2, s0, s1, 0);
    150149#else
    151150    exec_opw(s2, s0, s1, 0);
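
Note: the test-i386-shift.h change above removes the "if (s1 <= 15)" guard around the 16-bit double-shift case; shift counts at or above the operand width leave SHLD/SHRD results architecturally undefined, which is exactly what that guard used to avoid. A plain-C sketch of what the 16-bit shld computes for in-range counts (the >= 16 branch just picks one arbitrary answer):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t shldw(uint16_t dst, uint16_t src, unsigned count)
    {
        count &= 31;              /* hardware masks the count to 5 bits    */
        if (count == 0)
            return dst;
        if (count >= 16)          /* architecturally undefined on real CPUs */
            return 0;
        return (uint16_t)((dst << count) | (src >> (16 - count)));
    }

    int main(void)
    {
        printf("%04x\n", shldw(0x1234, 0xabcd, 4));   /* expect 234a */
        return 0;
    }
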
  • trunk/src/recompiler/tests/test-i386-vm86.S

    r1 r36140  
    1515        es movw $GET_OFFSET(int90_test), 0x90 * 4
    1616        es movw %cs, 0x90 * 4 + 2
    17        
     17
    1818        /* launch int 0x90 */
    1919
     
    2525        int $0x21
    2626
    27         pushf 
     27        pushf
    2828        popw %dx
    2929        movb $0xff, %ah
     
    3131
    3232        cli
    33         pushf 
     33        pushf
    3434        popw %dx
    3535        movb $0xff, %ah
    3636        int $0x21
    3737
    38         sti       
    39         pushfl 
     38        sti
     39        pushfl
    4040        popl %edx
    4141        movb $0xff, %ah
    4242        int $0x21
    43        
     43
    4444#if 0
    4545        movw $GET_OFFSET(IF_msg1), %dx
     
    5555#endif
    5656
    57         pushf 
     57        pushf
    5858        popw %dx
    5959        movb $0xff, %ah
    6060        int $0x21
    61        
     61
    6262        pushfl
    6363        movw %sp, %bx
     
    7474
    7575int90_test:
    76         pushf 
     76        pushf
    7777        pop %dx
    7878        movb $0xff, %ah
     
    8383        movb $0xff, %ah
    8484        int $0x21
    85        
     85
    8686        movw $GET_OFFSET(int90_msg), %dx
    8787        movb $0x09, %ah
    8888        int $0x21
    8989        iret
    90                    
     90
    9191int90_msg:
    9292        .string "INT90 started\n$"
    93  
     93
    9494hello_world:
    9595        .string "Hello VM86 world\n$"
     
    102102
    103103vm86_code_end:
    104        
  • trunk/src/recompiler/tests/test-i386.c

    r33656 r36140  
    11/*
    22 *  x86 CPU test
    3  * 
     3 *
    44 *  Copyright (c) 2003 Fabrice Bellard
    55 *
     
    4141
    4242#if !defined(__x86_64__)
    43 #define TEST_VM86
     43//#define TEST_VM86
    4444#define TEST_SEGS
    4545#endif
    4646//#define LINUX_VM86_IOPL_FIX
    4747//#define TEST_P4_FLAGS
    48 #if defined(__x86_64__)
     48#ifdef __SSE__
    4949#define TEST_SSE
    5050#define TEST_CMOV  1
    5151#define TEST_FCOMI 1
    5252#else
    53 //#define TEST_SSE
    54 #define TEST_CMOV  0
    55 #define TEST_FCOMI 0
     53#undef TEST_SSE
     54#define TEST_CMOV  1
     55#define TEST_FCOMI 1
    5656#endif
    5757
     
    468468}
    469469
     470#define TEST_LOOP(insn) \
     471{\
     472    for(i = 0; i < sizeof(ecx_vals) / sizeof(long); i++) {\
     473        ecx = ecx_vals[i];\
     474        for(zf = 0; zf < 2; zf++) {\
     475    asm("test %2, %2\n\t"\
     476        "movl $1, %0\n\t"\
     477          insn " 1f\n\t" \
     478        "movl $0, %0\n\t"\
     479        "1:\n\t"\
     480        : "=a" (res)\
     481        : "c" (ecx), "b" (!zf)); \
     482    printf("%-10s ECX=" FMTLX " ZF=%ld r=%d\n", insn, ecx, zf, res);      \
     483        }\
     484   }\
     485}
     486
     487void test_loop(void)
     488{
     489    long ecx, zf;
     490    const long ecx_vals[] = {
     491        0,
     492        1,
     493        0x10000,
     494        0x10001,
     495#if defined(__x86_64__)
     496        0x100000000L,
     497        0x100000001L,
     498#endif
     499    };
     500    int i, res;
     501
     502#if !defined(__x86_64__)
     503    TEST_LOOP("jcxz");
     504    TEST_LOOP("loopw");
     505    TEST_LOOP("loopzw");
     506    TEST_LOOP("loopnzw");
     507#endif
     508
     509    TEST_LOOP("jecxz");
     510    TEST_LOOP("loopl");
     511    TEST_LOOP("loopzl");
     512    TEST_LOOP("loopnzl");
     513}
     514
    470515#undef CC_MASK
    471516#ifdef TEST_P4_FLAGS
     
    481526#include "test-i386-muldiv.h"
    482527
    483 void test_imulw2(long op0, long op1) 
     528void test_imulw2(long op0, long op1)
    484529{
    485530    long res, s1, s0, flags;
     
    490535    asm volatile ("push %4\n\t"
    491536         "popf\n\t"
    492          "imulw %w2, %w0\n\t" 
     537         "imulw %w2, %w0\n\t"
    493538         "pushf\n\t"
    494539         "pop %1\n\t"
     
    499544}
    500545
    501 void test_imull2(long op0, long op1) 
     546void test_imull2(long op0, long op1)
    502547{
    503548    long res, s1, s0, flags;
     
    508553    asm volatile ("push %4\n\t"
    509554         "popf\n\t"
    510          "imull %k2, %k0\n\t" 
     555         "imull %k2, %k0\n\t"
    511556         "pushf\n\t"
    512557         "pop %1\n\t"
     
    518563
    519564#if defined(__x86_64__)
    520 void test_imulq2(long op0, long op1) 
     565void test_imulq2(long op0, long op1)
    521566{
    522567    long res, s1, s0, flags;
     
    527572    asm volatile ("push %4\n\t"
    528573         "popf\n\t"
    529          "imulq %2, %0\n\t" 
     574         "imulq %2, %0\n\t"
    530575         "pushf\n\t"
    531576         "pop %1\n\t"
     
    684729        "mov $0x12345678, %0\n"\
    685730        #op " %" size "2, %" size "0 ; setz %b1" \
    686         : "=r" (res), "=q" (resz)\
    687         : "g" (val));\
     731        : "=&r" (res), "=&q" (resz)\
     732        : "r" (val));\
    688733    printf("%-10s A=" FMTLX " R=" FMTLX " %ld\n", #op, val, res, resz);\
    689734}
     
    714759};
    715760
    716 union float64u q_nan = { .l = 0xFFF8000000000000 };
    717 union float64u s_nan = { .l = 0xFFF0000000000000 };
     761union float64u q_nan = { .l = 0xFFF8000000000000LL };
     762union float64u s_nan = { .l = 0xFFF0000000000000LL };
    718763
    719764void test_fops(double a, double b)
     
    750795        long double fpregs[8];
    751796    } float_env32;
    752    
     797
    753798    asm volatile ("fnstenv %0\n" : : "m" (float_env32));
    754799    float_env32.fpus &= ~0x7f;
     
    769814        : "=a" (fpus)
    770815        : "t" (a), "u" (b));
    771     printf("fcom(%f %f)=%04lx \n", 
     816    printf("fcom(%f %f)=%04lx \n",
    772817           a, b, fpus & (0x4500 | FPUS_EMASK));
    773818    fpu_clear_exceptions();
     
    776821        : "=a" (fpus)
    777822        : "t" (a), "u" (b));
    778     printf("fucom(%f %f)=%04lx\n", 
     823    printf("fucom(%f %f)=%04lx\n",
    779824           a, b, fpus & (0x4500 | FPUS_EMASK));
    780825    if (TEST_FCOMI) {
     
    787832            : "=r" (eflags), "=a" (fpus)
    788833            : "t" (a), "u" (b));
    789         printf("fcomi(%f %f)=%04lx %02lx\n", 
     834        printf("fcomi(%f %f)=%04lx %02lx\n",
    790835               a, b, fpus & FPUS_EMASK, eflags & (CC_Z | CC_P | CC_C));
    791836        fpu_clear_exceptions();
     
    796841            : "=r" (eflags), "=a" (fpus)
    797842            : "t" (a), "u" (b));
    798         printf("fucomi(%f %f)=%04lx %02lx\n", 
     843        printf("fucomi(%f %f)=%04lx %02lx\n",
    799844               a, b, fpus & FPUS_EMASK, eflags & (CC_Z | CC_P | CC_C));
    800845    }
     
    824869    printf("(long double)%f = %Lf\n", a, la);
    825870    printf("a=" FMT64X "\n", *(uint64_t *)&a);
    826     printf("la=" FMT64X " %04x\n", *(uint64_t *)&la, 
     871    printf("la=" FMT64X " %04x\n", *(uint64_t *)&la,
    827872           *(unsigned short *)((char *)(&la) + 8));
    828873
     
    830875    asm volatile ("fstcw %0" : "=m" (fpuc));
    831876    for(i=0;i<4;i++) {
    832         asm volatile ("fldcw %0" : : "m" ((fpuc & ~0x0c00) | (i << 10)));
     877        uint16_t val16;
     878        val16 = (fpuc & ~0x0c00) | (i << 10);
     879        asm volatile ("fldcw %0" : : "m" (val16));
    833880        asm volatile ("fist %0" : "=m" (wa) : "t" (a));
    834881        asm volatile ("fistl %0" : "=m" (ia) : "t" (a));
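fldcw takes a 16-bit memory operand, which is why the new code stages the rounding-control bits in a uint16_t before loading them, rather than handing an int expression to the "m" constraint. A minimal sketch of switching the x87 rounding mode the same way (round toward zero here; the variable names are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint16_t cw, cw_rz;
    int ia;
    double a = 2.7;

    asm volatile ("fnstcw %0" : "=m" (cw));
    /* bits 10-11 of the control word select the rounding mode:
       0 nearest, 1 down, 2 up, 3 toward zero */
    cw_rz = (cw & ~0x0c00) | (3 << 10);
    asm volatile ("fldcw %0" : : "m" (cw_rz));
    asm volatile ("fistpl %0" : "=m" (ia) : "t" (a) : "st");
    asm volatile ("fldcw %0" : : "m" (cw));   /* restore the old mode */
    printf("fistpl(%f) with round-to-zero = %d\n", a, ia);
    return 0;
}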
     
    866913    asm("fbstp %0" : "=m" (bcd[0]) : "t" (a) : "st");
    867914    asm("fbld %1" : "=t" (b) : "m" (bcd[0]));
    868     printf("a=%f bcd=%04x%04x%04x%04x%04x b=%f\n", 
     915    printf("a=%f bcd=%04x%04x%04x%04x%04x b=%f\n",
    869916           a, bcd[4], bcd[3], bcd[2], bcd[1], bcd[0], b);
    870917}
     
    9871034    test_fcvt(q_nan.d);
    9881035    test_fconst();
    989     test_fbcd(1234567890123456);
    990     test_fbcd(-123451234567890);
     1036    test_fbcd(1234567890123456.0);
     1037    test_fbcd(-123451234567890.0);
    9911038    test_fenv();
    9921039    if (TEST_CMOV) {
     
    10521099    TEST_BCD(aaa, 0x1234040a, 0, (CC_C | CC_A));
    10531100    TEST_BCD(aaa, 0x123405fa, 0, (CC_C | CC_A));
    1054    
     1101
    10551102    TEST_BCD(aas, 0x12340205, CC_A, (CC_C | CC_A));
    10561103    TEST_BCD(aas, 0x12340306, CC_A, (CC_C | CC_A));
     
    10741121    asm(#op " %" size "0, %" size "1" \
    10751122        : "=q" (op0), opconst (op1) \
    1076         : "0" (op0), "1" (op1));\
     1123        : "0" (op0));\
    10771124    printf("%-10s A=" FMTLX " B=" FMTLX "\n",\
    10781125           #op, op0, op1);\
     
    10871134    asm(#op " %" size "0, %" size "1" \
    10881135        : "=q" (op0), opconst (op1) \
    1089         : "0" (op0), "1" (op1), "a" (op2));\
     1136        : "0" (op0), "a" (op2));\
    10901137    printf("%-10s EAX=" FMTLX " A=" FMTLX " C=" FMTLX "\n",\
    10911138           #op, op2, op0, op1);\
     
    10951142{
    10961143#if defined(__x86_64__)
    1097     TEST_XCHG(xchgq, "", "=q");
    1098 #endif
    1099     TEST_XCHG(xchgl, "k", "=q");
    1100     TEST_XCHG(xchgw, "w", "=q");
    1101     TEST_XCHG(xchgb, "b", "=q");
     1144    TEST_XCHG(xchgq, "", "+q");
     1145#endif
     1146    TEST_XCHG(xchgl, "k", "+q");
     1147    TEST_XCHG(xchgw, "w", "+q");
     1148    TEST_XCHG(xchgb, "b", "+q");
    11021149
    11031150#if defined(__x86_64__)
    11041151    TEST_XCHG(xchgq, "", "=m");
    11051152#endif
    1106     TEST_XCHG(xchgl, "k", "=m");
    1107     TEST_XCHG(xchgw, "w", "=m");
    1108     TEST_XCHG(xchgb, "b", "=m");
     1153    TEST_XCHG(xchgl, "k", "+m");
     1154    TEST_XCHG(xchgw, "w", "+m");
     1155    TEST_XCHG(xchgb, "b", "+m");
    11091156
    11101157#if defined(__x86_64__)
    1111     TEST_XCHG(xaddq, "", "=q");
    1112 #endif
    1113     TEST_XCHG(xaddl, "k", "=q");
    1114     TEST_XCHG(xaddw, "w", "=q");
    1115     TEST_XCHG(xaddb, "b", "=q");
     1158    TEST_XCHG(xaddq, "", "+q");
     1159#endif
     1160    TEST_XCHG(xaddl, "k", "+q");
     1161    TEST_XCHG(xaddw, "w", "+q");
     1162    TEST_XCHG(xaddb, "b", "+q");
    11161163
    11171164    {
     
    11231170
    11241171#if defined(__x86_64__)
    1125     TEST_XCHG(xaddq, "", "=m");
    1126 #endif
    1127     TEST_XCHG(xaddl, "k", "=m");
    1128     TEST_XCHG(xaddw, "w", "=m");
    1129     TEST_XCHG(xaddb, "b", "=m");
     1172    TEST_XCHG(xaddq, "", "+m");
     1173#endif
     1174    TEST_XCHG(xaddl, "k", "+m");
     1175    TEST_XCHG(xaddw, "w", "+m");
     1176    TEST_XCHG(xaddb, "b", "+m");
    11301177
    11311178#if defined(__x86_64__)
    1132     TEST_CMPXCHG(cmpxchgq, "", "=q", 0xfbca7654);
    1133 #endif
    1134     TEST_CMPXCHG(cmpxchgl, "k", "=q", 0xfbca7654);
    1135     TEST_CMPXCHG(cmpxchgw, "w", "=q", 0xfbca7654);
    1136     TEST_CMPXCHG(cmpxchgb, "b", "=q", 0xfbca7654);
     1179    TEST_CMPXCHG(cmpxchgq, "", "+q", 0xfbca7654);
     1180#endif
     1181    TEST_CMPXCHG(cmpxchgl, "k", "+q", 0xfbca7654);
     1182    TEST_CMPXCHG(cmpxchgw, "w", "+q", 0xfbca7654);
     1183    TEST_CMPXCHG(cmpxchgb, "b", "+q", 0xfbca7654);
    11371184
    11381185#if defined(__x86_64__)
    1139     TEST_CMPXCHG(cmpxchgq, "", "=q", 0xfffefdfc);
    1140 #endif
    1141     TEST_CMPXCHG(cmpxchgl, "k", "=q", 0xfffefdfc);
    1142     TEST_CMPXCHG(cmpxchgw, "w", "=q", 0xfffefdfc);
    1143     TEST_CMPXCHG(cmpxchgb, "b", "=q", 0xfffefdfc);
     1186    TEST_CMPXCHG(cmpxchgq, "", "+q", 0xfffefdfc);
     1187#endif
     1188    TEST_CMPXCHG(cmpxchgl, "k", "+q", 0xfffefdfc);
     1189    TEST_CMPXCHG(cmpxchgw, "w", "+q", 0xfffefdfc);
     1190    TEST_CMPXCHG(cmpxchgb, "b", "+q", 0xfffefdfc);
    11441191
    11451192#if defined(__x86_64__)
    1146     TEST_CMPXCHG(cmpxchgq, "", "=m", 0xfbca7654);
    1147 #endif
    1148     TEST_CMPXCHG(cmpxchgl, "k", "=m", 0xfbca7654);
    1149     TEST_CMPXCHG(cmpxchgw, "w", "=m", 0xfbca7654);
    1150     TEST_CMPXCHG(cmpxchgb, "b", "=m", 0xfbca7654);
     1193    TEST_CMPXCHG(cmpxchgq, "", "+m", 0xfbca7654);
     1194#endif
     1195    TEST_CMPXCHG(cmpxchgl, "k", "+m", 0xfbca7654);
     1196    TEST_CMPXCHG(cmpxchgw, "w", "+m", 0xfbca7654);
     1197    TEST_CMPXCHG(cmpxchgb, "b", "+m", 0xfbca7654);
    11511198
    11521199#if defined(__x86_64__)
    1153     TEST_CMPXCHG(cmpxchgq, "", "=m", 0xfffefdfc);
    1154 #endif
    1155     TEST_CMPXCHG(cmpxchgl, "k", "=m", 0xfffefdfc);
    1156     TEST_CMPXCHG(cmpxchgw, "w", "=m", 0xfffefdfc);
    1157     TEST_CMPXCHG(cmpxchgb, "b", "=m", 0xfffefdfc);
     1200    TEST_CMPXCHG(cmpxchgq, "", "+m", 0xfffefdfc);
     1201#endif
     1202    TEST_CMPXCHG(cmpxchgl, "k", "+m", 0xfffefdfc);
     1203    TEST_CMPXCHG(cmpxchgw, "w", "+m", 0xfffefdfc);
     1204    TEST_CMPXCHG(cmpxchgb, "b", "+m", 0xfffefdfc);
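The long run of changes above replaces "=q"/"=m" with the read-write constraints "+q"/"+m": xchg, xadd and cmpxchg all read their operands before writing them, and with a pure output constraint the compiler is allowed to leave the register or memory slot uninitialized going into the asm. A minimal sketch of the corrected idiom with xchg; the values and variable names are illustrative:

#include <stdio.h>

int main(void)
{
    unsigned long a = 0x12345678;
    unsigned long b = 0xfbca7654;

    /* xchg reads and writes both operands, so both get "+" (read-write)
       constraints; a plain "=" output would let the compiler skip
       initializing them before the asm runs. */
    asm volatile ("xchg %0, %1" : "+q" (a), "+m" (b));
    printf("xchg: a=%lx b=%lx\n", a, b);
    return 0;
}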
    11581205
    11591206    {
    11601207        uint64_t op0, op1, op2;
     1208        long eax, edx;
    11611209        long i, eflags;
    11621210
    11631211        for(i = 0; i < 2; i++) {
    1164             op0 = 0x123456789abcd;
     1212            op0 = 0x123456789abcdLL;
     1213            eax = i2l(op0 & 0xffffffff);
     1214            edx = i2l(op0 >> 32);
    11651215            if (i == 0)
    1166                 op1 = 0xfbca765423456;
     1216                op1 = 0xfbca765423456LL;
    11671217            else
    11681218                op1 = op0;
    1169             op2 = 0x6532432432434;
    1170             asm("cmpxchg8b %1\n"
     1219            op2 = 0x6532432432434LL;
     1220            asm("cmpxchg8b %2\n"
    11711221                "pushf\n"
    1172                 "pop %2\n"
    1173                 : "=A" (op0), "=m" (op1), "=g" (eflags)
    1174                 : "0" (op0), "m" (op1), "b" ((int)op2), "c" ((int)(op2 >> 32)));
    1175             printf("cmpxchg8b: op0=" FMT64X " op1=" FMT64X " CC=%02lx\n",
    1176                     op0, op1, eflags & CC_Z);
     1222                "pop %3\n"
     1223                : "=a" (eax), "=d" (edx), "=m" (op1), "=g" (eflags)
     1224                : "0" (eax), "1" (edx), "m" (op1), "b" ((int)op2), "c" ((int)(op2 >> 32)));
     1225            printf("cmpxchg8b: eax=" FMTLX " edx=" FMTLX " op1=" FMT64X " CC=%02lx\n",
     1226                   eax, edx, op1, eflags & CC_Z);
    11771227        }
    11781228    }
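The rewritten cmpxchg8b test passes the expected value through explicit "a"/"d" operands instead of the old 64-bit "A" constraint, which on x86-64 does not mean EDX:EAX and so only worked on 32-bit hosts. A minimal sketch of a 64-bit compare-and-swap built the same way; the helper name and the values are illustrative, not part of the changeset:

#include <stdio.h>
#include <stdint.h>

/* cmpxchg8b: EDX:EAX holds the expected value, ECX:EBX the replacement,
   and ZF reports whether the swap happened.  Splitting the expected
   value into explicit "a"/"d" operands keeps the register assignment
   unambiguous on both 32- and 64-bit hosts. */
static int cas64(uint64_t *mem, uint64_t expected, uint64_t newval)
{
    uint32_t lo = (uint32_t)expected, hi = (uint32_t)(expected >> 32);
    unsigned char ok;

    asm volatile ("lock cmpxchg8b %0\n\t"
                  "setz %1"
                  : "+m" (*mem), "=q" (ok), "+a" (lo), "+d" (hi)
                  : "b" ((uint32_t)newval), "c" ((uint32_t)(newval >> 32))
                  : "cc");
    return ok;
}

int main(void)
{
    uint64_t v = 0x123456789abcdULL;
    int ok = cas64(&v, 0x123456789abcdULL, 0x6532432432434ULL);
    printf("cas64: ok=%d v=%llx\n", ok, (unsigned long long)v);
    return 0;
}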
     
    11831233/* segmentation tests */
    11841234
     1235#include <sys/syscall.h>
     1236#include <unistd.h>
    11851237#include <asm/ldt.h>
    1186 #include <linux/unistd.h>
    11871238#include <linux/version.h>
    11881239
    1189 _syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
     1240static inline int modify_ldt(int func, void * ptr, unsigned long bytecount)
     1241{
     1242    return syscall(__NR_modify_ldt, func, ptr, bytecount);
     1243}
    11901244
    11911245#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
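Current glibc no longer provides the _syscall3 macro, so the wrapper above issues the raw system call with syscall(2). For reference, a minimal sketch that reads the current LDT through the same interface (func 0 is the read operation per modify_ldt(2); the buffer size is an arbitrary choice):

#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    unsigned char buf[16 * 8];  /* room for a handful of 8-byte descriptors */
    long n;

    memset(buf, 0, sizeof(buf));
    /* func 0 reads the LDT; the return value is the number of bytes
       copied out (0 when no LDT has been set up). */
    n = syscall(__NR_modify_ldt, 0, buf, sizeof(buf));
    printf("modify_ldt(read) = %ld bytes\n", n);
    return 0;
}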
     
    12011255{\
    12021256    int res, res2;\
     1257    uint16_t mseg = seg;\
    12031258    res = 0x12345678;\
    12041259    asm (op " %" size "2, %" size "0\n" \
     
    12071262         "movl $1, %1\n"\
    12081263         "1:\n"\
    1209          : "=r" (res), "=r" (res2) : "m" (seg), "0" (res));\
     1264         : "=r" (res), "=r" (res2) : "m" (mseg), "0" (res));\
    12101265    printf(op ": Z=%d %08x\n", res2, res & ~(mask));\
     1266}
     1267
     1268#define TEST_ARPL(op, size, op1, op2)\
     1269{\
     1270    long a, b, c;                               \
     1271    a = (op1);                                  \
     1272    b = (op2);                                  \
     1273    asm volatile(op " %" size "3, %" size "0\n"\
     1274                 "movl $0,%1\n"\
     1275                 "jnz 1f\n"\
     1276                 "movl $1,%1\n"\
     1277                 "1:\n"\
     1278                 : "=r" (a), "=r" (c) : "0" (a), "r" (b));    \
     1279    printf(op size " A=" FMTLX " B=" FMTLX " R=" FMTLX " z=%ld\n",\
     1280           (long)(op1), (long)(op2), a, c);\
    12111281}
    12121282
     
    12871357    segoff.seg = MK_SEL(2);
    12881358    segoff.offset = 0xabcdef12;
    1289     asm volatile("lfs %2, %0\n\t" 
     1359    asm volatile("lfs %2, %0\n\t"
    12901360                 "movl %%fs, %1\n\t"
    1291                  : "=r" (res), "=g" (res2) 
     1361                 : "=r" (res), "=g" (res2)
    12921362                 : "m" (segoff));
    12931363    printf("FS:reg = %04x:%08x\n", res2, res);
     
    13021372    TEST_LR("lslw", "w", 0xfff8, 0);
    13031373    TEST_LR("lsll", "", 0xfff8, 0);
     1374
     1375    TEST_ARPL("arpl", "w", 0x12345678 | 3, 0x762123c | 1);
     1376    TEST_ARPL("arpl", "w", 0x12345678 | 1, 0x762123c | 3);
     1377    TEST_ARPL("arpl", "w", 0x12345678 | 1, 0x762123c | 1);
    13041378}
    13051379
     
    13281402
    13291403    /* call the first function */
    1330     asm volatile ("lcall %1, %2" 
     1404    asm volatile ("lcall %1, %2"
    13311405                  : "=a" (res)
    13321406                  : "i" (MK_SEL(1)), "i" (&code16_func1): "memory", "cc");
    13331407    printf("func1() = 0x%08x\n", res);
    1334     asm volatile ("lcall %2, %3" 
     1408    asm volatile ("lcall %2, %3"
    13351409                  : "=a" (res), "=c" (res2)
    13361410                  : "i" (MK_SEL(1)), "i" (&code16_func2): "memory", "cc");
    13371411    printf("func2() = 0x%08x spdec=%d\n", res, res2);
    1338     asm volatile ("lcall %1, %2" 
     1412    asm volatile ("lcall %1, %2"
    13391413                  : "=a" (res)
    13401414                  : "i" (MK_SEL(1)), "i" (&code16_func3): "memory", "cc");
     
    13741448
    13751449#if defined(__x86_64__)
     1450#if 0
    13761451    {
     1452        /* XXX: see if Intel Core2 and AMD64 behavior really
     1453           differ. Here we implemented the Intel way which is not
     1454           compatible yet with QEMU. */
    13771455        static struct __attribute__((packed)) {
    1378             uint32_t offset;
     1456            uint64_t offset;
    13791457            uint16_t seg;
    13801458        } desc;
     
    13841462
    13851463        asm volatile ("push %1\n"
    1386                       "call func_lret\n" 
     1464                      "call func_lret\n"
    13871465                      : "=a" (res)
    13881466                      : "r" (cs_sel) : "memory", "cc");
    13891467        printf("func_lret=" FMTLX "\n", res);
    13901468
    1391         /* NOTE: we assume that &func_lret < 4GB */
    13921469        desc.offset = (long)&func_lret;
    13931470        desc.seg = cs_sel;
    1394        
     1471
    13951472        asm volatile ("xor %%rax, %%rax\n"
    1396                       "rex64 lcall %1\n"
     1473                      "rex64 lcall *(%%rcx)\n"
    13971474                      : "=a" (res)
    1398                       : "m" (desc)
     1475                      : "c" (&desc)
    13991476                      : "memory", "cc");
    14001477        printf("func_lret2=" FMTLX "\n", res);
     
    14031480                      "mov $ 1f, %%rax\n"
    14041481                      "push %%rax\n"
    1405                       "ljmp %1\n"
     1482                      "rex64 ljmp *(%%rcx)\n"
    14061483                      "1:\n"
    14071484                      : "=a" (res)
    1408                       : "m" (desc), "b" (cs_sel)
     1485                      : "c" (&desc), "b" (cs_sel)
    14091486                      : "memory", "cc");
    14101487        printf("func_lret3=" FMTLX "\n", res);
    14111488    }
     1489#endif
    14121490#else
    1413     asm volatile ("push %%cs ; call %1" 
     1491    asm volatile ("push %%cs ; call %1"
    14141492                  : "=a" (res)
    14151493                  : "m" (func_lret): "memory", "cc");
    14161494    printf("func_lret=" FMTLX "\n", res);
    14171495
    1418     asm volatile ("pushf ; push %%cs ; call %1" 
     1496    asm volatile ("pushf ; push %%cs ; call %1"
    14191497                  : "=a" (res)
    14201498                  : "m" (func_iret): "memory", "cc");
     
    14831561   TEST_STRING(stos, "rep ");
    14841562   TEST_STRING(lods, ""); /* to verify stos */
    1485    TEST_STRING(lods, "rep "); 
     1563   TEST_STRING(lods, "rep ");
    14861564   TEST_STRING(movs, "");
    14871565   TEST_STRING(movs, "rep ");
     
    15161594}
    15171595
    1518 #undef __syscall_return
    1519 #define __syscall_return(type, res) \
    1520 do { \
    1521         return (type) (res); \
    1522 } while (0)
    1523 
    1524 _syscall2(int, vm86, int, func, struct vm86plus_struct *, v86)
     1596static inline int vm86(int func, struct vm86plus_struct *v86)
     1597{
     1598    return syscall(__NR_vm86, func, v86);
     1599}
    15251600
    15261601extern char vm86_code_start;
     
    15371612    int seg, ret;
    15381613
    1539     vm86_mem = mmap((void *)0x00000000, 0x110000, 
    1540                     PROT_WRITE | PROT_READ | PROT_EXEC, 
     1614    vm86_mem = mmap((void *)0x00000000, 0x110000,
     1615                    PROT_WRITE | PROT_READ | PROT_EXEC,
    15411616                    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    15421617    if (vm86_mem == MAP_FAILED) {
     
    15611636    /* move code to proper address. We use the same layout as a .com
    15621637       dos program. */
    1563     memcpy(vm86_mem + (VM86_CODE_CS << 4) + VM86_CODE_IP, 
     1638    memcpy(vm86_mem + (VM86_CODE_CS << 4) + VM86_CODE_IP,
    15641639           &vm86_code_start, &vm86_code_end - &vm86_code_start);
    15651640
     
    15731648            {
    15741649                int int_num, ah, v;
    1575                
     1650
    15761651                int_num = VM86_ARG(ret);
    15771652                if (int_num != 0x21)
     
    16761751    struct sigaction act;
    16771752    volatile int val;
    1678    
     1753
    16791754    act.sa_sigaction = sig_handler;
    16801755    sigemptyset(&act.sa_mask);
     
    17291804        ldt.useable = 1;
    17301805        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
    1731        
     1806
    17321807        if (setjmp(jmp_env) == 0) {
    17331808            /* segment not present */
     
    17541829        v1 = *(char *)0x1234;
    17551830    }
    1756    
     1831
    17571832    /* test illegal instruction reporting */
    17581833    printf("UD2 exception:\n");
     
    17661841        asm volatile("lock nop");
    17671842    }
    1768    
     1843
    17691844    printf("INT exception:\n");
    17701845    if (setjmp(jmp_env) == 0) {
     
    18381913                      "orl $0x00100, (%%esp)\n"
    18391914                      "popf\n"
    1840                       "movl $0xabcd, %0\n" 
     1915                      "movl $0xabcd, %0\n"
    18411916                      "movl $0x0, %0\n" : "=m" (val) : : "cc", "memory");
    18421917    }
     
    18691944                  "orl $0x00100, (%%esp)\n"
    18701945                  "popf\n"
    1871                   "movl $0xabcd, %0\n" 
     1946                  "movl $0xabcd, %0\n"
    18721947
    18731948                  /* jmp test */
     
    18951970                  "movl $4, %%ecx\n"
    18961971                  "rep cmpsb\n"
    1897                  
     1972
    18981973                  /* getpid() syscall: single step should skip one
    18991974                     instruction */
     
    19011976                  "int $0x80\n"
    19021977                  "movl $0, %%eax\n"
    1903                  
     1978
    19041979                  /* when modifying SS, trace is not done on the next
    19051980                     instruction */
     
    19171992                  "addl $1, %0\n"
    19181993                  "movl $1, %%eax\n"
    1919                  
     1994
    19201995                  "pushf\n"
    19211996                  "andl $~0x00100, (%%esp)\n"
    19221997                  "popf\n"
    1923                   : "=m" (val) 
    1924                   : 
     1998                  : "=m" (val)
     1999                  :
    19252000                  : "cc", "memory", "eax", "ecx", "esi", "edi");
    19262001    printf("val=%d\n", val);
     
    19352010};
    19362011
    1937 asm("smc_code2:\n"
     2012asm(".section \".data\"\n"
     2013    "smc_code2:\n"
    19382014    "movl 4(%esp), %eax\n"
    19392015    "movl %eax, smc_patch_addr2 + 1\n"
     
    19482024    "smc_patch_addr2:\n"
    19492025    "movl $1, %eax\n"
    1950     "ret\n");
     2026    "ret\n"
     2027    ".previous\n"
     2028    );
    19512029
    19522030typedef int FuncType(void);
     
    19552033{
    19562034    int i;
    1957 
    19582035    printf("self modifying code:\n");
    19592036    printf("func1 = 0x%x\n", ((FuncType *)code)());
     
    20372114
    20382115typedef int __m64 __attribute__ ((__mode__ (__V2SI__)));
    2039 typedef int __m128 __attribute__ ((__mode__(__V4SF__)));
     2116typedef float __m128 __attribute__ ((__mode__(__V4SF__)));
    20402117
    20412118typedef union {
     
    22112288
    22122289/* Force %xmm0 usage to avoid the case where both register index are 0
    2213    to test instruction decoding more extensively */
      2290   to test instruction decoding more extensively */
    22142291#define CVT_OP_XMM2MMX(op)\
    22152292{\
    22162293    asm volatile (#op " %1, %0" : "=y" (r.q[0]) : "x" (a.dq) \
    2217                   : "%xmm0");\
     2294                  : "%xmm0"); \
     2295    asm volatile("emms\n"); \
    22182296    printf("%-9s: a=" FMT64X "" FMT64X " r=" FMT64X "\n",\
    22192297           #op,\
     
    22252303{\
    22262304    asm volatile (#op " %1, %0" : "=x" (r.dq) : "y" (a.q[0]));\
     2305    asm volatile("emms\n"); \
    22272306    printf("%-9s: a=" FMT64X " r=" FMT64X "" FMT64X "\n",\
    22282307           #op,\
     
    22932372        " fxsave %1\n"
    22942373        " fninit\n"
    2295         : "=m" (*(uint32_t *)fp2), "=m" (*(uint32_t *)fp) 
     2374        : "=m" (*(uint32_t *)fp2), "=m" (*(uint32_t *)fp)
    22962375        : "m" (a), "m" (b));
    22972376    printf("fpuc=%04x\n", fp->fpuc);
     
    23002379    for(i = 0; i < 3; i++) {
    23012380        printf("ST%d: " FMT64X " %04x\n",
    2302                i, 
     2381               i,
    23032382               *(uint64_t *)&fp->fpregs1[i * 16],
    23042383               *(uint16_t *)&fp->fpregs1[i * 16 + 8]);
     
    23122391    for(i = 0; i < nb_xmm; i++) {
    23132392        printf("xmm%d: " FMT64X "" FMT64X "\n",
    2314                i, 
     2393               i,
    23152394               *(uint64_t *)&fp->xmm_regs[i * 16],
    23162395               *(uint64_t *)&fp->xmm_regs[i * 16 + 8]);
     
    23522431    MMX_OP2(pmulhuw);
    23532432    MMX_OP2(pmulhw);
    2354    
     2433
    23552434    MMX_OP2(psubsb);
    23562435    MMX_OP2(psubsw);
     
    23912470    asm volatile ("pmovmskb %1, %0" : "=r" (r.l[0]) : "y" (a.q[0]));
    23922471    printf("%-9s: r=%08x\n", "pmovmskb", r.l[0]);
    2393    
     2472
    23942473    asm volatile ("pmovmskb %1, %0" : "=r" (r.l[0]) : "x" (a.dq));
    23952474    printf("%-9s: r=%08x\n", "pmovmskb", r.l[0]);
     
    24032482        b.q[0] = test_values[1][0];
    24042483        b.q[1] = test_values[1][1];
    2405         asm volatile("maskmovq %1, %0" : 
     2484        asm volatile("maskmovq %1, %0" :
    24062485                     : "y" (a.q[0]), "y" (b.q[0]), "D" (&r)
    2407                      : "memory"); 
    2408         printf("%-9s: r=" FMT64X " a=" FMT64X " b=" FMT64X "\n", 
    2409                "maskmov", 
    2410                r.q[0], 
    2411                a.q[0], 
     2486                     : "memory");
     2487        printf("%-9s: r=" FMT64X " a=" FMT64X " b=" FMT64X "\n",
     2488               "maskmov",
     2489               r.q[0],
     2490               a.q[0],
    24122491               b.q[0]);
    2413         asm volatile("maskmovdqu %1, %0" : 
     2492        asm volatile("maskmovdqu %1, %0" :
    24142493                     : "x" (a.dq), "x" (b.dq), "D" (&r)
    2415                      : "memory"); 
    2416         printf("%-9s: r=" FMT64X "" FMT64X " a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X "\n", 
    2417                "maskmov", 
    2418                r.q[1], r.q[0], 
    2419                a.q[1], a.q[0], 
     2494                     : "memory");
     2495        printf("%-9s: r=" FMT64X "" FMT64X " a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X "\n",
     2496               "maskmov",
     2497               r.q[1], r.q[0],
     2498               a.q[1], a.q[0],
    24202499               b.q[1], b.q[0]);
    24212500    }
     
    25172596        SSE_OPS(cmpnle);
    25182597        SSE_OPS(cmpord);
    2519        
    2520        
     2598
     2599
    25212600        a.d[0] = 2.7;
    25222601        a.d[1] = -3.4;
     
    25942673#endif
    25952674
     2675#define TEST_CONV_RAX(op)\
     2676{\
     2677    unsigned long a, r;\
     2678    a = i2l(0x8234a6f8);\
     2679    r = a;\
     2680    asm volatile(#op : "=a" (r) : "0" (r));\
     2681    printf("%-10s A=" FMTLX " R=" FMTLX "\n", #op, a, r);\
     2682}
     2683
     2684#define TEST_CONV_RAX_RDX(op)\
     2685{\
     2686    unsigned long a, d, r, rh;                   \
     2687    a = i2l(0x8234a6f8);\
     2688    d = i2l(0x8345a1f2);\
     2689    r = a;\
     2690    rh = d;\
     2691    asm volatile(#op : "=a" (r), "=d" (rh) : "0" (r), "1" (rh));   \
     2692    printf("%-10s A=" FMTLX " R=" FMTLX ":" FMTLX "\n", #op, a, r, rh);  \
     2693}
     2694
     2695void test_conv(void)
     2696{
     2697    TEST_CONV_RAX(cbw);
     2698    TEST_CONV_RAX(cwde);
     2699#if defined(__x86_64__)
     2700    TEST_CONV_RAX(cdqe);
     2701#endif
     2702
     2703    TEST_CONV_RAX_RDX(cwd);
     2704    TEST_CONV_RAX_RDX(cdq);
     2705#if defined(__x86_64__)
     2706    TEST_CONV_RAX_RDX(cqo);
     2707#endif
     2708
     2709    {
     2710        unsigned long a, r;
     2711        a = i2l(0x12345678);
     2712        asm volatile("bswapl %k0" : "=r" (r) : "0" (a));
     2713        printf("%-10s: A=" FMTLX " R=" FMTLX "\n", "bswapl", a, r);
     2714    }
     2715#if defined(__x86_64__)
     2716    {
     2717        unsigned long a, r;
     2718        a = i2l(0x12345678);
     2719        asm volatile("bswapq %0" : "=r" (r) : "0" (a));
     2720        printf("%-10s: A=" FMTLX " R=" FMTLX "\n", "bswapq", a, r);
     2721    }
     2722#endif
     2723}
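The new test_conv routine exercises the sign-extension instructions (cbw, cwde, cdqe, cwd, cdq, cqo) and bswap. As a reminder of what these do, a minimal sketch showing cdq replicating the sign bit of EAX into EDX, checked against plain C; the input value is illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int32_t eax = (int32_t)0x8234a6f8;   /* a negative 32-bit value */
    int32_t edx;

    /* cdq copies the sign bit of EAX into every bit of EDX, producing
       the sign-extended pair EDX:EAX. */
    asm ("cdq" : "=d" (edx) : "a" (eax));
    printf("cdq: eax=%08x edx=%08x (C says %08x)\n",
           (uint32_t)eax, (uint32_t)edx,
           (uint32_t)(eax < 0 ? -1 : 0));
    return 0;
}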
     2724
    25962725extern void *__start_initcall;
    25972726extern void *__stop_initcall;
     
    26112740    test_mul();
    26122741    test_jcc();
     2742    test_loop();
    26132743    test_floats();
    26142744#if !defined(__x86_64__)
     
    26262756    test_vm86();
    26272757#endif
     2758#if !defined(__x86_64__)
    26282759    test_exceptions();
    2629 #if !defined(__x86_64__)
    26302760    test_self_modifying_code();
    26312761    test_single_step();
    26322762#endif
    26332763    test_enter();
     2764    test_conv();
    26342765#ifdef TEST_SSE
    26352766    test_sse();
  • trunk/src/recompiler/tests/test_path.c

    r1 r36140  
    150150    return 0;
    151151}
    152        
     152
  • trunk/src/recompiler/translate-all.c

    r33656 r36140  
    3232#include <stdio.h>
    3333#include <string.h>
     34#include <inttypes.h>
    3435
    3536#include "config.h"
     
    5556target_ulong gen_opc_npc[OPC_BUF_SIZE];
    5657target_ulong gen_opc_jump_pc[2];
    57 #elif defined(TARGET_MIPS)
     58#elif defined(TARGET_MIPS) || defined(TARGET_SH4)
    5859uint32_t gen_opc_hflags[OPC_BUF_SIZE];
    5960#endif
     
    7172    if (max == 0) {
    7273        max = TCG_MAX_OP_SIZE;
    73 #define DEF(s, n, copy_size) max = (copy_size > max) ? copy_size : max;
     74#define DEF(s, n, copy_size) max = copy_size > max? copy_size : max;
    7475#include "tcg-opc.h"
    7576#undef DEF
     
    8081}
    8182
    82 void cpu_gen_init()
     83void cpu_gen_init(void)
    8384{
    8485    tcg_context_init(&tcg_ctx);
     
    9394   code).
    9495*/
    95 int cpu_gen_code(CPUState *env, TranslationBlock *tb,
    96                  int *gen_code_size_ptr)
     96int cpu_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr)
    9797{
    9898    TCGContext *s = &tcg_ctx;
     
    111111#ifdef VBOX
    112112    RAWEx_ProfileStart(env, STATS_QEMU_COMPILATION);
     113#endif
     114
    113115    tcg_func_start(s);
    114116
    115117    gen_intermediate_code(env, tb);
    116 #else /* !VBOX */
    117     tcg_func_start(s);
    118 
    119     gen_intermediate_code(env, tb);
    120 #endif /* !VBOX */
    121118
    122119    /* generate machine code */
     
    129126    s->tb_next = NULL;
    130127    /* the following two entries are optional (only used for string ops) */
     128    /* XXX: not used ? */
    131129    tb->tb_jmp_offset[2] = 0xffff;
    132130    tb->tb_jmp_offset[3] = 0xffff;
     
    141139    s->code_time -= profile_getclock();
    142140#endif
    143 
    144141    gen_code_size = dyngen_code(s, gen_code_buf);
    145142    *gen_code_size_ptr = gen_code_size;