VirtualBox

Changeset 36175 in vbox for trunk/src/recompiler


Timestamp: Mar 4, 2011 4:21:09 PM (14 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 70371
Message: rem: Synced up to v0.11.1 (35bfc7324e2e6946c4113ada5db30553a1a7c40b) from git://git.savannah.nongnu.org/qemu.git.

Location: trunk/src/recompiler
Files: 3 added, 49 edited

  • trunk/src/recompiler/Sun/config.h

    r28800 r36175  
    2121#define TARGET_I386 1
    2222#define CONFIG_SOFTMMU 1
     23#define TARGET_PHYS_ADDR_BITS 64
    2324
    2425#ifdef VBOX_WITH_64_BITS_GUESTS
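
Note on the new define: v0.11.1 moves the target_phys_addr_t machinery out of cpu-defs.h into targphys.h (see the cpu-defs.h hunks below), so the hand-maintained Sun/config.h presumably has to provide TARGET_PHYS_ADDR_BITS itself; VBox pins it to 64. A minimal restatement of the selection this define drives, mirroring the block removed from cpu-defs.h and assumed to live in targphys.h now (requires <inttypes.h> for PRIx64):

    #if TARGET_PHYS_ADDR_BITS == 32
    typedef uint32_t target_phys_addr_t;
    # define TARGET_FMT_plx "%08x"
    #elif TARGET_PHYS_ADDR_BITS == 64
    typedef uint64_t target_phys_addr_t;
    # define TARGET_FMT_plx "%016" PRIx64
    #else
    # error TARGET_PHYS_ADDR_BITS undefined
    #endif
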
  • trunk/src/recompiler/VBoxRecompiler.c

    r36170 r36175  
    324324     * Register ram types.
    325325     */
    326     pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
     326    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    327327    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    328     pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
     328    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    329329    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    330330    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
     
    469469
    470470/**
    471  * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
     471 * Initializes phys_ram_dirty and phys_ram_dirty_size.
    472472 *
    473473 * @returns VBox status code.
     
    484484                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
    485485                          VERR_OUT_OF_RANGE);
    486     phys_ram_size = cb;
    487486    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    488487    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
     
    813812     */
    814813    interrupt_request = pVM->rem.s.Env.interrupt_request;
    815     Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER  | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
     814    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER  | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    816815    pVM->rem.s.Env.interrupt_request = 0;
    817816    cpu_single_step(&pVM->rem.s.Env, 1);
     
    953952    {
    954953        int interrupt_request = pVM->rem.s.Env.interrupt_request;
    955         Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
     954        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    956955#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
    957956        cpu_single_step(&pVM->rem.s.Env, 0);
     
    24392438     * (See @remark for why we don't check for other FFs.)
    24402439     */
    2441     pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
     2440    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    24422441    if (    pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
    24432442        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
     
    45004499#define LOG_GROUP LOG_GROUP_REM_IOPORT
    45014500
    4502 void cpu_outb(CPUState *env, int addr, int val)
     4501void cpu_outb(CPUState *env, pio_addr_t addr, uint8_t val)
    45034502{
    45044503    int rc;
     
    45194518}
    45204519
    4521 void cpu_outw(CPUState *env, int addr, int val)
     4520void cpu_outw(CPUState *env, pio_addr_t addr, uint16_t val)
    45224521{
    45234522    //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
     
    45344533}
    45354534
    4536 void cpu_outl(CPUState *env, int addr, int val)
     4535void cpu_outl(CPUState *env, pio_addr_t addr, uint32_t val)
    45374536{
    45384537    int rc;
     
    45504549}
    45514550
    4552 int cpu_inb(CPUState *env, int addr)
     4551uint8_t cpu_inb(CPUState *env, pio_addr_t addr)
    45534552{
    45544553    uint32_t u32 = 0;
     
    45584557        if (/*addr != 0x61 && */addr != 0x71)
    45594558            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
    4560         return (int)u32;
     4559        return (uint8_t)u32;
    45614560    }
    45624561    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
     
    45644563        Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
    45654564        remR3RaiseRC(env->pVM, rc);
    4566         return (int)u32;
     4565        return (uint8_t)u32;
    45674566    }
    45684567    remAbort(rc, __FUNCTION__);
    4569     return 0xff;
    4570 }
    4571 
    4572 int cpu_inw(CPUState *env, int addr)
     4568    return UINT8_C(0xff);
     4569}
     4570
     4571uint16_t cpu_inw(CPUState *env, pio_addr_t addr)
    45734572{
    45744573    uint32_t u32 = 0;
     
    45774576    {
    45784577        Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
    4579         return (int)u32;
     4578        return (uint16_t)u32;
    45804579    }
    45814580    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
     
    45834582        Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
    45844583        remR3RaiseRC(env->pVM, rc);
    4585         return (int)u32;
     4584        return (uint16_t)u32;
    45864585    }
    45874586    remAbort(rc, __FUNCTION__);
    4588     return 0xffff;
    4589 }
    4590 
    4591 int cpu_inl(CPUState *env, int addr)
     4587    return UINT16_C(0xffff);
     4588}
     4589
     4590uint32_t cpu_inl(CPUState *env, pio_addr_t addr)
    45924591{
    45934592    uint32_t u32 = 0;
     
    45984597//    loglevel = ~0;
    45994598        Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
    4600         return (int)u32;
     4599        return u32;
    46014600    }
    46024601    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
     
    46044603        Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
    46054604        remR3RaiseRC(env->pVM, rc);
    4606         return (int)u32;
     4605        return u32;
    46074606    }
    46084607    remAbort(rc, __FUNCTION__);
    4609     return 0xffffffff;
     4608    return UINT32_C(0xffffffff);
    46104609}
    46114610
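
Note on the registration hunks above: cpu_register_io_memory() no longer takes an io_index as its first parameter; it picks a free slot itself and returns it (the old four-argument prototype is visible in the cpu-all.h hunk further down, where it is removed). The port I/O glue likewise switches from int to pio_addr_t addresses and sized value types. A hedged sketch of a registration against the new call shape, using the CPUReadMemoryFunc/CPUWriteMemoryFunc callback types shown in the cpu-all.h hunk; the mySketch*/g_apfnSketch*/pvUser names are placeholders, not code from the tree:

    /* Placeholder callbacks; normally one handler per access size (byte, word,
       dword), a single pair is reused here to keep the sketch short. */
    static uint32_t mySketchMMIORead(void *pvOpaque, target_phys_addr_t GCPhys)
    {
        return UINT32_C(0xffffffff);        /* unclaimed MMIO reads as all-ones */
    }
    static void mySketchMMIOWrite(void *pvOpaque, target_phys_addr_t GCPhys, uint32_t u32)
    {
        /* drop the write */
    }

    static CPUReadMemoryFunc  *g_apfnSketchRead[3]  = { mySketchMMIORead,  mySketchMMIORead,  mySketchMMIORead  };
    static CPUWriteMemoryFunc *g_apfnSketchWrite[3] = { mySketchMMIOWrite, mySketchMMIOWrite, mySketchMMIOWrite };

    static int sketchRegisterMMIO(void *pvUser)
    {
        /* New-style call: no io_index argument; the returned slot id is what
           lands in pVM->rem.s.iMMIOMemType above. */
        int iMemType = cpu_register_io_memory(g_apfnSketchRead, g_apfnSketchWrite, pvUser);
        AssertReleaseMsg(iMemType >= 0, ("iMemType=%d\n", iMemType));
        return iMemType;
    }
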
  • trunk/src/recompiler/bswap.h

    r36170 r36175  
    152152    uint8_t *p1 = (uint8_t *)p;
    153153
    154     p1[0] = (uint8_t)v;
     154    p1[0] = v & 0xff;
    155155    p1[1] = v >> 8;
    156156}
     
    160160    uint8_t *p1 = (uint8_t *)p;
    161161
    162     p1[0] = (uint8_t)v;
     162    p1[0] = v & 0xff;
    163163    p1[1] = v >> 8;
    164164    p1[2] = v >> 16;
     
    189189
    190190    p1[0] = v >> 8;
    191     p1[1] = (uint8_t)v;
     191    p1[1] = v & 0xff;
    192192}
    193193
     
    199199    p1[1] = v >> 16;
    200200    p1[2] = v >> 8;
    201     p1[3] = (uint8_t)v;
     201    p1[3] = v & 0xff;
    202202}
    203203
  • trunk/src/recompiler/cpu-all.h

    r36170 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    3938#endif /* VBOX */
    4039#include "qemu-common.h"
    41 
    42 #if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
    43 #define WORDS_ALIGNED
    44 #endif
     40#include "cpu-common.h"
    4541
    4642/* some important defines:
     
    5753 */
    5854
    59 #include "bswap.h"
    6055#include "softfloat.h"
    6156
     
    887882
    888883void page_dump(FILE *f);
     884int walk_memory_regions(void *,
     885    int (*fn)(void *, unsigned long, unsigned long, unsigned long));
    889886int page_get_flags(target_ulong address);
    890887void page_set_flags(target_ulong start, target_ulong end, int flags);
     
    893890void cpu_exec_init_all(unsigned long tb_size);
    894891CPUState *cpu_copy(CPUState *env);
     892CPUState *qemu_get_cpu(int cpu);
    895893
    896894void cpu_dump_state(CPUState *env, FILE *f,
     
    912910extern int use_icount;
    913911
    914 #define CPU_INTERRUPT_EXIT   0x01 /* wants exit from main loop */
    915912#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
    916913#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
     
    922919#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending.  */
    923920#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */
     921#define CPU_INTERRUPT_INIT   0x400 /* INIT pending. */
     922#define CPU_INTERRUPT_SIPI   0x800 /* SIPI pending. */
     923#define CPU_INTERRUPT_MCE    0x1000 /* (x86 only) MCE pending. */
    924924
    925925#ifdef VBOX
    926926/** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */
    927 # define CPU_INTERRUPT_SINGLE_INSTR             0x0400
     927# define CPU_INTERRUPT_SINGLE_INSTR             0x02000000
    928928/** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */
    929 # define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT   0x0800
     929# define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT   0x04000000
    930930/** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */
    931 # define CPU_INTERRUPT_RC                       0x1000
    932 /** Exit current TB to process an external interrupt request (also in op.c!!) */
    933 # define CPU_INTERRUPT_EXTERNAL_EXIT            0x2000
    934 /** Exit current TB to process an external interrupt request (also in op.c!!) */
    935 # define CPU_INTERRUPT_EXTERNAL_HARD            0x4000
    936 /** Exit current TB to process an external interrupt request (also in op.c!!) */
    937 # define CPU_INTERRUPT_EXTERNAL_TIMER           0x8000
    938 /** Exit current TB to process an external interrupt request (also in op.c!!) */
    939 # define CPU_INTERRUPT_EXTERNAL_DMA             0x10000
     931# define CPU_INTERRUPT_RC                       0x08000000
     932/** Exit current TB to process an external request. */
     933# define CPU_INTERRUPT_EXTERNAL_EXIT            0x10000000
     934/** Exit current TB to process an external interrupt request. */
     935# define CPU_INTERRUPT_EXTERNAL_HARD            0x20000000
     936/** Exit current TB to process an external timer request. */
     937# define CPU_INTERRUPT_EXTERNAL_TIMER           0x40000000
     938/** Exit current TB to process an external DMA request. */
     939# define CPU_INTERRUPT_EXTERNAL_DMA             0x80000000
    940940#endif /* VBOX */
    941941void cpu_interrupt(CPUState *s, int mask);
    942942void cpu_reset_interrupt(CPUState *env, int mask);
     943
     944void cpu_exit(CPUState *s);
     945
     946int qemu_cpu_has_work(CPUState *env);
    943947
    944948/* Breakpoint/watchpoint flags */
     
    10001004
    10011005/* IO ports API */
    1002 
    1003 /* NOTE: as these functions may be even used when there is an isa
    1004    brige on non x86 targets, we always defined them */
    1005 #ifndef NO_CPU_IO_DEFS
    1006 void cpu_outb(CPUState *env, int addr, int val);
    1007 void cpu_outw(CPUState *env, int addr, int val);
    1008 void cpu_outl(CPUState *env, int addr, int val);
    1009 int cpu_inb(CPUState *env, int addr);
    1010 int cpu_inw(CPUState *env, int addr);
    1011 int cpu_inl(CPUState *env, int addr);
    1012 #endif
    1013 
    1014 /* address in the RAM (different from a physical address) */
    1015 #ifdef USE_KQEMU
    1016 typedef uint32_t ram_addr_t;
    1017 #else
    1018 typedef unsigned long ram_addr_t;
    1019 #endif
     1006#include "ioport.h"
    10201007
    10211008/* memory API */
    10221009
    10231010#ifndef VBOX
    1024 extern ram_addr_t phys_ram_size;
    10251011extern int phys_ram_fd;
    1026 extern uint8_t *phys_ram_base;
    10271012extern uint8_t *phys_ram_dirty;
    10281013extern ram_addr_t ram_size;
     1014extern ram_addr_t last_ram_offset;
    10291015#else /* VBOX */
    1030 extern RTGCPHYS phys_ram_size;
    10311016/** This is required for bounds checking the phys_ram_dirty accesses. */
    10321017extern RTGCPHYS phys_ram_dirty_size;
     
    10401025   so only a limited number of ids are avaiable.  */
    10411026
    1042 #define IO_MEM_SHIFT       3
    10431027#define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS  - IO_MEM_SHIFT))
    1044 
    1045 #define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
    1046 #define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
    1047 #define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
    1048 #define IO_MEM_NOTDIRTY    (3 << IO_MEM_SHIFT)
    1049 
    1050 /* Acts like a ROM when read and like a device when written.  */
    1051 #define IO_MEM_ROMD        (1)
    1052 #define IO_MEM_SUBPAGE     (2)
    1053 #define IO_MEM_SUBWIDTH    (4)
    10541028
    10551029/* Flags stored in the low bits of the TLB virtual address.  These are
     
    10631037#define TLB_MMIO        (1 << 5)
    10641038
    1065 typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
    1066 typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
    1067 
    1068 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
    1069                                          ram_addr_t size,
    1070                                          ram_addr_t phys_offset,
    1071                                          ram_addr_t region_offset);
    1072 static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
    1073                                                 ram_addr_t size,
    1074                                                 ram_addr_t phys_offset)
    1075 {
    1076     cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
    1077 }
    1078 
    1079 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
    1080 ram_addr_t qemu_ram_alloc(ram_addr_t);
    1081 void qemu_ram_free(ram_addr_t addr);
    1082 int cpu_register_io_memory(int io_index,
    1083                            CPUReadMemoryFunc **mem_read,
    1084                            CPUWriteMemoryFunc **mem_write,
    1085                            void *opaque);
    1086 void cpu_unregister_io_memory(int table_address);
    1087 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
    1088 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
    1089 
    1090 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
    1091                             int len, int is_write);
    1092 static inline void cpu_physical_memory_read(target_phys_addr_t addr,
    1093                                             uint8_t *buf, int len)
    1094 {
    1095     cpu_physical_memory_rw(addr, buf, len, 0);
    1096 }
    1097 static inline void cpu_physical_memory_write(target_phys_addr_t addr,
    1098                                              const uint8_t *buf, int len)
    1099 {
    1100     cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
    1101 }
    1102 void *cpu_physical_memory_map(target_phys_addr_t addr,
    1103                               target_phys_addr_t *plen,
    1104                               int is_write);
    1105 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
    1106                                int is_write, target_phys_addr_t access_len);
    1107 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
    1108 void cpu_unregister_map_client(void *cookie);
    1109 
    1110 uint32_t ldub_phys(target_phys_addr_t addr);
    1111 uint32_t lduw_phys(target_phys_addr_t addr);
    1112 uint32_t ldl_phys(target_phys_addr_t addr);
    1113 uint64_t ldq_phys(target_phys_addr_t addr);
    1114 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
    1115 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
    1116 void stb_phys(target_phys_addr_t addr, uint32_t val);
    1117 void stw_phys(target_phys_addr_t addr, uint32_t val);
    1118 void stl_phys(target_phys_addr_t addr, uint32_t val);
    1119 void stq_phys(target_phys_addr_t addr, uint64_t val);
    1120 
    1121 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
    1122                                    const uint8_t *buf, int len);
    11231039int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
    11241040                        uint8_t *buf, int len, int is_write);
     
    11301046
    11311047/* read dirty bit (return 0 or 1) */
    1132 #ifndef VBOX
    11331048static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
    11341049{
    1135     return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
    1136 }
    1137 #else  /* VBOX */
    1138 DECLINLINE(int) cpu_physical_memory_is_dirty(ram_addr_t addr)
    1139 {
     1050#ifdef VBOX
    11401051    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    11411052    {
     
    11441055        return 0;
    11451056    }
     1057#endif /* VBOX */
    11461058    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
    11471059}
    1148 #endif /* VBOX */
    1149 
    1150 #ifndef VBOX
     1060
    11511061static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
    11521062                                                int dirty_flags)
    11531063{
    1154     return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
    1155 }
    1156 #else  /* VBOX */
    1157 DECLINLINE(int) cpu_physical_memory_get_dirty(ram_addr_t addr,
    1158                                               int dirty_flags)
    1159 {
     1064#ifdef VBOX
    11601065    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    11611066    {
     
    11641069        return 0xff & dirty_flags; /** @todo I don't think this is the right thing to return, fix! */
    11651070    }
     1071#endif /* VBOX */
    11661072    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
    11671073}
    1168 #endif /* VBOX */
    1169 
    1170 #ifndef VBOX
     1074
    11711075static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
    11721076{
    1173     phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
    1174 }
    1175 #else  /* VBOX */
    1176 DECLINLINE(void) cpu_physical_memory_set_dirty(ram_addr_t addr)
    1177 {
     1077#ifdef VBOX
    11781078    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    11791079    {
     
    11821082        return;
    11831083    }
     1084#endif /* VBOX */
    11841085    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
    11851086}
    1186 #endif /* VBOX */
    11871087
    11881088void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
     
    11941094int cpu_physical_memory_get_dirty_tracking(void);
    11951095
    1196 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);
     1096int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
     1097                                   target_phys_addr_t end_addr);
    11971098
    11981099void dump_exec_info(FILE *f,
     
    13581259#endif
    13591260
     1261void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
     1262                        uint64_t mcg_status, uint64_t addr, uint64_t misc);
     1263
    13601264#ifdef VBOX
    13611265void tb_invalidate_virt(CPUState *env, uint32_t eip);
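
Note on the renumbered VBox request flags above: the move is not cosmetic. The old private values collide with bits this merge imports from upstream - CPU_INTERRUPT_SINGLE_INSTR (0x0400) with CPU_INTERRUPT_INIT (0x400), CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT (0x0800) with CPU_INTERRUPT_SIPI (0x800), and CPU_INTERRUPT_RC (0x1000) with CPU_INTERRUPT_MCE (0x1000) - so the VBox-only flags are pushed up into the top byte of the mask. A small compile-time check along those lines (illustrative only; the SKETCH_* names are not in the tree):

    /* Fails to compile (negative array size) if the relocated VBox request
       flags ever overlap the new upstream INIT/SIPI/MCE bits. */
    #define SKETCH_VBOX_REQ_MASK   ( CPU_INTERRUPT_SINGLE_INSTR \
                                   | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT \
                                   | CPU_INTERRUPT_RC \
                                   | CPU_INTERRUPT_EXTERNAL_EXIT \
                                   | CPU_INTERRUPT_EXTERNAL_HARD \
                                   | CPU_INTERRUPT_EXTERNAL_TIMER \
                                   | CPU_INTERRUPT_EXTERNAL_DMA )
    typedef char sketch_vbox_flags_clear_of_qemu_flags
        [(SKETCH_VBOX_REQ_MASK & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_SIPI | CPU_INTERRUPT_MCE)) ? -1 : 1];
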
  • trunk/src/recompiler/cpu-defs.h

    r36171 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    3837#include <setjmp.h>
    3938#include <inttypes.h>
     39#include <signal.h>
    4040#include "osdep.h"
    4141#include "sys-queue.h"
     42#include "targphys.h"
    4243
    4344#ifndef TARGET_LONG_BITS
    4445#error TARGET_LONG_BITS must be defined before including this header
    45 #endif
    46 
    47 #ifndef TARGET_PHYS_ADDR_BITS
    48 #if TARGET_LONG_BITS >= HOST_LONG_BITS
    49 #define TARGET_PHYS_ADDR_BITS TARGET_LONG_BITS
    50 #else
    51 #define TARGET_PHYS_ADDR_BITS HOST_LONG_BITS
    52 #endif
    5346#endif
    5447
     
    7063#else
    7164#error TARGET_LONG_SIZE undefined
    72 #endif
    73 
    74 /* target_phys_addr_t is the type of a physical address (its size can
    75    be different from 'target_ulong'). We have sizeof(target_phys_addr)
    76    = max(sizeof(unsigned long),
    77    sizeof(size_of_target_physical_address)) because we must pass a
    78    host pointer to memory operations in some cases */
    79 
    80 #if TARGET_PHYS_ADDR_BITS == 32
    81 typedef uint32_t target_phys_addr_t;
    82 #define TARGET_FMT_plx "%08x"
    83 #elif TARGET_PHYS_ADDR_BITS == 64
    84 typedef uint64_t target_phys_addr_t;
    85 #define TARGET_FMT_plx "%016" PRIx64
    86 #else
    87 #error TARGET_PHYS_ADDR_BITS undefined
    8865#endif
    8966
     
    186163                                     memory was accessed */             \
    187164    uint32_t halted; /* Nonzero if the CPU is in suspend state */       \
     165    uint32_t stop;   /* Stop request */                                 \
     166    uint32_t stopped; /* Artificially stopped */                        \
    188167    uint32_t interrupt_request;                                         \
    189168    volatile /*sig_atomic_t - vbox*/ int32_t exit_request;                                 \
     
    221200    int exception_index;                                                \
    222201                                                                        \
    223     void *next_cpu; /* next CPU sharing TB cache */                     \
     202    CPUState *next_cpu; /* next CPU sharing TB cache */                 \
    224203    int cpu_index; /* CPU index (informative) */                        \
     204    uint32_t host_tid; /* host thread ID */                             \
     205    int numa_node; /* NUMA node this cpu is belonging to  */            \
    225206    int running; /* Nonzero if cpu is currently running(usermode).  */  \
    226207    /* user data */                                                     \
    227208    void *opaque;                                                       \
    228209                                                                        \
     210    uint32_t created;                                                   \
     211    struct QemuThread *thread;                                          \
     212    struct QemuCond *halt_cond;                                         \
    229213    const char *cpu_model_str;                                          \
    230214    struct KVMState *kvm_state;                                         \
  • trunk/src/recompiler/cpu-exec.c

    r36171 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    2928
    3029#include "config.h"
    31 #define CPU_NO_GLOBAL_REGS
    3230#include "exec.h"
    3331#include "disas.h"
     
    6159//#define DEBUG_EXEC
    6260//#define DEBUG_SIGNAL
     61
     62int qemu_cpu_has_work(CPUState *env)
     63{
     64    return cpu_has_work(env);
     65}
    6366
    6467void cpu_loop_exit(void)
     
    389392                    }
    390393                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
    391                     if (interrupt_request & CPU_INTERRUPT_EXIT)
    392                     {
    393                         env->exception_index = EXCP_INTERRUPT;
    394                         ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
    395                         ret = env->exception_index;
    396                         cpu_loop_exit();
    397                     }
    398394                    if (interrupt_request & CPU_INTERRUPT_RC)
    399395                    {
     
    518514                 gcc-4.4/amd64 anymore, see #3883. */
    519515        env->current_tb = NULL;
    520         if (    !(env->interrupt_request & (  CPU_INTERRUPT_EXIT | CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
     516        if (    !(env->interrupt_request & (  CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
    521517                                            | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
    522518            &&  (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
     
    575571#elif defined(TARGET_ARM)
    576572#elif defined(TARGET_PPC)
     573#elif defined(TARGET_MICROBLAZE)
    577574#elif defined(TARGET_MIPS)
    578575#elif defined(TARGET_SH4)
     
    587584    for(;;) {
    588585        if (setjmp(env->jmp_env) == 0) {
     586#if defined(__sparc__) && !defined(HOST_SOLARIS)
     587#undef env
     588                    env = cpu_single_env;
     589#define env cpu_single_env
     590#endif
    589591            env->current_tb = NULL;
    590592            /* if an exception is pending, we execute it here */
     
    624626#elif defined(TARGET_PPC)
    625627                    do_interrupt(env);
     628#elif defined(TARGET_MICROBLAZE)
     629                    do_interrupt(env);
    626630#elif defined(TARGET_MIPS)
    627631                    do_interrupt(env);
     
    643647                env->exception_index = -1;
    644648            }
    645 #ifdef USE_KQEMU
     649#ifdef CONFIG_KQEMU
    646650            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
    647651                int ret;
     
    691695                    }
    692696#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    693     defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
     697    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
     698    defined(TARGET_MICROBLAZE)
    694699                    if (interrupt_request & CPU_INTERRUPT_HALT) {
    695700                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
     
    700705#endif
    701706#if defined(TARGET_I386)
    702                     if (env->hflags2 & HF2_GIF_MASK) {
     707                    if (interrupt_request & CPU_INTERRUPT_INIT) {
     708                            svm_check_intercept(SVM_EXIT_INIT);
     709                            do_cpu_init(env);
     710                            env->exception_index = EXCP_HALTED;
     711                            cpu_loop_exit();
     712                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
     713                            do_cpu_sipi(env);
     714                    } else if (env->hflags2 & HF2_GIF_MASK) {
    703715                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
    704716                            !(env->hflags & HF_SMM_MASK)) {
     
    713725                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
    714726                            next_tb = 0;
     727                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
     728                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
     729                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
     730                            next_tb = 0;
    715731                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
    716732                                   (((env->hflags2 & HF2_VINTR_MASK) &&
     
    724740                            intno = cpu_get_pic_interrupt(env);
    725741                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
     742#if defined(__sparc__) && !defined(HOST_SOLARIS)
     743#undef env
     744                    env = cpu_single_env;
     745#define env cpu_single_env
     746#endif
    726747                            do_interrupt(intno, 0, 0, 0, 1);
    727748                            /* ensure that no TB jump will be modified as
     
    755776                        next_tb = 0;
    756777                    }
     778#elif defined(TARGET_MICROBLAZE)
     779                    if ((interrupt_request & CPU_INTERRUPT_HARD)
     780                        && (env->sregs[SR_MSR] & MSR_IE)
     781                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
     782                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
     783                        env->exception_index = EXCP_IRQ;
     784                        do_interrupt(env);
     785                        next_tb = 0;
     786                    }
    757787#elif defined(TARGET_MIPS)
    758788                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
     
    770800#elif defined(TARGET_SPARC)
    771801                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
    772                         (env->psret != 0)) {
     802                        cpu_interrupts_enabled(env)) {
    773803                        int pil = env->interrupt_index & 15;
    774804                        int type = env->interrupt_index & 0xf0;
     
    781811                            do_interrupt(env);
    782812                            env->interrupt_index = 0;
    783 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
     813#if !defined(CONFIG_USER_ONLY)
    784814                            cpu_check_irqs(env);
    785815#endif
     
    884914                              | env->cc_dest | (env->cc_x << 4);
    885915                    log_cpu_state(env, 0);
     916#elif defined(TARGET_MICROBLAZE)
     917                    log_cpu_state(env, 0);
    886918#elif defined(TARGET_MIPS)
    887919                    log_cpu_state(env, 0);
     
    918950                {
    919951                    if (next_tb != 0 &&
    920 #ifdef USE_KQEMU
     952#ifdef CONFIG_KQEMU
    921953                        (env->kqemu_enabled != 2) &&
    922954#endif
     
    9751007                /* reset soft MMU for next block (it can currently
    9761008                   only be set by a memory fault) */
    977 #if defined(USE_KQEMU)
     1009#if defined(CONFIG_KQEMU)
    9781010#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
    9791011                if (kqemu_is_ok(env) &&
     
    10011033    env->sr = (env->sr & 0xffe0)
    10021034              | env->cc_dest | (env->cc_x << 4);
     1035#elif defined(TARGET_MICROBLAZE)
    10031036#elif defined(TARGET_MIPS)
    10041037#elif defined(TARGET_SH4)
     
    11181151    if (ret == 1) {
    11191152#if 0
    1120         printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
     1153        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
    11211154               env->eip, env->cr[2], env->error_code);
    11221155#endif
     
    13491382}
    13501383
     1384#elif defined (TARGET_MICROBLAZE)
     1385static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     1386                                    int is_write, sigset_t *old_set,
     1387                                    void *puc)
     1388{
     1389    TranslationBlock *tb;
     1390    int ret;
     1391
     1392    if (cpu_single_env)
     1393        env = cpu_single_env; /* XXX: find a correct solution for multithread */
     1394#if defined(DEBUG_SIGNAL)
     1395    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
     1396           pc, address, is_write, *(unsigned long *)old_set);
     1397#endif
     1398    /* XXX: locking issue */
     1399    if (is_write && page_unprotect(h2g(address), pc, puc)) {
     1400        return 1;
     1401    }
     1402
     1403    /* see if it is an MMU fault */
     1404    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     1405    if (ret < 0)
     1406        return 0; /* not an MMU fault */
     1407    if (ret == 0)
     1408        return 1; /* the MMU fault was handled without causing real CPU fault */
     1409
     1410    /* now we have a real cpu fault */
     1411    tb = tb_find_pc(pc);
     1412    if (tb) {
     1413        /* the PC is inside the translated code. It means that we have
     1414           a virtual CPU fault */
     1415        cpu_restore_state(tb, env, pc, puc);
     1416    }
     1417    if (ret == 1) {
     1418#if 0
     1419        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
     1420               env->PC, env->error_code, tb);
     1421#endif
     1422    /* we restore the process signal mask as the sigreturn should
     1423       do it (XXX: use sigsetjmp) */
     1424        sigprocmask(SIG_SETMASK, old_set, NULL);
     1425        cpu_loop_exit();
     1426    } else {
     1427        /* activate soft MMU for this block */
     1428        cpu_resume_from_signal(env, puc);
     1429    }
     1430    /* never comes here */
     1431    return 1;
     1432}
     1433
    13511434#elif defined (TARGET_SH4)
    13521435static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     
    14911574# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
    14921575# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
     1576# define MASK_sig(context)    ((context)->uc_sigmask)
     1577#elif defined(__OpenBSD__)
     1578# define EIP_sig(context)     ((context)->sc_eip)
     1579# define TRAP_sig(context)    ((context)->sc_trapno)
     1580# define ERROR_sig(context)   ((context)->sc_err)
     1581# define MASK_sig(context)    ((context)->sc_mask)
    14931582#else
    14941583# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
    14951584# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
    14961585# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
     1586# define MASK_sig(context)    ((context)->uc_sigmask)
    14971587#endif
    14981588
     
    15011591{
    15021592    siginfo_t *info = pinfo;
     1593#if defined(__OpenBSD__)
     1594    struct sigcontext *uc = puc;
     1595#else
    15031596    struct ucontext *uc = puc;
     1597#endif
    15041598    unsigned long pc;
    15051599    int trapno;
     
    15161610                             trapno == 0xe ?
    15171611                             (ERROR_sig(uc) >> 1) & 1 : 0,
    1518                              &uc->uc_sigmask, puc);
     1612                             &MASK_sig(uc), puc);
    15191613}
    15201614
     
    15221616
    15231617#ifdef __NetBSD__
    1524 #define REG_ERR _REG_ERR
    1525 #define REG_TRAPNO _REG_TRAPNO
    1526 
    1527 #define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
    1528 #define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
     1618#define PC_sig(context)       _UC_MACHINE_PC(context)
     1619#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
     1620#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
     1621#define MASK_sig(context)     ((context)->uc_sigmask)
     1622#elif defined(__OpenBSD__)
     1623#define PC_sig(context)       ((context)->sc_rip)
     1624#define TRAP_sig(context)     ((context)->sc_trapno)
     1625#define ERROR_sig(context)    ((context)->sc_err)
     1626#define MASK_sig(context)     ((context)->sc_mask)
    15291627#else
    1530 #define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
    1531 #define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
     1628#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
     1629#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
     1630#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
     1631#define MASK_sig(context)     ((context)->uc_sigmask)
    15321632#endif
    15331633
     
    15391639#ifdef __NetBSD__
    15401640    ucontext_t *uc = puc;
     1641#elif defined(__OpenBSD__)
     1642    struct sigcontext *uc = puc;
    15411643#else
    15421644    struct ucontext *uc = puc;
    15431645#endif
    15441646
    1545     pc = QEMU_UC_MACHINE_PC(uc);
     1647    pc = PC_sig(uc);
    15461648    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
    1547                              QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
    1548                              (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
    1549                              &uc->uc_sigmask, puc);
     1649                             TRAP_sig(uc) == 0xe ?
     1650                             (ERROR_sig(uc) >> 1) & 1 : 0,
     1651                             &MASK_sig(uc), puc);
    15501652}
    15511653
     
    16841786      switch((insn >> 19) & 0x3f) {
    16851787      case 0x05: // stb
     1788      case 0x15: // stba
    16861789      case 0x06: // sth
     1790      case 0x16: // stha
    16871791      case 0x04: // st
     1792      case 0x14: // sta
    16881793      case 0x07: // std
     1794      case 0x17: // stda
     1795      case 0x0e: // stx
     1796      case 0x1e: // stxa
    16891797      case 0x24: // stf
     1798      case 0x34: // stfa
    16901799      case 0x27: // stdf
     1800      case 0x37: // stdfa
     1801      case 0x26: // stqf
     1802      case 0x36: // stqfa
    16911803      case 0x25: // stfsr
     1804      case 0x3c: // casa
     1805      case 0x3e: // casxa
    16921806        is_write = 1;
    16931807        break;
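
Note on CPU_INTERRUPT_EXIT, which several of these files touch: the VBox block that turned it into EXCP_INTERRUPT is deleted above, cpu-all.h drops the define and gains a cpu_exit() declaration instead, and exec.c now just masks off the stale 0x01 bit when loading older savestates. The replacement mechanism is the per-CPU exit_request field visible in the cpu-defs.h hunk. A minimal sketch of the idea, not the merged implementation (which may also unlink the current TB):

    /* Ask cpu_exec() to leave its loop at the next translation-block boundary
       by raising the exit_request flag it polls between TBs. */
    static void sketch_request_exit(CPUState *env)
    {
        env->exit_request = 1;
    }
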
  • trunk/src/recompiler/cutils.c

    r36170 r36175  
    577577}
    578578
     579/* XXX: use host strnlen if available ? */
     580int qemu_strnlen(const char *s, int max_len)
     581{
     582    int i;
     583
     584    for(i = 0; i < max_len; i++) {
     585        if (s[i] == '\0') {
     586            break;
     587        }
     588    }
     589    return i;
     590}
     591
    579592#ifndef VBOX
    580593time_t mktimegm(struct tm *tm)
     
    609622}
    610623
     624void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov)
     625{
     626    int i;
     627
     628    qiov->iov = iov;
     629    qiov->niov = niov;
     630    qiov->nalloc = -1;
     631    qiov->size = 0;
     632    for (i = 0; i < niov; i++)
     633        qiov->size += iov[i].iov_len;
     634}
     635
    611636void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len)
    612637{
     638    assert(qiov->nalloc != -1);
     639
    613640    if (qiov->niov == qiov->nalloc) {
    614641        qiov->nalloc = 2 * qiov->nalloc + 1;
     
    623650void qemu_iovec_destroy(QEMUIOVector *qiov)
    624651{
     652    assert(qiov->nalloc != -1);
     653
    625654    qemu_free(qiov->iov);
    626655}
     
    628657void qemu_iovec_reset(QEMUIOVector *qiov)
    629658{
     659    assert(qiov->nalloc != -1);
     660
    630661    qiov->niov = 0;
    631662    qiov->size = 0;
     
    658689    }
    659690}
     691
    660692#endif /* !VBOX */
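
Note on the new qemu_iovec_init_external() above: it wraps an iovec array the caller already owns, and nalloc == -1 is what the new assertions in qemu_iovec_add()/qemu_iovec_destroy()/qemu_iovec_reset() key on, so an externally owned vector can never be grown or freed by the helpers. A hedged usage sketch (buf1/buf2 and their byte counts cb1/cb2 are placeholders):

    /* Describe two existing buffers without copying them; only the summed
       size is computed, the iov array stays caller-owned. */
    struct iovec iov[2];
    QEMUIOVector qiov;

    iov[0].iov_base = buf1;  iov[0].iov_len = cb1;
    iov[1].iov_base = buf2;  iov[1].iov_len = cb2;
    qemu_iovec_init_external(&qiov, iov, 2);
    /* qiov.size == cb1 + cb2; calling qemu_iovec_add(&qiov, ...) would assert. */
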
  • trunk/src/recompiler/disas.h

    r36170 r36175  
    11#ifndef _QEMU_DISAS_H
    22#define _QEMU_DISAS_H
     3
     4#include "qemu-common.h"
    35
    46/* Disassemble this for me please... (debugging). */
    57void disas(FILE *out, void *code, unsigned long size);
    68void target_disas(FILE *out, target_ulong code, target_ulong size, int flags);
    7 void monitor_disas(CPUState *env,
     9
     10#ifndef VBOX
     11/* The usual mess... FIXME: Remove this condition once dyngen-exec.h is gone */
     12#ifndef __DYNGEN_EXEC_H__
     13void monitor_disas(Monitor *mon, CPUState *env,
    814                   target_ulong pc, int nb_insn, int is_physical, int flags);
     15#endif
     16#endif
    917
    1018/* Look up symbol for debugging purpose.  Returns "" if unknown. */
  • trunk/src/recompiler/dyngen-exec.h

    r36170 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    4443   host headers do not allow that. */
    4544#include <stddef.h>
     45#include <stdint.h>
    4646
    4747#ifndef VBOX
     
    4949#ifdef __OpenBSD__
    5050#include <sys/types.h>
    51 #else
    52 typedef unsigned char uint8_t;
    53 typedef unsigned short uint16_t;
    54 typedef unsigned int uint32_t;
    55 // Linux/Sparc64 defines uint64_t
    56 #if !(defined (__sparc_v9__) && defined(__linux__)) && !(defined(__APPLE__) && defined(__x86_64__))
    57 /* XXX may be done for all 64 bits targets ? */
    58 #if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__) || defined(_ARCH_PPC64)
    59 typedef unsigned long uint64_t;
    60 #else
    61 typedef unsigned long long uint64_t;
    62 #endif
    63 #endif
    64 
    65 /* if Solaris/__sun__, don't typedef int8_t, as it will be typedef'd
    66    prior to this and will cause an error in compliation, conflicting
    67    with /usr/include/sys/int_types.h, line 75 */
    68 #ifndef __sun__
    69 typedef signed char int8_t;
    70 #endif
    71 typedef signed short int16_t;
    72 typedef signed int int32_t;
    73 // Linux/Sparc64 defines int64_t
    74 #if !(defined (__sparc_v9__) && defined(__linux__)) && !(defined(__APPLE__) && defined(__x86_64__))
    75 #if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__) || defined(_ARCH_PPC64)
    76 typedef signed long int64_t;
    77 #else
    78 typedef signed long long int64_t;
    79 #endif
    80 #endif
    8151#endif
    8252
     
    8454typedef void * host_reg_t;
    8555
    86 #define INT8_MIN                (-128)
    87 #define INT16_MIN               (-32767-1)
    88 #define INT32_MIN               (-2147483647-1)
    89 #define INT64_MIN               (-(int64_t)(9223372036854775807)-1)
    90 #define INT8_MAX                (127)
    91 #define INT16_MAX               (32767)
    92 #define INT32_MAX               (2147483647)
    93 #define INT64_MAX               ((int64_t)(9223372036854775807))
    94 #define UINT8_MAX               (255)
    95 #define UINT16_MAX              (65535)
    96 #define UINT32_MAX              (4294967295U)
    97 #define UINT64_MAX              ((uint64_t)(18446744073709551615))
    98 
    99 #ifdef _BSD
     56#ifdef HOST_BSD
    10057typedef struct __sFILE FILE;
    10158#else
     
    12380#define AREG1 "ebx"
    12481#define AREG2 "esi"
    125 #define AREG3 "edi"
    12682#else  /* VBOX - why are we different? */
    12783# define AREG0 "esi"
     
    13288#define AREG1 "r15"
    13389#define AREG2 "r12"
    134 #define AREG3 "r13"
    135 //#define AREG4 "rbp"
    136 //#define AREG5 "rbx"
    13790#elif defined(_ARCH_PPC)
    13891#define AREG0 "r27"
    13992#define AREG1 "r24"
    14093#define AREG2 "r25"
    141 #define AREG3 "r26"
    142 /* XXX: suppress this hack */
    143 #if defined(CONFIG_USER_ONLY)
    144 #define AREG4 "r16"
    145 #define AREG5 "r17"
    146 #define AREG6 "r18"
    147 #define AREG7 "r19"
    148 #define AREG8 "r20"
    149 #define AREG9 "r21"
    150 #define AREG10 "r22"
    151 #define AREG11 "r23"
    152 #endif
    15394#elif defined(__arm__)
    15495#define AREG0 "r7"
    15596#define AREG1 "r4"
    15697#define AREG2 "r5"
    157 #define AREG3 "r6"
    15898#elif defined(__hppa__)
    15999#define AREG0 "r17"
    160100#define AREG1 "r14"
    161101#define AREG2 "r15"
    162 #define AREG3 "r16"
    163102#elif defined(__mips__)
    164103#define AREG0 "fp"
    165104#define AREG1 "s0"
    166105#define AREG2 "s1"
    167 #define AREG3 "s2"
    168 #define AREG4 "s3"
    169 #define AREG5 "s4"
    170 #define AREG6 "s5"
    171 #define AREG7 "s6"
    172 #define AREG8 "s7"
    173106#elif defined(__sparc__)
    174107#ifdef HOST_SOLARIS
     
    176109#define AREG1 "g3"
    177110#define AREG2 "g4"
    178 #define AREG3 "g5"
    179 #define AREG4 "g6"
    180111#else
    181112#ifdef __sparc_v9__
     
    187118#define AREG1 "g1"
    188119#define AREG2 "g2"
    189 #define AREG3 "g3"
    190 #define AREG4 "l0"
    191 #define AREG5 "l1"
    192 #define AREG6 "l2"
    193 #define AREG7 "l3"
    194 #define AREG8 "l4"
    195 #define AREG9 "l5"
    196 #define AREG10 "l6"
    197 #define AREG11 "l7"
    198120#endif
    199121#endif
     
    202124#define AREG1 "r7"
    203125#define AREG2 "r8"
    204 #define AREG3 "r9"
    205126#elif defined(__alpha__)
    206127/* Note $15 is the frame pointer, so anything in op-i386.c that would
     
    209130#define AREG1 "$9"
    210131#define AREG2 "$10"
    211 #define AREG3 "$11"
    212 #define AREG4 "$12"
    213 #define AREG5 "$13"
    214 #define AREG6 "$14"
    215132#elif defined(__mc68000)
    216133#define AREG0 "%a5"
    217134#define AREG1 "%a4"
    218135#define AREG2 "%d7"
    219 #define AREG3 "%d6"
    220 #define AREG4 "%d5"
    221136#elif defined(__ia64__)
    222137#define AREG0 "r7"
    223138#define AREG1 "r4"
    224139#define AREG2 "r5"
    225 #define AREG3 "r6"
    226140#else
    227141#error unsupported CPU
  • trunk/src/recompiler/elf.h

    r36140 r36175  
    119119 */
    120120#define EM_S390_OLD     0xA390
     121
     122#define EM_XILINX_MICROBLAZE    0xBAAB
    121123
    122124/* This is the info that is needed to parse the dynamic section of the file */
     
    10801082#define EI_DATA         5
    10811083#define EI_VERSION      6
    1082 #define EI_PAD          7
     1084#define EI_OSABI        7
     1085#define EI_PAD          8
     1086
     1087#define ELFOSABI_NONE           0       /* UNIX System V ABI */
     1088#define ELFOSABI_SYSV           0       /* Alias.  */
     1089#define ELFOSABI_HPUX           1       /* HP-UX */
     1090#define ELFOSABI_NETBSD         2       /* NetBSD.  */
     1091#define ELFOSABI_LINUX          3       /* Linux.  */
     1092#define ELFOSABI_SOLARIS        6       /* Sun Solaris.  */
     1093#define ELFOSABI_AIX            7       /* IBM AIX.  */
     1094#define ELFOSABI_IRIX           8       /* SGI Irix.  */
     1095#define ELFOSABI_FREEBSD        9       /* FreeBSD.  */
     1096#define ELFOSABI_TRU64          10      /* Compaq TRU64 UNIX.  */
     1097#define ELFOSABI_MODESTO        11      /* Novell Modesto.  */
     1098#define ELFOSABI_OPENBSD        12      /* OpenBSD.  */
     1099#define ELFOSABI_ARM            97      /* ARM */
     1100#define ELFOSABI_STANDALONE     255     /* Standalone (embedded) application */
    10831101
    10841102#define ELFMAG0         0x7f            /* EI_MAG */
     
    11071125#define NT_PRPSINFO     3
    11081126#define NT_TASKSTRUCT   4
     1127#define NT_AUXV         6
    11091128#define NT_PRXFPREG     0x46e62b7f      /* copied from gdb5.1/include/elf/common.h */
    11101129
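
Note on the elf.h additions: EI_PAD moves from 7 to 8 to make room for the EI_OSABI identification byte, together with the usual ELFOSABI_* constants. A short sketch of how a loader can branch on that byte (e_ident is assumed to come from an already-validated ELF header):

    /* Accept the ABIs this sketch cares about, reject anything else. */
    static int sketch_check_osabi(const unsigned char *e_ident)
    {
        switch (e_ident[EI_OSABI]) {
        case ELFOSABI_NONE:         /* plain System V object */
        case ELFOSABI_LINUX:
        case ELFOSABI_FREEBSD:
        case ELFOSABI_NETBSD:
        case ELFOSABI_OPENBSD:
            return 0;
        default:
            return -1;              /* unknown or unsupported ABI */
        }
    }
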
  • trunk/src/recompiler/exec-all.h

    r36170 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    5857
    5958/* XXX: make safe guess about sizes */
    60 #define MAX_OP_PER_INSTR 64
     59#define MAX_OP_PER_INSTR 96
    6160/* A Call op needs up to 6 + 2N parameters (N = number of arguments).  */
    6261#define MAX_OPC_PARAM 10
     
    243242
    244243    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    245     *(uint32_t *)jmp_addr |= ((addr - (jmp_addr + 8)) >> 2) & 0xffffff;
     244    *(uint32_t *)jmp_addr =
     245        (*(uint32_t *)jmp_addr & ~0xffffff)
     246        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
    246247
    247248#if QEMU_GNUC_PREREQ(4, 1)
     
    350351{
    351352    int mmu_idx, page_index, pd;
     353    void *p;
    352354
    353355    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     
    377379    return addr + env1->phys_addends[mmu_idx][page_index];
    378380# else
    379     return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base;
     381    p = (void *)(unsigned long)addr
     382        + env1->tlb_table[mmu_idx][page_index].addend;
     383    return qemu_ram_addr_from_host(p);
    380384# endif
    381385}
     
    396400#endif
    397401
    398 #ifdef USE_KQEMU
     402#ifdef CONFIG_KQEMU
    399403#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
    400404
     
    414418extern uint32_t kqemu_comm_base;
    415419
     420extern ram_addr_t kqemu_phys_ram_size;
     421extern uint8_t *kqemu_phys_ram_base;
     422
    416423static inline int kqemu_is_ok(CPUState *env)
    417424{
     
    431438
    432439CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
    433 #endif
     440
     441/* vl.c */
     442#ifndef VBOX
     443extern int singlestep;
     444#endif
     445
     446#endif
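
Note on the ARM jump-patching hunk above: the old code only OR'ed the new 24-bit offset into the branch instruction, so when a TB jump was re-pointed at a different target, stale offset bits could survive and yield a bogus branch. The new code clears the offset field before inserting the new value. The same read-modify-write written out as a standalone helper for clarity (illustrative, not part of the tree):

    /* Re-point an ARM B/BL instruction at jmp_addr so it branches to addr.
       The offset field is PC-relative (PC reads as insn address + 8) and is
       counted in 32-bit words. */
    static inline void sketch_patch_arm_branch(unsigned long jmp_addr, unsigned long addr)
    {
        uint32_t insn = *(uint32_t *)jmp_addr;
        insn &= ~0xffffffu;                                    /* wipe the old offset */
        insn |= ((addr - (jmp_addr + 8)) >> 2) & 0xffffffu;    /* insert the new one  */
        *(uint32_t *)jmp_addr = insn;
        /* the caller still has to flush the instruction cache for jmp_addr */
    }
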
  • trunk/src/recompiler/exec.c

    r36171 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    3130#ifndef VBOX
    3231#ifdef _WIN32
    33 #define WIN32_LEAN_AND_MEAN
    3432#include <windows.h>
    3533#else
     
    8583#define SMC_BITMAP_USE_THRESHOLD 10
    8684
    87 #define MMAP_AREA_START        0x00000000
    88 #define MMAP_AREA_END          0xa8000000
    89 
    9085#if defined(TARGET_SPARC64)
    9186#define TARGET_PHYS_ADDR_SPACE_BITS 41
     
    9792#elif defined(TARGET_PPC64)
    9893#define TARGET_PHYS_ADDR_SPACE_BITS 42
    99 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
     94#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
    10095#define TARGET_PHYS_ADDR_SPACE_BITS 42
    101 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
     96#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
    10297#define TARGET_PHYS_ADDR_SPACE_BITS 36
    10398#else
     
    121116    __attribute__((__section__(".gen_code")))           \
    122117    __attribute__((aligned (32)))
     118#elif defined(_WIN32)
     119/* Maximum alignment for Win32 is 16. */
     120#define code_gen_section                                \
     121    __attribute__((aligned (16)))
    123122#else
    124123#define code_gen_section                                \
     
    138137#ifndef VBOX
    139138#if !defined(CONFIG_USER_ONLY)
    140 ram_addr_t phys_ram_size;
    141139int phys_ram_fd;
    142 uint8_t *phys_ram_base;
    143140uint8_t *phys_ram_dirty;
    144141static int in_migration;
    145 static ram_addr_t phys_ram_alloc_offset = 0;
     142
     143typedef struct RAMBlock {
     144    uint8_t *host;
     145    ram_addr_t offset;
     146    ram_addr_t length;
     147    struct RAMBlock *next;
     148} RAMBlock;
     149
     150static RAMBlock *ram_blocks;
     151/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
     152   then we can no longer assume contiguous ram offsets, and external uses
     153   of this variable will break.  */
     154ram_addr_t last_ram_offset;
    146155#endif
    147156#else /* VBOX */
    148 RTGCPHYS phys_ram_size;
    149157/* we have memory ranges (the high PC-BIOS mapping) which
    150158   causes some pages to fall outside the dirty map here. */
     
    225233CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
    226234void *io_mem_opaque[IO_MEM_NB_ENTRIES];
    227 char io_mem_used[IO_MEM_NB_ENTRIES];
     235static char io_mem_used[IO_MEM_NB_ENTRIES];
    228236static int io_mem_watch;
    229237#endif
     
    406414        size_t len = sizeof(PageDesc) * L2_SIZE;
    407415        /* Don't use qemu_malloc because it may recurse.  */
    408         p = mmap(0, len, PROT_READ | PROT_WRITE,
     416        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
    409417                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    410418        *lp = p;
     
    431439
    432440    p = *lp;
    433     if (!p)
    434         return 0;
     441    if (!p) {
     442        return NULL;
     443    }
    435444    return p + (index & (L2_SIZE - 1));
    436445}
     
    512521
    513522#if defined(CONFIG_USER_ONLY)
    514 /* Currently it is not recommanded to allocate big chunks of data in
     523/* Currently it is not recommended to allocate big chunks of data in
    515524   user mode. It will change when a dedicated libc will be used */
    516525#define USE_STATIC_CODE_GEN_BUFFER
    517526#endif
    518527
    519 /* VBox allocates codegen buffer dynamically */
    520 #ifndef VBOX
     528#if defined(VBOX) && defined(USE_STATIC_CODE_GEN_BUFFER)
     529# error "VBox allocates codegen buffer dynamically"
     530#endif
     531
    521532#ifdef USE_STATIC_CODE_GEN_BUFFER
    522533static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
    523 #endif
    524534#endif
    525535
     
    531541    map_exec(code_gen_buffer, code_gen_buffer_size);
    532542#else
    533 #ifdef VBOX
     543# ifdef VBOX
    534544    /* We cannot use phys_ram_size here, as it's 0 now,
    535545     * it only gets initialized once RAM registration callback
     
    537547     */
    538548    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    539 #else
     549# else  /* !VBOX */
    540550    code_gen_buffer_size = tb_size;
    541551    if (code_gen_buffer_size == 0) {
     
    550560    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
    551561        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    552 #endif /* VBOX */
     562# endif /* !VBOX */
    553563    /* The code gen buffer location may have constraints depending on
    554564       the host cpu and OS */
    555 #ifdef VBOX
     565# ifdef VBOX
    556566    code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
    557567
     
    561571        return;
    562572    }
    563 #else  /* !VBOX */
     573# else  /* !VBOX */
    564574#if defined(__linux__)
    565575    {
     
    594604        }
    595605    }
    596 #elif defined(__FreeBSD__)
     606#elif defined(__FreeBSD__) || defined(__DragonFly__)
    597607    {
    598608        int flags;
     
    620630    map_exec(code_gen_buffer, code_gen_buffer_size);
    621631#endif
    622 #endif /* !VBOX */
     632# endif /* !VBOX */
    623633#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    624634#ifndef VBOX
     
    656666    CPUState *env = opaque;
    657667
     668    cpu_synchronize_state(env, 0);
     669
    658670    qemu_put_be32s(f, &env->halted);
    659671    qemu_put_be32s(f, &env->interrupt_request);
     
    669681    qemu_get_be32s(f, &env->halted);
    670682    qemu_get_be32s(f, &env->interrupt_request);
    671     env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
     683    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
     684       version_id is increased. */
     685    env->interrupt_request &= ~0x01;
    672686    tlb_flush(env, 1);
     687    cpu_synchronize_state(env, 1);
    673688
    674689    return 0;
    675690}
    676691#endif
     692
     693CPUState *qemu_get_cpu(int cpu)
     694{
     695    CPUState *env = first_cpu;
     696
     697    while (env) {
     698        if (env->cpu_index == cpu)
     699            break;
     700        env = env->next_cpu;
     701    }
     702
     703    return env;
     704}
     705
    677706#endif /* !VBOX */
    678707
     
    682711    int cpu_index;
    683712
     713#if defined(CONFIG_USER_ONLY)
     714    cpu_list_lock();
     715#endif
    684716    env->next_cpu = NULL;
    685717    penv = &first_cpu;
    686718    cpu_index = 0;
    687719    while (*penv != NULL) {
    688         penv = (CPUState **)&(*penv)->next_cpu;
     720        penv = &(*penv)->next_cpu;
    689721        cpu_index++;
    690722    }
    691723    env->cpu_index = cpu_index;
     724    env->numa_node = 0;
    692725    TAILQ_INIT(&env->breakpoints);
    693726    TAILQ_INIT(&env->watchpoints);
    694727    *penv = env;
    695728#ifndef VBOX
     729#if defined(CONFIG_USER_ONLY)
     730    cpu_list_unlock();
     731#endif
    696732#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    697733    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
     
    789825            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
    790826                  address >= tb->pc + tb->size)) {
    791                 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
     827                printf("ERROR invalidate: address=" TARGET_FMT_lx
     828                       " PC=%08lx size=%04x\n",
    792829                       address, (long)tb->pc, tb->size);
    793830            }
     
    811848            }
    812849        }
    813     }
    814 }
    815 
    816 static void tb_jmp_check(TranslationBlock *tb)
    817 {
    818     TranslationBlock *tb1;
    819     unsigned int n1;
    820 
    821     /* suppress any remaining jumps to this TB */
    822     tb1 = tb->jmp_first;
    823     for(;;) {
    824         n1 = (long)tb1 & 3;
    825         tb1 = (TranslationBlock *)((long)tb1 & ~3);
    826         if (n1 == 2)
    827             break;
    828         tb1 = tb1->jmp_next[n1];
    829     }
    830     /* check end of list */
    831     if (tb1 != tb) {
    832         printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    833850    }
    834851}
     
    958975    tb_phys_invalidate_count++;
    959976}
    960 
    961977
    962978#ifdef VBOX
     
    16811697    if (env->singlestep_enabled != enabled) {
    16821698        env->singlestep_enabled = enabled;
    1683         /* must flush all the translated code to avoid inconsistancies */
    1684         /* XXX: only flush what is necessary */
    1685         tb_flush(env);
     1699        if (kvm_enabled())
     1700            kvm_update_guest_debug(env, 0);
     1701        else {
     1702            /* must flush all the translated code to avoid inconsistencies */
     1703            /* XXX: only flush what is necessary */
     1704            tb_flush(env);
     1705        }
    16861706    }
    16871707#endif
     
    16891709
    16901710#ifndef VBOX
     1711
    16911712/* enable or disable low levels log */
    16921713void cpu_set_log(int log_flags)
     
    17251746    cpu_set_log(loglevel);
    17261747}
     1748
    17271749#endif /* !VBOX */
    17281750
    1729 /* mask must never be zero, except for A20 change call */
    1730 void cpu_interrupt(CPUState *env, int mask)
    1731 {
    1732 #if !defined(USE_NPTL)
    1733     TranslationBlock *tb;
    1734     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
    1735 #endif
    1736     int old_mask;
    1737 
    1738     if (mask & CPU_INTERRUPT_EXIT) {
    1739         env->exit_request = 1;
    1740         mask &= ~CPU_INTERRUPT_EXIT;
    1741     }
    1742 
    1743     old_mask = env->interrupt_request;
    1744 #ifdef VBOX
    1745     VM_ASSERT_EMT(env->pVM);
    1746     ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
    1747 #else /* !VBOX */
    1748     /* FIXME: This is probably not threadsafe.  A different thread could
    1749        be in the middle of a read-modify-write operation.  */
    1750     env->interrupt_request |= mask;
    1751 #endif /* !VBOX */
     1751static void cpu_unlink_tb(CPUState *env)
     1752{
    17521753#if defined(USE_NPTL)
    17531754    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
     
    17561757       signals are used primarily to interrupt blocking syscalls.  */
    17571758#else
     1759    TranslationBlock *tb;
     1760    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
     1761
     1762    tb = env->current_tb;
     1763    /* if the cpu is currently executing code, we must unlink it and
     1764       all the potentially executing TB */
     1765    if (tb && !testandset(&interrupt_lock)) {
     1766        env->current_tb = NULL;
     1767        tb_reset_jump_recursive(tb);
     1768        resetlock(&interrupt_lock);
     1769    }
     1770#endif
     1771}
     1772
     1773/* mask must never be zero, except for A20 change call */
     1774void cpu_interrupt(CPUState *env, int mask)
     1775{
     1776    int old_mask;
     1777
     1778    old_mask = env->interrupt_request;
     1779#ifndef VBOX
     1780    env->interrupt_request |= mask;
     1781#else  /* VBOX */
     1782    VM_ASSERT_EMT(env->pVM);
     1783    ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
     1784#endif /* VBOX */
     1785
     1786#ifndef VBOX
     1787#ifndef CONFIG_USER_ONLY
     1788    /*
     1789     * If called from iothread context, wake the target cpu in
     1790     * case its halted.
     1791     */
     1792    if (!qemu_cpu_self(env)) {
     1793        qemu_cpu_kick(env);
     1794        return;
     1795    }
     1796#endif
     1797#endif /* !VBOX */
     1798
    17581799    if (use_icount) {
    17591800        env->icount_decr.u16.high = 0xffff;
     
    17651806#endif
    17661807    } else {
    1767         tb = env->current_tb;
    1768         /* if the cpu is currently executing code, we must unlink it and
    1769            all the potentially executing TB */
    1770         if (tb && !testandset(&interrupt_lock)) {
    1771             env->current_tb = NULL;
    1772             tb_reset_jump_recursive(tb);
    1773             resetlock(&interrupt_lock);
    1774         }
    1775     }
    1776 #endif
     1808        cpu_unlink_tb(env);
     1809    }
    17771810}
    17781811
     
    17881821    env->interrupt_request &= ~mask;
    17891822#endif /* !VBOX */
     1823}
     1824
     1825void cpu_exit(CPUState *env)
     1826{
     1827    env->exit_request = 1;
     1828    cpu_unlink_tb(env);
    17901829}
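
The hunk above pulls the TB-unchaining logic out of cpu_interrupt() into cpu_unlink_tb() so the new cpu_exit() can share it: whoever wins the testandset() on interrupt_lock clears env->current_tb and resets the jump chains, every other caller simply skips the work. Below is a minimal standalone sketch of that try-lock guard; the GCC atomic builtins stand in for qemu-lock.h's testandset()/resetlock(), and the names, dummy TB object and printf are illustrative, not the recompiler's code.

    #include <stdio.h>

    /* Illustrative stand-ins for qemu-lock.h's spinlock_t/testandset/resetlock. */
    typedef volatile int spinlock_t;
    #define SPIN_LOCK_UNLOCKED 0
    static int  testandset(spinlock_t *l) { return __sync_lock_test_and_set(l, 1); }
    static void resetlock(spinlock_t *l)  { __sync_lock_release(l); }

    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
    static int dummy_tb;
    static void *current_tb = &dummy_tb;        /* pretend a TB is executing */

    /* Same shape as cpu_unlink_tb(): only the caller that wins the try-lock
       clears current_tb and resets the jump chains. */
    static void unlink_current_tb(void)
    {
        void *tb = current_tb;
        if (tb && !testandset(&interrupt_lock)) {
            current_tb = NULL;
            printf("unlinking TB %p\n", tb);    /* tb_reset_jump_recursive(tb) in QEMU */
            resetlock(&interrupt_lock);
        }
    }

    int main(void)
    {
        unlink_current_tb();    /* does the work */
        unlink_current_tb();    /* current_tb is already NULL: nothing to do */
        return 0;
    }
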
    17911830
     
    19561995}
    19571996
    1958 #ifdef VBOX
    19591997static CPUTLBEntry s_cputlb_empty_entry = {
    19601998    .addr_read  = -1,
     
    19632001    .addend     = -1,
    19642002};
    1965 #endif /* VBOX */
    19662003
    19672004/* NOTE: if flush_global is true, also flush global entries (not
     
    19792016
    19802017    for(i = 0; i < CPU_TLB_SIZE; i++) {
    1981 #ifdef VBOX
    19822018        int mmu_idx;
    19832019        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
    19842020            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
    19852021        }
    1986 #else  /* !VBOX */
    1987         env->tlb_table[0][i].addr_read = -1;
    1988         env->tlb_table[0][i].addr_write = -1;
    1989         env->tlb_table[0][i].addr_code = -1;
    1990         env->tlb_table[1][i].addr_read = -1;
    1991         env->tlb_table[1][i].addr_write = -1;
    1992         env->tlb_table[1][i].addr_code = -1;
    1993 #if (NB_MMU_MODES >= 3)
    1994         env->tlb_table[2][i].addr_read = -1;
    1995         env->tlb_table[2][i].addr_write = -1;
    1996         env->tlb_table[2][i].addr_code = -1;
    1997 #if (NB_MMU_MODES == 4)
    1998         env->tlb_table[3][i].addr_read = -1;
    1999         env->tlb_table[3][i].addr_write = -1;
    2000         env->tlb_table[3][i].addr_code = -1;
    2001 #endif
    2002 #endif
    2003 #endif /* !VBOX */
    20042022    }
    20052023
    20062024    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    20072025
     2026#ifdef CONFIG_KQEMU
     2027    if (env->kqemu_enabled) {
     2028        kqemu_flush(env, flush_global);
     2029    }
     2030#endif
    20082031#ifdef VBOX
    20092032    /* inform raw mode about TLB flush */
    20102033    remR3FlushTLB(env, flush_global);
    2011 #endif
    2012 #ifdef USE_KQEMU
    2013     if (env->kqemu_enabled) {
    2014         kqemu_flush(env, flush_global);
    2015     }
    20162034#endif
    20172035    tlb_flush_count++;
     
    20262044        addr == (tlb_entry->addr_code &
    20272045                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
    2028         tlb_entry->addr_read = -1;
    2029         tlb_entry->addr_write = -1;
    2030         tlb_entry->addr_code = -1;
     2046        *tlb_entry = s_cputlb_empty_entry;
    20312047    }
    20322048}
     
    20352051{
    20362052    int i;
     2053    int mmu_idx;
    20372054
    20382055#if defined(DEBUG_TLB)
     
    20452062    addr &= TARGET_PAGE_MASK;
    20462063    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    2047     tlb_flush_entry(&env->tlb_table[0][i], addr);
    2048     tlb_flush_entry(&env->tlb_table[1][i], addr);
    2049 #if (NB_MMU_MODES >= 3)
    2050     tlb_flush_entry(&env->tlb_table[2][i], addr);
    2051 #if (NB_MMU_MODES == 4)
    2052     tlb_flush_entry(&env->tlb_table[3][i], addr);
    2053 #endif
    2054 #endif
     2064    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
     2065        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    20552066
    20562067    tlb_flush_jmp_cache(env, addr);
    20572068
    2058 #ifdef USE_KQEMU
     2069#ifdef CONFIG_KQEMU
    20592070    if (env->kqemu_enabled) {
    20602071        kqemu_flush_page(env, addr);
     
    21042115}
    21052116
     2117/* Note: start and end must be within the same ram block.  */
    21062118void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    21072119                                     int dirty_flags)
     
    21192131        return;
    21202132    len = length >> TARGET_PAGE_BITS;
    2121 #ifdef USE_KQEMU
     2133#ifdef CONFIG_KQEMU
    21222134    /* XXX: should not depend on cpu context */
    21232135    env = first_cpu;
     
    21442156    start1 = start;
    21452157#elif !defined(VBOX)
    2146     start1 = start + (unsigned long)phys_ram_base;
     2158    start1 = (unsigned long)qemu_get_ram_ptr(start);
     2159    /* Check that we don't span multiple blocks - this breaks the
     2160       address comparisons below.  */
     2161    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
     2162            != (end - 1) - start) {
     2163        abort();
     2164    }
    21472165#else
    21482166    start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
    21492167#endif
     2168
    21502169    for(env = first_cpu; env != NULL; env = env->next_cpu) {
    2151         for(i = 0; i < CPU_TLB_SIZE; i++)
    2152             tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
    2153         for(i = 0; i < CPU_TLB_SIZE; i++)
    2154             tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    2155 #if (NB_MMU_MODES >= 3)
    2156         for(i = 0; i < CPU_TLB_SIZE; i++)
    2157             tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
    2158 #if (NB_MMU_MODES == 4)
    2159         for(i = 0; i < CPU_TLB_SIZE; i++)
    2160             tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
    2161 #endif
    2162 #endif
     2170        int mmu_idx;
     2171        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
     2172            for(i = 0; i < CPU_TLB_SIZE; i++)
     2173                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
     2174                                      start1, length);
     2175        }
    21632176    }
    21642177}
     
    21682181{
    21692182    in_migration = enable;
     2183    if (kvm_enabled()) {
     2184        return kvm_set_migration_log(enable);
     2185    }
    21702186    return 0;
    21712187}
     
    21772193#endif /* !VBOX */
    21782194
    2179 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
    2180 {
     2195int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
     2196                                   target_phys_addr_t end_addr)
     2197{
     2198    int ret = 0;
     2199
    21812200    if (kvm_enabled())
    2182         kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
     2201        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
     2202    return ret;
    21832203}
    21842204
     
    21902210{
    21912211    ram_addr_t ram_addr;
     2212    void *p;
    21922213
    21932214    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
    2194         /* RAM case */
    21952215#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    21962216        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
    21972217#elif !defined(VBOX)
    2198         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
    2199             tlb_entry->addend - (unsigned long)phys_ram_base;
     2218        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
     2219            + tlb_entry->addend);
     2220        ram_addr = qemu_ram_addr_from_host(p);
    22002221#else
    22012222        Assert(phys_addend != -1);
     
    22122233{
    22132234    int i;
     2235    int mmu_idx;
     2236    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
     2237        for(i = 0; i < CPU_TLB_SIZE; i++)
    22142238#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
    2215     for(i = 0; i < CPU_TLB_SIZE; i++)
    2216         tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
    2217     for(i = 0; i < CPU_TLB_SIZE; i++)
    2218         tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
    2219 # if (NB_MMU_MODES >= 3)
    2220     for(i = 0; i < CPU_TLB_SIZE; i++)
    2221         tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
    2222 #  if (NB_MMU_MODES == 4)
    2223     for(i = 0; i < CPU_TLB_SIZE; i++)
    2224         tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
    2225 #  endif
    2226 # endif
    2227 #else /* VBOX */
    2228     for(i = 0; i < CPU_TLB_SIZE; i++)
    2229         tlb_update_dirty(&env->tlb_table[0][i]);
    2230     for(i = 0; i < CPU_TLB_SIZE; i++)
    2231         tlb_update_dirty(&env->tlb_table[1][i]);
    2232 #if (NB_MMU_MODES >= 3)
    2233     for(i = 0; i < CPU_TLB_SIZE; i++)
    2234         tlb_update_dirty(&env->tlb_table[2][i]);
    2235 #if (NB_MMU_MODES == 4)
    2236     for(i = 0; i < CPU_TLB_SIZE; i++)
    2237         tlb_update_dirty(&env->tlb_table[3][i]);
    2238 #endif
    2239 #endif
    2240 #endif /* VBOX */
     2239            tlb_update_dirty(&env->tlb_table[mmu_idx][i], env->phys_addends[mmu_idx][i]);
     2240#else
     2241            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
     2242#endif
     2243    }
    22412244}
    22422245
     
    22522255{
    22532256    int i;
     2257    int mmu_idx;
    22542258
    22552259    vaddr &= TARGET_PAGE_MASK;
    22562260    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    2257     tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    2258     tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
    2259 #if (NB_MMU_MODES >= 3)
    2260     tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
    2261 #if (NB_MMU_MODES == 4)
    2262     tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
    2263 #endif
    2264 #endif
     2261    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
     2262        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    22652263}
    22662264
     
    23072305    addend = pd & TARGET_PAGE_MASK;
    23082306#elif !defined(VBOX)
    2309     addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
     2307    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    23102308#else
    23112309    /** @todo this is racing the phys_page_find call above since it may register
    23122310     *        a new chunk of memory...  */
    2313     addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
    2314                                                pd & TARGET_PAGE_MASK,
    2315                                                !!(prot & PAGE_WRITE));
     2311    addend = (unsigned long)remR3TlbGCPhys2Ptr(env, pd & TARGET_PAGE_MASK, !!(prot & PAGE_WRITE));
    23162312#endif
    23172313
     
    23242320            iotlb |= IO_MEM_ROM;
    23252321    } else {
    2326         /* IO handlers are currently passed a phsical address.
     2322        /* IO handlers are currently passed a physical address.
    23272323           It would be nice to pass an offset from the base address
    23282324           of that region.  This would avoid having to special case RAM,
     
    23452341
    23462342    code_address = address;
    2347 
    23482343#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
     2344
    23492345    if (addend & 0x3)
    23502346    {
     
    23692365        addend &= ~(target_ulong)0x3;
    23702366    }
    2371 #endif
    2372 
     2367
     2368#endif
    23732369    /* Make accesses to pages with watchpoints go via the
    23742370       watchpoint trap routines.  */
     
    24482444
    24492445#ifndef VBOX
    2450 /* dump memory mappings */
    2451 void page_dump(FILE *f)
     2446
     2447/*
     2448 * Walks guest process memory "regions" one by one
     2449 * and calls callback function 'fn' for each region.
     2450 */
     2451int walk_memory_regions(void *priv,
     2452    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
    24522453{
    24532454    unsigned long start, end;
     2455    PageDesc *p = NULL;
    24542456    int i, j, prot, prot1;
    2455     PageDesc *p;
    2456 
    2457     fprintf(f, "%-8s %-8s %-8s %s\n",
    2458             "start", "end", "size", "prot");
    2459     start = -1;
    2460     end = -1;
     2457    int rc = 0;
     2458
     2459    start = end = -1;
    24612460    prot = 0;
    2462     for(i = 0; i <= L1_SIZE; i++) {
    2463         if (i < L1_SIZE)
    2464             p = l1_map[i];
    2465         else
    2466             p = NULL;
    2467         for(j = 0;j < L2_SIZE; j++) {
    2468             if (!p)
    2469                 prot1 = 0;
    2470             else
    2471                 prot1 = p[j].flags;
     2461
     2462    for (i = 0; i <= L1_SIZE; i++) {
     2463        p = (i < L1_SIZE) ? l1_map[i] : NULL;
     2464        for (j = 0; j < L2_SIZE; j++) {
     2465            prot1 = (p == NULL) ? 0 : p[j].flags;
     2466            /*
     2467             * "region" is one continuous chunk of memory
     2468             * that has the same protection flags set.
     2469             */
    24722470            if (prot1 != prot) {
    24732471                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
    24742472                if (start != -1) {
    2475                     fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
    2476                             start, end, end - start,
    2477                             prot & PAGE_READ ? 'r' : '-',
    2478                             prot & PAGE_WRITE ? 'w' : '-',
    2479                             prot & PAGE_EXEC ? 'x' : '-');
     2473                    rc = (*fn)(priv, start, end, prot);
     2474                    /* callback can stop iteration by returning != 0 */
     2475                    if (rc != 0)
     2476                        return (rc);
    24802477                }
    24812478                if (prot1 != 0)
     
    24852482                prot = prot1;
    24862483            }
    2487             if (!p)
     2484            if (p == NULL)
    24882485                break;
    24892486        }
    24902487    }
    2491 }
     2488    return (rc);
     2489}
     2490
     2491static int dump_region(void *priv, unsigned long start,
     2492    unsigned long end, unsigned long prot)
     2493{
     2494    FILE *f = (FILE *)priv;
     2495
     2496    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
     2497        start, end, end - start,
     2498        ((prot & PAGE_READ) ? 'r' : '-'),
     2499        ((prot & PAGE_WRITE) ? 'w' : '-'),
     2500        ((prot & PAGE_EXEC) ? 'x' : '-'));
     2501
     2502    return (0);
     2503}
     2504
     2505/* dump memory mappings */
     2506void page_dump(FILE *f)
     2507{
     2508    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
     2509            "start", "end", "size", "prot");
     2510    walk_memory_regions(f, dump_region);
     2511}
     2512
    24922513#endif /* !VBOX */
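
walk_memory_regions() generalises the old page_dump() loop: it scans the l1_map/PageDesc tables, merges consecutive pages with identical protection flags into one region, and invokes the callback once per region; a non-zero return from the callback stops the walk, which dump_region() above never triggers. A second, hypothetical callback sketched against the signature in this hunk (user-mode builds only; assumes the QEMU headers that provide PAGE_EXEC plus <stdio.h> are in scope, and find_first_exec/report_first_exec are invented names):

    /* Report the first executable region and stop walking. */
    static int find_first_exec(void *priv, unsigned long start,
                               unsigned long end, unsigned long prot)
    {
        unsigned long *where = priv;

        (void)end;
        if (prot & PAGE_EXEC) {
            *where = start;
            return 1;                  /* non-zero return ends the walk early */
        }
        return 0;                      /* keep walking */
    }

    static void report_first_exec(void)
    {
        unsigned long first_exec = 0;

        if (walk_memory_regions(&first_exec, find_first_exec) != 0)
            printf("first executable region starts at 0x%08lx\n", first_exec);
    }
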
    24932514
     
    25032524
    25042525/* modify the flags of a page and invalidate the code if
    2505    necessary. The flag PAGE_WRITE_ORG is positionned automatically
     2526   necessary. The flag PAGE_WRITE_ORG is positioned automatically
    25062527   depending on PAGE_WRITE */
    25072528void page_set_flags(target_ulong start, target_ulong end, int flags)
     
    25732594
    25742595/* called from signal handler: invalidate the code and unprotect the
    2575    page. Return TRUE if the fault was succesfully handled. */
     2596   page. Return TRUE if the fault was successfully handled. */
    25762597int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    25772598{
     
    26572678   io memory page.  The address used when calling the IO function is
    26582679   the offset from the start of the region, plus region_offset.  Both
    2659    start_region and regon_offset are rounded down to a page boundary
     2680   start_addr and region_offset are rounded down to a page boundary
    26602681   before calculating this offset.  This should not be a problem unless
    26612682   the low bits of start_addr and region_offset differ.  */
     
    26712692    void *subpage;
    26722693
    2673 #ifdef USE_KQEMU
     2694#ifdef CONFIG_KQEMU
    26742695    /* XXX: should not depend on cpu context */
    26752696    env = first_cpu;
     
    27732794}
    27742795
     2796#ifdef CONFIG_KQEMU
    27752797/* XXX: better than nothing */
     2798static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
     2799{
     2800    ram_addr_t addr;
     2801    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
     2802        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
     2803                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
     2804        abort();
     2805    }
     2806    addr = last_ram_offset;
     2807    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
     2808    return addr;
     2809}
     2810#endif
     2811
    27762812ram_addr_t qemu_ram_alloc(ram_addr_t size)
    27772813{
    2778     ram_addr_t addr;
    2779     if ((phys_ram_alloc_offset + size) > phys_ram_size) {
    2780         fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
    2781                 (uint64_t)size, (uint64_t)phys_ram_size);
     2814    RAMBlock *new_block;
     2815
     2816#ifdef CONFIG_KQEMU
     2817    if (kqemu_phys_ram_base) {
     2818        return kqemu_ram_alloc(size);
     2819    }
     2820#endif
     2821
     2822    size = TARGET_PAGE_ALIGN(size);
     2823    new_block = qemu_malloc(sizeof(*new_block));
     2824
     2825    new_block->host = qemu_vmalloc(size);
     2826    new_block->offset = last_ram_offset;
     2827    new_block->length = size;
     2828
     2829    new_block->next = ram_blocks;
     2830    ram_blocks = new_block;
     2831
     2832    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
     2833        (last_ram_offset + size) >> TARGET_PAGE_BITS);
     2834    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
     2835           0xff, size >> TARGET_PAGE_BITS);
     2836
     2837    last_ram_offset += size;
     2838
     2839    if (kvm_enabled())
     2840        kvm_setup_guest_memory(new_block->host, size);
     2841
     2842    return new_block->offset;
     2843}
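
The new qemu_ram_alloc() no longer bumps an offset inside one big phys_ram_base arena: each call vmallocs its own host block, links a RAMBlock record (host pointer, ram_addr offset, length) onto ram_blocks, and grows the phys_ram_dirty bitmap by one byte per target page, pre-set to 0xff (all dirty). A toy standalone model of that bookkeeping follows; the Block type, ram_alloc() and the 4 KiB page size are made up for illustration and error handling is omitted.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_BITS 12                  /* assume 4 KiB target pages */

    typedef struct Block {                /* stand-in for QEMU's RAMBlock */
        uint8_t *host;                    /* host backing memory */
        uint64_t offset, length;          /* position in guest ram_addr space */
        struct Block *next;
    } Block;

    static Block   *blocks;
    static uint64_t last_offset;          /* mirrors last_ram_offset */
    static uint8_t *dirty;                /* mirrors phys_ram_dirty */

    static uint64_t ram_alloc(uint64_t size)
    {
        Block *b = malloc(sizeof(*b));

        /* round up to a whole number of pages */
        size = (size + (1u << PAGE_BITS) - 1) & ~(uint64_t)((1u << PAGE_BITS) - 1);
        b->host   = malloc(size);
        b->offset = last_offset;
        b->length = size;
        b->next   = blocks;
        blocks    = b;

        /* grow the dirty bitmap and mark the new pages dirty (0xff) */
        dirty = realloc(dirty, (last_offset + size) >> PAGE_BITS);
        memset(dirty + (last_offset >> PAGE_BITS), 0xff, size >> PAGE_BITS);

        last_offset += size;
        return b->offset;
    }

    int main(void)
    {
        printf("block A at offset %llu\n", (unsigned long long)ram_alloc(8192));
        printf("block B at offset %llu\n", (unsigned long long)ram_alloc(100)); /* rounds up to 4096 */
        return 0;
    }
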
     2844
     2845void qemu_ram_free(ram_addr_t addr)
     2846{
     2847    /* TODO: implement this.  */
     2848}
     2849
     2850/* Return a host pointer to ram allocated with qemu_ram_alloc.
     2851   With the exception of the softmmu code in this file, this should
     2852   only be used for local memory (e.g. video ram) that the device owns,
     2853   and knows it isn't going to access beyond the end of the block.
     2854
     2855   It should not be used for general purpose DMA.
     2856   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
     2857 */
     2858void *qemu_get_ram_ptr(ram_addr_t addr)
     2859{
     2860    RAMBlock *prev;
     2861    RAMBlock **prevp;
     2862    RAMBlock *block;
     2863
     2864#ifdef CONFIG_KQEMU
     2865    if (kqemu_phys_ram_base) {
     2866        return kqemu_phys_ram_base + addr;
     2867    }
     2868#endif
     2869
     2870    prev = NULL;
     2871    prevp = &ram_blocks;
     2872    block = ram_blocks;
     2873    while (block && (block->offset > addr
     2874                     || block->offset + block->length <= addr)) {
     2875        if (prev)
     2876          prevp = &prev->next;
     2877        prev = block;
     2878        block = block->next;
     2879    }
     2880    if (!block) {
     2881        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    27822882        abort();
    27832883    }
    2784     addr = phys_ram_alloc_offset;
    2785     phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    2786     return addr;
    2787 }
    2788 
    2789 void qemu_ram_free(ram_addr_t addr)
    2790 {
    2791 }
     2884    /* Move this entry to the start of the list.  */
     2885    if (prev) {
     2886        prev->next = block->next;
     2887        block->next = *prevp;
     2888        *prevp = block;
     2889    }
     2890    return block->host + (addr - block->offset);
     2891}
     2892
     2893/* Some of the softmmu routines need to translate from a host pointer
     2894   (typically a TLB entry) back to a ram offset.  */
     2895ram_addr_t qemu_ram_addr_from_host(void *ptr)
     2896{
     2897    RAMBlock *prev;
     2898    RAMBlock **prevp;
     2899    RAMBlock *block;
     2900    uint8_t *host = ptr;
     2901
     2902#ifdef CONFIG_KQEMU
     2903    if (kqemu_phys_ram_base) {
     2904        return host - kqemu_phys_ram_base;
     2905    }
     2906#endif
     2907
     2908    prev = NULL;
     2909    prevp = &ram_blocks;
     2910    block = ram_blocks;
     2911    while (block && (block->host > host
     2912                     || block->host + block->length <= host)) {
     2913        if (prev)
     2914          prevp = &prev->next;
     2915        prev = block;
     2916        block = block->next;
     2917    }
     2918    if (!block) {
     2919        fprintf(stderr, "Bad ram pointer %p\n", ptr);
     2920        abort();
     2921    }
     2922    return block->offset + (host - block->host);
     2923}
     2924
    27922925#endif /* !VBOX */
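
With RAM split across blocks, both directions of translation become list searches: qemu_get_ram_ptr() finds the block whose [offset, offset+length) range covers the ram_addr and moves it to the front of the list (accesses cluster, so move-to-front keeps the hot block first), while qemu_ram_addr_from_host() runs the mirror-image search over host pointers, which cpu_physical_memory_unmap() and tlb_update_dirty() now rely on. A self-contained miniature of the forward lookup with the move-to-front step; the Block type and ram_ptr() are hypothetical, not QEMU's.

    #include <stdint.h>
    #include <stddef.h>

    typedef struct Block {                /* hypothetical, mirrors the new RAMBlock */
        uint8_t  *host;                   /* host backing memory */
        uint64_t  offset, length;         /* position in guest ram_addr space */
        struct Block *next;
    } Block;

    /* ram offset -> host pointer, with the same move-to-front step
       qemu_get_ram_ptr() performs after a successful search. */
    static uint8_t *ram_ptr(Block **head, uint64_t addr)
    {
        Block *prev = NULL, *b = *head;

        while (b && (b->offset > addr || b->offset + b->length <= addr)) {
            prev = b;
            b = b->next;
        }
        if (!b)
            return NULL;                  /* QEMU prints "Bad ram offset" and aborts */

        if (prev) {                       /* promote the hit to the list head */
            prev->next = b->next;
            b->next = *head;
            *head = b;
        }
        return b->host + (addr - b->offset);
    }

    int main(void)
    {
        static uint8_t mem_a[4096], mem_b[4096];
        Block b = { mem_b, 4096, 4096, NULL };     /* covers [4096, 8192) */
        Block a = { mem_a,    0, 4096, &b };       /* covers [0, 4096)    */
        Block *head = &a;

        mem_b[7] = 42;
        return *ram_ptr(&head, 4096 + 7) == 42 ? 0 : 1;   /* head is now &b */
    }
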
    27932926
     
    28913024    remR3PhysWriteU8(ram_addr, val);
    28923025#else
    2893     stb_p(phys_ram_base + ram_addr, val);
    2894 #endif
    2895 #ifdef USE_KQEMU
     3026    stb_p(qemu_get_ram_ptr(ram_addr), val);
     3027#endif
     3028#ifdef CONFIG_KQEMU
    28963029    if (cpu_single_env->kqemu_enabled &&
    28973030        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
     
    29333066    remR3PhysWriteU16(ram_addr, val);
    29343067#else
    2935     stw_p(phys_ram_base + ram_addr, val);
    2936 #endif
    2937 #ifdef USE_KQEMU
     3068    stw_p(qemu_get_ram_ptr(ram_addr), val);
     3069#endif
     3070#ifdef CONFIG_KQEMU
    29383071    if (cpu_single_env->kqemu_enabled &&
    29393072        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
     
    29753108    remR3PhysWriteU32(ram_addr, val);
    29763109#else
    2977     stl_p(phys_ram_base + ram_addr, val);
    2978 #endif
    2979 #ifdef USE_KQEMU
     3110    stl_p(qemu_get_ram_ptr(ram_addr), val);
     3111#endif
     3112#ifdef CONFIG_KQEMU
    29803113    if (cpu_single_env->kqemu_enabled &&
    29813114        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
     
    32133346    eidx = SUBPAGE_IDX(end);
    32143347#if defined(DEBUG_SUBPAGE)
    3215     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
     3348    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
    32163349           mmio, start, end, idx, eidx, memory);
    32173350#endif
     
    32443377
    32453378    mmio->base = base;
    3246     subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
     3379    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
    32473380#if defined(DEBUG_SUBPAGE)
    32483381    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
     
    32693402}
    32703403
    3271 static void io_mem_init(void)
    3272 {
    3273     int i;
    3274 
    3275     cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    3276     cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    3277     cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    3278     for (i=0; i<5; i++)
    3279         io_mem_used[i] = 1;
    3280 
    3281     io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
    3282                                           watch_mem_write, NULL);
    3283 
    3284 #ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
    3285     /* alloc dirty bits array */
    3286     phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    3287     memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
    3288 #endif /* !VBOX */
    3289 }
    3290 
    32913404/* mem_read and mem_write are arrays of functions containing the
    32923405   function to access byte (index 0), word (index 1) and dword (index
    3293    2). Functions can be omitted with a NULL function pointer. The
    3294    registered functions may be modified dynamically later.
     3406   2). Functions can be omitted with a NULL function pointer.
    32953407   If io_index is non zero, the corresponding io zone is
    32963408   modified. If it is zero, a new io zone is allocated. The return
    32973409   value can be used with cpu_register_physical_memory(). (-1) is
    32983410   returned if error. */
    3299 int cpu_register_io_memory(int io_index,
    3300                            CPUReadMemoryFunc **mem_read,
    3301                            CPUWriteMemoryFunc **mem_write,
    3302                            void *opaque)
     3411static int cpu_register_io_memory_fixed(int io_index,
     3412                                        CPUReadMemoryFunc **mem_read,
     3413                                        CPUWriteMemoryFunc **mem_write,
     3414                                        void *opaque)
    33033415{
    33043416    int i, subwidth = 0;
     
    33093421            return io_index;
    33103422    } else {
     3423        io_index >>= IO_MEM_SHIFT;
    33113424        if (io_index >= IO_MEM_NB_ENTRIES)
    33123425            return -1;
     
    33233436}
    33243437
     3438int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
     3439                           CPUWriteMemoryFunc **mem_write,
     3440                           void *opaque)
     3441{
     3442    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
     3443}
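
The public cpu_register_io_memory() loses its io_index parameter: callers always get a freshly allocated slot, while the fixed ROM/UNASSIGNED/NOTDIRTY entries are installed through the new internal cpu_register_io_memory_fixed(), which is why VBoxRecompiler.c drops the leading -1 arguments earlier in this changeset. Below is a hedged sketch of device-side registration with the new three-argument form; mydev_* and the opaque pointer are invented names, and the byte/word/long handler prototypes are the usual CPUReadMemoryFunc/CPUWriteMemoryFunc shapes assumed for this QEMU generation.

    /* Hypothetical device handlers; index 0/1/2 selects byte/word/dword access,
       as the comment above describes. */
    static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr) { return 0xff; }
    static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr) { return 0xffff; }
    static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr) { return ~0u; }
    static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { }
    static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t val) { }
    static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { }

    static CPUReadMemoryFunc  *mydev_read[3]  = { mydev_readb, mydev_readw, mydev_readl };
    static CPUWriteMemoryFunc *mydev_write[3] = { mydev_writeb, mydev_writew, mydev_writel };

    /* Registration: no io_index argument any more - a fresh slot is always
       allocated, -1 means the table is full.  Per the comment above, the
       returned value is what gets handed to cpu_register_physical_memory()
       to map the region at a guest-physical base. */
    static int mydev_init_mmio(void *opaque)
    {
        return cpu_register_io_memory(mydev_read, mydev_write, opaque);
    }
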
     3444
    33253445void cpu_unregister_io_memory(int io_table_address)
    33263446{
     
    33363456}
    33373457
    3338 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
    3339 {
    3340     return io_mem_write[io_index >> IO_MEM_SHIFT];
    3341 }
    3342 
    3343 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
    3344 {
    3345     return io_mem_read[io_index >> IO_MEM_SHIFT];
     3458static void io_mem_init(void)
     3459{
     3460    int i;
     3461
     3462    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
     3463    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
     3464    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
     3465    for (i=0; i<5; i++)
     3466        io_mem_used[i] = 1;
     3467
     3468    io_mem_watch = cpu_register_io_memory(watch_mem_read,
     3469                                          watch_mem_write, NULL);
     3470#ifdef CONFIG_KQEMU
     3471    if (kqemu_phys_ram_base) {
     3472        /* alloc dirty bits array */
     3473        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
     3474        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
     3475    }
     3476#endif
    33463477}
    33473478
     
    34563587                remR3PhysWrite(addr1, buf, l); NOREF(ptr);
    34573588#else
    3458                 ptr = phys_ram_base + addr1;
     3589                ptr = qemu_get_ram_ptr(addr1);
    34593590                memcpy(ptr, buf, l);
    34603591#endif
     
    35113642                remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
    35123643#else
    3513                 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
     3644                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
    35143645                    (addr & ~TARGET_PAGE_MASK);
    35153646                memcpy(buf, ptr, l);
     
    35553686            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
    35563687            /* ROM/RAM case */
    3557             ptr = phys_ram_base + addr1;
     3688            ptr = qemu_get_ram_ptr(addr1);
    35583689            memcpy(ptr, buf, l);
    35593690        }
     
    35963727
    35973728    LIST_REMOVE(client, link);
     3729    qemu_free(client);
    35983730}
    35993731
     
    36053737        client = LIST_FIRST(&map_client_list);
    36063738        client->callback(client->opaque);
    3607         LIST_REMOVE(client, link);
     3739        cpu_unregister_map_client(client);
    36083740    }
    36093741}
     
    36553787        } else {
    36563788            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
    3657             ptr = phys_ram_base + addr1;
     3789            ptr = qemu_get_ram_ptr(addr1);
    36583790        }
    36593791        if (!done) {
     
    36803812    if (buffer != bounce.buffer) {
    36813813        if (is_write) {
    3682             unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
     3814            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
    36833815            while (access_len) {
    36843816                unsigned l;
     
    37353867        /* RAM case */
    37363868#ifndef VBOX
    3737         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
     3869        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
    37383870            (addr & ~TARGET_PAGE_MASK);
    37393871        val = ldl_p(ptr);
     
    37773909        /* RAM case */
    37783910#ifndef VBOX
    3779         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
     3911        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
    37803912            (addr & ~TARGET_PAGE_MASK);
    37813913        val = ldq_p(ptr);
     
    38283960#ifndef VBOX
    38293961        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
    3830         ptr = phys_ram_base + addr1;
     3962        ptr = qemu_get_ram_ptr(addr1);
    38313963        stl_p(ptr, val);
    38323964#else
     
    38754007    } else {
    38764008#ifndef VBOX
    3877         ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
     4009        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
    38784010            (addr & ~TARGET_PAGE_MASK);
    38794011        stq_p(ptr, val);
     
    39094041        /* RAM case */
    39104042#ifndef VBOX
    3911         ptr = phys_ram_base + addr1;
     4043        ptr = qemu_get_ram_ptr(addr1);
    39124044        stl_p(ptr, val);
    39134045#else
     
    39504082#endif
    39514083
    3952 /* virtual memory access for debug */
     4084#ifndef VBOX
     4085/* virtual memory access for debug (includes writing to ROM) */
    39534086int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
    39544087                        uint8_t *buf, int len, int is_write)
     
    39674100        if (l > len)
    39684101            l = len;
    3969         cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
    3970                                buf, l, is_write);
     4102        phys_addr += (addr & ~TARGET_PAGE_MASK);
     4103#if !defined(CONFIG_USER_ONLY)
     4104        if (is_write)
     4105            cpu_physical_memory_write_rom(phys_addr, buf, l);
     4106        else
     4107#endif
     4108            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
    39714109        len -= l;
    39724110        buf += l;
     
    39754113    return 0;
    39764114}
     4115#endif /* !VBOX */
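
cpu_memory_rw_debug() (now compiled out for VBox) routes debugger writes through cpu_physical_memory_write_rom(), so a gdbstub can patch pages that the softmmu would otherwise treat as ROM, while reads still go through cpu_physical_memory_rw(). A small usage sketch in the gdbstub style; the helper name and the assumption that 0 means success and non-zero an unmapped page are mine, only the cpu_memory_rw_debug() calls come from this hunk.

    /* Debugger-style access to guest-virtual memory (TCG builds). */
    static int debug_peek_poke(CPUState *env, target_ulong vaddr)
    {
        uint8_t buf[16];
        uint8_t int3 = 0xcc;                           /* x86 breakpoint opcode */

        if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) != 0)
            return -1;                                 /* read failed: page not mapped */

        /* is_write=1 goes through cpu_physical_memory_write_rom() above,
           so the patch also lands on ROM-backed pages. */
        return cpu_memory_rw_debug(env, vaddr, &int3, 1, 1);
    }
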
    39774116
    39784117/* in deterministic execution mode, instructions doing device I/Os
  • trunk/src/recompiler/fpu/softfloat-macros.h

    r36170 r36175  
    591591    index = ( a>>27 ) & 15;
    592592    if ( aExp & 1 ) {
    593         z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ index ];
     593        z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ (int)index ];
    594594        z = ( ( a / z )<<14 ) + ( z<<15 );
    595595        a >>= 1;
    596596    }
    597597    else {
    598         z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ index ];
     598        z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ (int)index ];
    599599        z = a / z + z;
    600600        z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 );
  • trunk/src/recompiler/fpu/softfloat-native.c

    r36170 r36175  
    33#include "softfloat.h"
    44#include <math.h>
     5#if defined(HOST_SOLARIS)
     6#include <fenv.h>
     7#endif
    58
    69void set_float_rounding_mode(int val STATUS_PARAM)
    710{
    811    STATUS(float_rounding_mode) = val;
    9 #if defined(_BSD) && !defined(__APPLE__) || (defined(HOST_SOLARIS) && (HOST_SOLARIS < 10 || HOST_SOLARIS == 11)) /* VBOX adds sol 11 */
     12#if defined(HOST_BSD) && !defined(__APPLE__) ||         \
     13    (defined(HOST_SOLARIS) && (HOST_SOLARIS < 10 || HOST_SOLARIS == 11)) /* VBOX adds sol 11 */
    1014    fpsetround(val);
    1115#elif defined(__arm__)
     
    2327#endif
    2428
    25 #if defined(_BSD) || (defined(HOST_SOLARIS) && HOST_SOLARIS < 10)
     29#if defined(HOST_BSD) || (defined(HOST_SOLARIS) && HOST_SOLARIS < 10)
    2630#define lrint(d)                ((int32_t)rint(d))
    2731#define llrint(d)               ((int64_t)rint(d))
     
    3236#define rintf(f)                ((float)rint(f))
    3337/* Some defines which only apply to *BSD */
    34 # if defined(VBOX) && defined(_BSD)
     38# if defined(VBOX) && defined(HOST_BSD)
    3539#  define lrintl(f)            ((int32_t)rint(f))
    3640#  define llrintl(f)           ((int64_t)rint(f))
  • trunk/src/recompiler/fpu/softfloat-native.h

    r36170 r36175  
    2121 */
    2222#if defined(HOST_SOLARIS) && (( HOST_SOLARIS <= 9 ) || ((HOST_SOLARIS >= 10) \
    23                                                         && (__GNUC__ <= 4))) \
     23                                                        && (__GNUC__ < 4))) \
    2424    || (defined(__OpenBSD__) && (OpenBSD < 200811))
    2525/*
     
    112112| Software IEC/IEEE floating-point rounding mode.
    113113*----------------------------------------------------------------------------*/
    114 #if (defined(_BSD) && !defined(__APPLE__)) || defined(HOST_SOLARIS)
     114#if (defined(HOST_BSD) && !defined(__APPLE__)) || defined(HOST_SOLARIS)
    115115#if defined(__OpenBSD__)
    116116#define FE_RM FP_RM
  • trunk/src/recompiler/fpu/softfloat-specialize.h

    r36170 r36175  
    167167    }
    168168    else if ( aIsNaN ) {
    169         if ( bIsSignalingNaN | ! bIsNaN )
     169        if ( bIsSignalingNaN || ! bIsNaN )
    170170            res = av;
    171171        else {
     
    302302    }
    303303    else if ( aIsNaN ) {
    304         if ( bIsSignalingNaN | ! bIsNaN )
     304        if ( bIsSignalingNaN || ! bIsNaN )
    305305            res = av;
    306306        else {
     
    442442    }
    443443    else if ( aIsNaN ) {
    444         if ( bIsSignalingNaN | ! bIsNaN ) return a;
     444        if ( bIsSignalingNaN || ! bIsNaN ) return a;
    445445 returnLargerSignificand:
    446446        if ( a.low < b.low ) return b;
     
    568568    }
    569569    else if ( aIsNaN ) {
    570         if ( bIsSignalingNaN | ! bIsNaN ) return a;
     570        if ( bIsSignalingNaN || ! bIsNaN ) return a;
    571571 returnLargerSignificand:
    572572        if ( lt128( a.high<<1, a.low, b.high<<1, b.low ) ) return b;
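
The NaN-propagation predicates switch from bitwise | to logical ||. With the 0/1 flag values used here both forms yield the same truth value, so the change is about intent and evaluation: || skips its right operand once the left one is true and always produces exactly 0 or 1. A tiny standalone illustration of that evaluation difference (purely illustrative, not from the changeset):

    #include <stdio.h>

    static int calls;
    static int side_effect(void) { calls++; return 0; }

    int main(void)
    {
        int sNaN = 1;

        calls = 0;
        (void)(sNaN |  side_effect());   /* bitwise OR always evaluates both sides */
        printf("|  evaluated the right operand %d time(s)\n", calls);   /* 1 */

        calls = 0;
        (void)(sNaN || side_effect());   /* || short-circuits and yields exactly 0 or 1 */
        printf("|| evaluated the right operand %d time(s)\n", calls);   /* 0 */
        return 0;
    }
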
  • trunk/src/recompiler/fpu/softfloat.h

    r36170 r36175  
    9595#else
    9696/* native float support */
    97 #if (defined(__i386__) || defined(__x86_64__)) && (!defined(_BSD) || defined(VBOX)) /** @todo VBOX: not correct on windows */
     97#if (defined(__i386__) || defined(__x86_64__)) && (!defined(HOST_BSD) || defined(VBOX)) /** @todo VBOX: not correct on windows */
    9898#define FLOATX80
    9999#endif
  • trunk/src/recompiler/hostregs_helper.h

    r36170 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    6766#endif
    6867
    69 #ifdef AREG3
    70 DO_REG(3)
    71 #endif
    72 
    73 #ifdef AREG4
    74 DO_REG(4)
    75 #endif
    76 
    77 #ifdef AREG5
    78 DO_REG(5)
    79 #endif
    80 
    81 #ifdef AREG6
    82 DO_REG(6)
    83 #endif
    84 
    85 #ifdef AREG7
    86 DO_REG(7)
    87 #endif
    88 
    89 #ifdef AREG8
    90 DO_REG(8)
    91 #endif
    92 
    93 #ifdef AREG9
    94 DO_REG(9)
    95 #endif
    96 
    97 #ifdef AREG10
    98 DO_REG(10)
    99 #endif
    100 
    101 #ifdef AREG11
    102 DO_REG(11)
    103 #endif
    104 
    10568#undef SAVE_HOST_REGS
    10669#undef DECLARE_HOST_REGS
  • trunk/src/recompiler/kvm.h

    r36170 r36175  
    1616
    1717#include "config.h"
     18#include "sys-queue.h"
    1819
    1920#ifdef CONFIG_KVM
     
    3233
    3334int kvm_init_vcpu(CPUState *env);
    34 int kvm_sync_vcpus(void);
    3535
    3636int kvm_cpu_exec(CPUState *env);
     
    4040                      ram_addr_t phys_offset);
    4141
    42 void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);
     42int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
     43                                   target_phys_addr_t end_addr);
    4344
    44 int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len);
    45 int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len);
     45int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size);
     46int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size);
     47int kvm_set_migration_log(int enable);
    4648
    4749int kvm_has_sync_mmu(void);
    4850
     51void kvm_setup_guest_memory(void *start, size_t size);
     52
    4953int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
    5054int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
     55
     56int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
     57                          target_ulong len, int type);
     58int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
     59                          target_ulong len, int type);
     60void kvm_remove_all_breakpoints(CPUState *current_env);
     61int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap);
    5162
    5263/* internal API */
     
    7788int kvm_arch_init_vcpu(CPUState *env);
    7889
     90struct kvm_guest_debug;
     91struct kvm_debug_exit_arch;
     92
     93struct kvm_sw_breakpoint {
     94    target_ulong pc;
     95    target_ulong saved_insn;
     96    int use_count;
     97    TAILQ_ENTRY(kvm_sw_breakpoint) entry;
     98};
     99
     100TAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);
     101
     102int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info);
     103
     104struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
     105                                                 target_ulong pc);
     106
     107int kvm_sw_breakpoints_active(CPUState *env);
     108
     109int kvm_arch_insert_sw_breakpoint(CPUState *current_env,
     110                                  struct kvm_sw_breakpoint *bp);
     111int kvm_arch_remove_sw_breakpoint(CPUState *current_env,
     112                                  struct kvm_sw_breakpoint *bp);
     113int kvm_arch_insert_hw_breakpoint(target_ulong addr,
     114                                  target_ulong len, int type);
     115int kvm_arch_remove_hw_breakpoint(target_ulong addr,
     116                                  target_ulong len, int type);
     117void kvm_arch_remove_all_hw_breakpoints(void);
     118
     119void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg);
     120
     121int kvm_check_extension(KVMState *s, unsigned int extension);
     122
     123uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
     124                                      int reg);
     125
     126/* generic hooks - to be moved/refactored once there are more users */
     127
     128static inline void cpu_synchronize_state(CPUState *env, int modified)
     129{
     130    if (kvm_enabled()) {
     131        if (modified)
     132            kvm_arch_put_registers(env);
     133        else
     134            kvm_arch_get_registers(env);
     135    }
     136}
     137
    79138#endif
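
kvm.h gains the guest-debug entry points (kvm_insert_breakpoint(), kvm_update_guest_debug() and friends, which cpu_single_step() above now uses when KVM is active) plus a generic cpu_synchronize_state() hook: call it with modified=0 before reading env so the registers are fetched from the kernel, and with modified=1 after changing env so they are written back, the pattern cpu_common_save()/cpu_common_load() follow earlier in this changeset. A hedged sketch of that calling convention; the helper name and the use of the x86 eip field are illustrative assumptions.

    /* Read-modify-write of guest CPU state in a KVM-aware way; with TCG only,
       kvm_enabled() is 0 and both calls collapse to nothing. */
    static void skip_current_insn(CPUState *env, target_ulong insn_len)
    {
        cpu_synchronize_state(env, 0);   /* pull registers out of the kernel */

        env->eip += insn_len;            /* ... inspect/modify env here ... */

        cpu_synchronize_state(env, 1);   /* push the modified state back */
    }
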
  • trunk/src/recompiler/qemu-common.h

    r36170 r36175  
    3131#define qemu_toascii(c)         RT_C_TO_ASCII((unsigned char)(c))
    3232
     33#define qemu_init_vcpu(env)     do { } while (0) /* we don't need this :-) */
     34
     35
    3336#else /* !VBOX */
    3437#ifdef _WIN32
     
    5962#include <fcntl.h>
    6063#include <sys/stat.h>
     64#include <assert.h>
    6165#include "config-host.h"
    6266
     
    143147int strstart(const char *str, const char *val, const char **ptr);
    144148int stristart(const char *str, const char *val, const char **ptr);
     149int qemu_strnlen(const char *s, int max_len);
    145150time_t mktimegm(struct tm *tm);
    146151int qemu_fls(int i);
     
    198203typedef struct DisplayChangeListener DisplayChangeListener;
    199204typedef struct DisplaySurface DisplaySurface;
     205typedef struct DisplayAllocator DisplayAllocator;
    200206typedef struct PixelFormat PixelFormat;
    201207typedef struct TextConsole TextConsole;
     
    212218typedef struct SerialState SerialState;
    213219typedef struct IRQState *qemu_irq;
    214 struct pcmcia_card_s;
     220typedef struct PCMCIACardState PCMCIACardState;
     221typedef struct MouseTransformInfo MouseTransformInfo;
     222typedef struct uWireSlave uWireSlave;
     223typedef struct I2SCodec I2SCodec;
     224typedef struct DeviceState DeviceState;
     225typedef struct SSIBus SSIBus;
    215226
    216227/* CPU save/load.  */
     
    220231/* Force QEMU to stop what it's doing and service IO */
    221232void qemu_service_io(void);
     233
     234/* Force QEMU to process pending events */
     235void qemu_notify_event(void);
     236
     237/* Unblock cpu */
     238void qemu_cpu_kick(void *env);
     239int qemu_cpu_self(void *env);
     240
     241#ifdef CONFIG_USER_ONLY
     242#define qemu_init_vcpu(env) do { } while (0)
     243#else
     244void qemu_init_vcpu(void *env);
     245#endif
    222246
    223247typedef struct QEMUIOVector {
     
    229253
    230254void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
     255void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
    231256void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
    232257void qemu_iovec_destroy(QEMUIOVector *qiov);
     
    235260void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count);
    236261
     262struct Monitor;
     263typedef struct Monitor Monitor;
     264
     265#include "module.h"
     266
    237267#endif /* dyngen-exec.h hack */
    238268
  • trunk/src/recompiler/qemu-lock.h

    r36170 r36175  
    1313 *
    1414 * You should have received a copy of the GNU Lesser General Public
    15  * License along with this library; if not, write to the Free Software
    16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     15 * License along with this library; if not, see <http://www.gnu.org/licenses/>
    1716 */
    1817
  • trunk/src/recompiler/softmmu_exec.h

    r36170 r36175  
    6161#undef ACCESS_TYPE
    6262#undef MEMSUFFIX
     63#endif /* (NB_MMU_MODES >= 3) */
    6364
    6465#if (NB_MMU_MODES >= 4)
     
    7980#undef ACCESS_TYPE
    8081#undef MEMSUFFIX
     82#endif /* (NB_MMU_MODES >= 4) */
    8183
    82 #if (NB_MMU_MODES > 4)
    83 #error "NB_MMU_MODES > 4 is not supported for now"
    84 #endif /* (NB_MMU_MODES > 4) */
    85 #endif /* (NB_MMU_MODES == 4) */
    86 #endif /* (NB_MMU_MODES >= 3) */
     84#if (NB_MMU_MODES >= 5)
     85
     86#define ACCESS_TYPE 4
     87#define MEMSUFFIX MMU_MODE4_SUFFIX
     88#define DATA_SIZE 1
     89#include "softmmu_header.h"
     90
     91#define DATA_SIZE 2
     92#include "softmmu_header.h"
     93
     94#define DATA_SIZE 4
     95#include "softmmu_header.h"
     96
     97#define DATA_SIZE 8
     98#include "softmmu_header.h"
     99#undef ACCESS_TYPE
     100#undef MEMSUFFIX
     101#endif /* (NB_MMU_MODES >= 5) */
     102
     103#if (NB_MMU_MODES > 5)
     104#error "NB_MMU_MODES > 5 is not supported for now"
     105#endif /* (NB_MMU_MODES > 5) */
    87106
    88107/* these access are slower, they must be as rare as possible */
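
softmmu_exec.h now supports five MMU modes by repeating the same recipe per mode: define ACCESS_TYPE and MEMSUFFIX, include softmmu_header.h once for each DATA_SIZE (1/2/4/8) so the preprocessor stamps out the per-mode accessors, then undefine the parameters; the hard error moves from four modes to five. A stripped-down, two-file illustration of that parameterised re-inclusion technique with hypothetical names (mem_template.h is not a QEMU header):

    /* ---- mem_template.h (hypothetical) ------------------------------------
     * Expects SUFFIX and DATA_TYPE to be defined by the includer, just as
     * softmmu_header.h expects MEMSUFFIX/DATA_SIZE/ACCESS_TYPE. */
    #define PASTE_(a, b) a##b
    #define PASTE(a, b)  PASTE_(a, b)

    static inline DATA_TYPE PASTE(load_, SUFFIX)(const void *p)
    {
        DATA_TYPE v;
        __builtin_memcpy(&v, p, sizeof v);   /* unaligned-safe load */
        return v;
    }

    #undef DATA_TYPE
    #undef SUFFIX
    /* ---- end of mem_template.h -------------------------------------------- */

    /* ---- user.c: stamp the template out twice ------------------------------
    #include <stdint.h>

    #define DATA_TYPE uint8_t
    #define SUFFIX    b_kernel
    #include "mem_template.h"            defines load_b_kernel()

    #define DATA_TYPE uint32_t
    #define SUFFIX    l_user
    #include "mem_template.h"            defines load_l_user()
    ------------------------------------------------------------------------- */
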
  • trunk/src/recompiler/softmmu_header.h

    r36170 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    8180#endif
    8281
    83 #if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \
    84     (ACCESS_TYPE < NB_MMU_MODES) && defined(ASM_SOFTMMU) && !defined(VBOX)
    85 
    86 static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
    87 {
    88     int res;
    89 
    90     asm volatile ("movl %1, %%edx\n"
    91                   "movl %1, %%eax\n"
    92                   "shrl %3, %%edx\n"
    93                   "andl %4, %%eax\n"
    94                   "andl %2, %%edx\n"
    95                   "leal %5(%%edx, %%ebp), %%edx\n"
    96                   "cmpl (%%edx), %%eax\n"
    97                   "movl %1, %%eax\n"
    98                   "je 1f\n"
    99                   "movl %6, %%edx\n"
    100                   "call %7\n"
    101                   "movl %%eax, %0\n"
    102                   "jmp 2f\n"
    103                   "1:\n"
    104                   "addl 12(%%edx), %%eax\n"
    105 #if DATA_SIZE == 1
    106                   "movzbl (%%eax), %0\n"
    107 #elif DATA_SIZE == 2
    108                   "movzwl (%%eax), %0\n"
    109 #elif DATA_SIZE == 4
    110                   "movl (%%eax), %0\n"
    111 #else
    112 #error unsupported size
    113 #endif
    114                   "2:\n"
    115                   : "=r" (res)
    116                   : "r" (ptr),
    117                   "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
    118                   "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
    119                   "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
    120                   "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
    121                   "i" (CPU_MMU_INDEX),
    122                   "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
    123                   : "%eax", "%ecx", "%edx", "memory", "cc");
    124     return res;
    125 }
    126 
    127 #if DATA_SIZE <= 2
    128 static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
    129 {
    130     int res;
    131 
    132     asm volatile ("movl %1, %%edx\n"
    133                   "movl %1, %%eax\n"
    134                   "shrl %3, %%edx\n"
    135                   "andl %4, %%eax\n"
    136                   "andl %2, %%edx\n"
    137                   "leal %5(%%edx, %%ebp), %%edx\n"
    138                   "cmpl (%%edx), %%eax\n"
    139                   "movl %1, %%eax\n"
    140                   "je 1f\n"
    141                   "movl %6, %%edx\n"
    142                   "call %7\n"
    143 #if DATA_SIZE == 1
    144                   "movsbl %%al, %0\n"
    145 #elif DATA_SIZE == 2
    146                   "movswl %%ax, %0\n"
    147 #else
    148 #error unsupported size
    149 #endif
    150                   "jmp 2f\n"
    151                   "1:\n"
    152                   "addl 12(%%edx), %%eax\n"
    153 #if DATA_SIZE == 1
    154                   "movsbl (%%eax), %0\n"
    155 #elif DATA_SIZE == 2
    156                   "movswl (%%eax), %0\n"
    157 #else
    158 #error unsupported size
    159 #endif
    160                   "2:\n"
    161                   : "=r" (res)
    162                   : "r" (ptr),
    163                   "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
    164                   "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
    165                   "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
    166                   "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
    167                   "i" (CPU_MMU_INDEX),
    168                   "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
    169                   : "%eax", "%ecx", "%edx", "memory", "cc");
    170     return res;
    171 }
    172 #endif
    173 
    174 static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
    175 {
    176     asm volatile ("movl %0, %%edx\n"
    177                   "movl %0, %%eax\n"
    178                   "shrl %3, %%edx\n"
    179                   "andl %4, %%eax\n"
    180                   "andl %2, %%edx\n"
    181                   "leal %5(%%edx, %%ebp), %%edx\n"
    182                   "cmpl (%%edx), %%eax\n"
    183                   "movl %0, %%eax\n"
    184                   "je 1f\n"
    185 #if DATA_SIZE == 1
    186                   "movzbl %b1, %%edx\n"
    187 #elif DATA_SIZE == 2
    188                   "movzwl %w1, %%edx\n"
    189 #elif DATA_SIZE == 4
    190                   "movl %1, %%edx\n"
    191 #else
    192 #error unsupported size
    193 #endif
    194                   "movl %6, %%ecx\n"
    195                   "call %7\n"
    196                   "jmp 2f\n"
    197                   "1:\n"
    198                   "addl 8(%%edx), %%eax\n"
    199 #if DATA_SIZE == 1
    200                   "movb %b1, (%%eax)\n"
    201 #elif DATA_SIZE == 2
    202                   "movw %w1, (%%eax)\n"
    203 #elif DATA_SIZE == 4
    204                   "movl %1, (%%eax)\n"
    205 #else
    206 #error unsupported size
    207 #endif
    208                   "2:\n"
    209                   :
    210                   : "r" (ptr),
    211 #if DATA_SIZE == 1
    212                   "q" (v),
    213 #else
    214                   "r" (v),
    215 #endif
    216                   "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
    217                   "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
    218                   "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
    219                   "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_write)),
    220                   "i" (CPU_MMU_INDEX),
    221                   "m" (*(uint8_t *)&glue(glue(__st, SUFFIX), MMUSUFFIX))
    222                   : "%eax", "%ecx", "%edx", "memory", "cc");
    223 }
    224 
    225 #else
    226 
    22782/* generic load/store macros */
    22883
     
    295150#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
    296151
    297 #endif /* !asm */
    298 
    299152#if ACCESS_TYPE != (NB_MMU_MODES + 1)
    300153
  • trunk/src/recompiler/softmmu_template.h

    r36170 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    3433#define USUFFIX q
    3534#define DATA_TYPE uint64_t
    36 #define DATA_TYPE_PROMOTED uint64_t
     35#ifdef VBOX
     36# define DATA_TYPE_PROMOTED uint64_t
     37#endif
    3738#elif DATA_SIZE == 4
    3839#define SUFFIX l
     
    4041#define DATA_TYPE uint32_t
    4142#ifdef VBOX
    42 #define DATA_TYPE_PROMOTED RTCCUINTREG
     43# define DATA_TYPE_PROMOTED RTCCUINTREG
    4344#endif
    4445#elif DATA_SIZE == 2
     
    4748#define DATA_TYPE uint16_t
    4849#ifdef VBOX
    49 #define DATA_TYPE_PROMOTED RTCCUINTREG
     50# define DATA_TYPE_PROMOTED RTCCUINTREG
    5051#endif
    5152#elif DATA_SIZE == 1
     
    5455#define DATA_TYPE uint8_t
    5556#ifdef VBOX
    56 #define DATA_TYPE_PROMOTED RTCCUINTREG
     57# define DATA_TYPE_PROMOTED RTCCUINTREG
    5758#endif
    5859#else
     
    9798#endif
    9899#endif /* SHIFT > 2 */
    99 #ifdef USE_KQEMU
     100#ifdef CONFIG_KQEMU
    100101    env->last_io_time = cpu_get_time_fast();
    101102#endif
     
    251252#endif
    252253#endif /* SHIFT > 2 */
    253 #ifdef USE_KQEMU
     254#ifdef CONFIG_KQEMU
    254255    env->last_io_time = cpu_get_time_fast();
    255256#endif
     
    356357
    357358#ifdef VBOX
    358 #undef DATA_TYPE_PROMOTED
     359# undef DATA_TYPE_PROMOTED
    359360#endif
    360361#undef READ_ACCESS_TYPE
  • trunk/src/recompiler/target-i386/cpu.h

    r36170 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    5251#define ELF_MACHINE     EM_386
    5352#endif
     53
     54#define CPUState struct CPUX86State
    5455
    5556#include "cpu-defs.h"
     
    100101#define DESC_P_MASK     (1 << 15)
    101102#define DESC_DPL_SHIFT  13
    102 #define DESC_DPL_MASK   (1 << DESC_DPL_SHIFT)
     103#define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT)
    103104#define DESC_S_MASK     (1 << 12)
    104105#define DESC_TYPE_SHIFT 8
     106#define DESC_TYPE_MASK  (15 << DESC_TYPE_SHIFT)
    105107#define DESC_A_MASK     (1 << 8)
    106108
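
The DESC_DPL_MASK change above reflects that the descriptor privilege level is a two-bit field (bits 13-14), and the new DESC_TYPE_MASK covers the four-bit type field. A minimal standalone C sketch of how such masks are meant to be used to pull the fields out of a descriptor flags word; the flags value is invented for illustration and nothing here is changeset code:

    #include <stdio.h>

    #define DESC_DPL_SHIFT  13
    #define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT)   /* DPL is a 2-bit field  */
    #define DESC_TYPE_SHIFT 8
    #define DESC_TYPE_MASK  (15 << DESC_TYPE_SHIFT) /* type is a 4-bit field */

    int main(void)
    {
        unsigned int flags = 0x0000e900;  /* example descriptor flags word */
        unsigned int dpl  = (flags & DESC_DPL_MASK)  >> DESC_DPL_SHIFT;
        unsigned int type = (flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT;

        printf("DPL=%u type=%u\n", dpl, type);  /* prints DPL=3 type=9 */
        return 0;
    }
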
     
    162164#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
    163165#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment  */
    164 #define HF_OSFXSR_SHIFT     16 /* CR4.OSFXSR */
     166#define HF_RF_SHIFT         16 /* must be same as eflags */
    165167#define HF_VM_SHIFT         17 /* must be same as eflags */
    166168#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
    167169#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
    168170#define HF_SVMI_SHIFT       21 /* SVM intercepts are active */
     171#define HF_OSFXSR_SHIFT     22 /* CR4.OSFXSR */
    169172
    170173#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
     
    182185#define HF_LMA_MASK          (1 << HF_LMA_SHIFT)
    183186#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
    184 #define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
     187#define HF_RF_MASK           (1 << HF_RF_SHIFT)
    185188#define HF_VM_MASK           (1 << HF_VM_SHIFT)
    186189#define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
    187190#define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
    188191#define HF_SVMI_MASK         (1 << HF_SVMI_SHIFT)
     192#define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
    189193
    190194/* hflags2 */
     
    219223#define CR4_PSE_MASK  (1 << 4)
    220224#define CR4_PAE_MASK  (1 << 5)
     225#define CR4_MCE_MASK  (1 << 6)
    221226#define CR4_PGE_MASK  (1 << 7)
    222227#define CR4_PCE_MASK  (1 << 8)
     
    264269#define PG_ERROR_RSVD_MASK 0x08
    265270#define PG_ERROR_I_D_MASK  0x10
     271
     272#define MCG_CTL_P       (1UL<<8)   /* MCG_CAP register available */
     273
     274#define MCE_CAP_DEF     MCG_CTL_P
     275#define MCE_BANKS_DEF   10
     276
     277#define MCG_STATUS_MCIP (1ULL<<2)   /* machine check in progress */
     278
     279#define MCI_STATUS_VAL  (1ULL<<63)  /* valid error */
     280#define MCI_STATUS_OVER (1ULL<<62)  /* previous errors lost */
     281#define MCI_STATUS_UC   (1ULL<<61)  /* uncorrected error */
    266282
    267283#define MSR_IA32_TSC                    0x10
     
    304320
    305321#define MSR_MTRRdefType                 0x2ff
     322
     323#define MSR_MC0_CTL                     0x400
     324#define MSR_MC0_STATUS                  0x401
     325#define MSR_MC0_ADDR                    0x402
     326#define MSR_MC0_MISC                    0x403
    306327
    307328#define MSR_EFER                        0xc0000080
     
    382403#define CPUID_EXT_XSAVE    (1 << 26)
    383404#define CPUID_EXT_OSXSAVE  (1 << 27)
     405#define CPUID_EXT_HYPERVISOR  (1 << 31)
    384406
    385407#define CPUID_EXT2_SYSCALL (1 << 11)
     
    705727    uint32_t cpuid_ext3_features;
    706728    uint32_t cpuid_apic_id;
    707 
    708729#ifndef VBOX
     730    int cpuid_vendor_override;
     731
    709732    /* MTRRs */
    710733    uint64_t mtrr_fixed[11];
     
    715738    } mtrr_var[8];
    716739
    717 #ifdef USE_KQEMU
     740#ifdef CONFIG_KQEMU
    718741    int kqemu_enabled;
    719742    int last_io_time;
     
    722745    /* For KVM */
    723746    uint64_t interrupt_bitmap[256 / 64];
     747    uint32_t mp_state;
    724748
    725749    /* in order to simplify APIC support, we leave this pointer to the
    726750       user */
    727751    struct APICState *apic_state;
     752
     753    uint64 mcg_cap;
     754    uint64 mcg_status;
     755    uint64 mcg_ctl;
     756    uint64 *mce_banks;
    728757#else  /* VBOX */
     758
    729759    uint32_t alignment2[3];
    730760    /** Profiling tb_flush. */
     
    9981028#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
    9991029
    1000 #ifdef USE_KQEMU
     1030#ifdef CONFIG_KQEMU
    10011031static inline int cpu_get_time_fast(void)
    10021032{
     
    10291059#define TARGET_PAGE_BITS 12
    10301060
    1031 #define CPUState CPUX86State
    10321061#define cpu_init cpu_x86_init
    10331062#define cpu_exec cpu_x86_exec
     
    10361065#define cpu_list x86_cpu_list
    10371066
    1038 #define CPU_SAVE_VERSION 8
     1067#define CPU_SAVE_VERSION 10
    10391068
    10401069/* MMU modes definitions */
     
    10791108    *cs_base = env->segs[R_CS].base;
    10801109    *pc = *cs_base + env->eip;
    1081     *flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
     1110    *flags = env->hflags |
     1111        (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
    10821112}
    10831113
     1114#ifndef VBOX
     1115void apic_init_reset(CPUState *env);
     1116void apic_sipi(CPUState *env);
     1117void do_cpu_init(CPUState *env);
     1118void do_cpu_sipi(CPUState *env);
     1119#endif /* !VBOX */
    10841120#endif /* CPU_I386_H */
  • trunk/src/recompiler/target-i386/exec.h

    r36170 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    385384}
    386385
     386static inline int cpu_has_work(CPUState *env)
     387{
     388    int work;
     389
     390    work = (env->interrupt_request & CPU_INTERRUPT_HARD) &&
     391           (env->eflags & IF_MASK);
     392    work |= env->interrupt_request & CPU_INTERRUPT_NMI;
     393    work |= env->interrupt_request & CPU_INTERRUPT_INIT;
     394    work |= env->interrupt_request & CPU_INTERRUPT_SIPI;
     395
     396    return work;
     397}
     398
    387399static inline int cpu_halted(CPUState *env) {
    388400    /* handle exit of HALTED state */
     
    390402        return 0;
    391403    /* disable halt condition */
    392     if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
    393          (env->eflags & IF_MASK)) ||
    394         (env->interrupt_request & CPU_INTERRUPT_NMI)) {
     404    if (cpu_has_work(env)) {
    395405        env->halted = 0;
    396406        return 0;
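
The rewritten halt test above now goes through cpu_has_work(), so a pending INIT or SIPI request wakes a halted CPU just like a hard interrupt (with IF set) or an NMI. A toy standalone model of that consolidated predicate; the CPU_INTERRUPT_* bit values below are placeholders, not the real definitions:

    #include <stdio.h>

    /* placeholder bit values, for illustration only */
    #define CPU_INTERRUPT_HARD 0x0002
    #define CPU_INTERRUPT_NMI  0x0200
    #define CPU_INTERRUPT_INIT 0x0400
    #define CPU_INTERRUPT_SIPI 0x0800
    #define IF_MASK            0x0200

    struct toy_cpu { int interrupt_request; int eflags; int halted; };

    static int toy_has_work(const struct toy_cpu *cpu)
    {
        int work = (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                   (cpu->eflags & IF_MASK);
        work |= cpu->interrupt_request & CPU_INTERRUPT_NMI;
        work |= cpu->interrupt_request & CPU_INTERRUPT_INIT;
        work |= cpu->interrupt_request & CPU_INTERRUPT_SIPI;
        return work;
    }

    int main(void)
    {
        struct toy_cpu cpu = { CPU_INTERRUPT_SIPI, 0, 1 };
        if (toy_has_work(&cpu))      /* SIPI wakes the CPU even with IF clear */
            cpu.halted = 0;
        printf("halted=%d\n", cpu.halted);  /* prints halted=0 */
        return 0;
    }
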
  • trunk/src/recompiler/target-i386/helper.c

    r36171 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    3534#include <inttypes.h>
    3635#include <signal.h>
    37 #include <assert.h>
    3836#endif /* !VBOX */
    3937
     
    4644
    4745#ifndef VBOX
    48 static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
     46/* feature flags taken from "Intel Processor Identification and the CPUID
     47 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     48 * about feature names, the Linux name is used. */
     49static const char *feature_name[] = {
     50    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
     51    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
     52    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
     53    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
     54};
     55static const char *ext_feature_name[] = {
     56    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
     57    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
     58    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
     59    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
     60};
     61static const char *ext2_feature_name[] = {
     62    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
     63    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
     64    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
     65    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
     66};
     67static const char *ext3_feature_name[] = {
     68    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
     69    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
     70    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
     71    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
     72};
     73
     74static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
    4975                                    uint32_t *ext_features,
    5076                                    uint32_t *ext2_features,
     
    5278{
    5379    int i;
    54     /* feature flags taken from "Intel Processor Identification and the CPUID
    55      * Instruction" and AMD's "CPUID Specification". In cases of disagreement
    56      * about feature names, the Linux name is used. */
    57     static const char *feature_name[] = {
    58         "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    59         "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    60         "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    61         "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    62     };
    63     static const char *ext_feature_name[] = {
    64        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    65        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    66        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    67        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    68     };
    69     static const char *ext2_feature_name[] = {
    70        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    71        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    72        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    73        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    74     };
    75     static const char *ext3_feature_name[] = {
    76        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    77        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    78        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    79        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    80     };
     80    int found = 0;
    8181
    8282    for ( i = 0 ; i < 32 ; i++ )
    8383        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
    8484            *features |= 1 << i;
    85             return;
     85            found = 1;
    8686        }
    8787    for ( i = 0 ; i < 32 ; i++ )
    8888        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
    8989            *ext_features |= 1 << i;
    90             return;
     90            found = 1;
    9191        }
    9292    for ( i = 0 ; i < 32 ; i++ )
    9393        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
    9494            *ext2_features |= 1 << i;
    95             return;
     95            found = 1;
    9696        }
    9797    for ( i = 0 ; i < 32 ; i++ )
    9898        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
    9999            *ext3_features |= 1 << i;
    100             return;
    101         }
    102     fprintf(stderr, "CPU feature %s not found\n", flagname);
     100            found = 1;
     101        }
     102    if (!found) {
     103        fprintf(stderr, "CPU feature %s not found\n", flagname);
     104    }
    103105}
    104106#endif /* !VBOX */
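
With the early returns replaced by a found flag, a feature name that occurs in more than one of the tables above (for instance "mmx" or "fxsr", which appear in both the base and the extended-AMD leaf) now sets the bit in every matching feature word, and the warning fires only when no table matches at all. A reduced standalone sketch of that behaviour with made-up three-entry tables:

    #include <stdio.h>
    #include <string.h>

    static const char *base_names[] = { "fpu", "mmx", "sse" };
    static const char *ext2_names[] = { "syscall", "mmx", "lm" };

    static void set_flag(const char *name, unsigned *base, unsigned *ext2)
    {
        int i, found = 0;
        for (i = 0; i < 3; i++)
            if (!strcmp(name, base_names[i])) { *base |= 1u << i; found = 1; }
        for (i = 0; i < 3; i++)
            if (!strcmp(name, ext2_names[i])) { *ext2 |= 1u << i; found = 1; }
        if (!found)
            fprintf(stderr, "CPU feature %s not found\n", name);
    }

    int main(void)
    {
        unsigned base = 0, ext2 = 0;
        set_flag("mmx", &base, &ext2);
        printf("base=%#x ext2=%#x\n", base, ext2);  /* both words end up with a bit set */
        return 0;
    }
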
     
    114116    uint32_t xlevel;
    115117    char model_id[48];
     118    int vendor_override;
    116119} x86_def_t;
    117120
     
    146149        .ext_features = CPUID_EXT_SSE3,
    147150        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
    148             CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
    149             CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
     151            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
    150152        .ext3_features = CPUID_EXT3_SVM,
    151153        .xlevel = 0x8000000A,
     
    271273        .name = "athlon",
    272274        .level = 2,
    273         .vendor1 = 0x68747541, /* "Auth" */
    274         .vendor2 = 0x69746e65, /* "enti" */
    275         .vendor3 = 0x444d4163, /* "cAMD" */
     275        .vendor1 = CPUID_VENDOR_AMD_1,
     276        .vendor2 = CPUID_VENDOR_AMD_2,
     277        .vendor3 = CPUID_VENDOR_AMD_3,
    276278        .family = 6,
    277279        .model = 2,
     
    306308};
    307309
     310static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
     311                               uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
     312
     313static int cpu_x86_fill_model_id(char *str)
     314{
     315    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
     316    int i;
     317
     318    for (i = 0; i < 3; i++) {
     319        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
     320        memcpy(str + i * 16 +  0, &eax, 4);
     321        memcpy(str + i * 16 +  4, &ebx, 4);
     322        memcpy(str + i * 16 +  8, &ecx, 4);
     323        memcpy(str + i * 16 + 12, &edx, 4);
     324    }
     325    return 0;
     326}
     327
     328static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
     329{
     330    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
     331
     332    x86_cpu_def->name = "host";
     333    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
     334    x86_cpu_def->level = eax;
     335    x86_cpu_def->vendor1 = ebx;
     336    x86_cpu_def->vendor2 = edx;
     337    x86_cpu_def->vendor3 = ecx;
     338
     339    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
     340    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
     341    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
     342    x86_cpu_def->stepping = eax & 0x0F;
     343    x86_cpu_def->ext_features = ecx;
     344    x86_cpu_def->features = edx;
     345
     346    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
     347    x86_cpu_def->xlevel = eax;
     348
     349    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
     350    x86_cpu_def->ext2_features = edx;
     351    x86_cpu_def->ext3_features = ecx;
     352    cpu_x86_fill_model_id(x86_cpu_def->model_id);
     353    x86_cpu_def->vendor_override = 0;
     354
     355    return 0;
     356}
     357
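
cpu_x86_fill_host() above derives family, model and stepping from CPUID leaf 1 EAX. The same decode applied to a hard-coded sample value, as a standalone sketch (the sample EAX is invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t eax = 0x000306c3;  /* sample leaf 1 EAX value */

        uint32_t family   = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
        uint32_t model    = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
        uint32_t stepping = eax & 0x0F;

        /* prints family=6 model=0x3c stepping=3 */
        printf("family=%u model=%#x stepping=%u\n", family, model, stepping);
        return 0;
    }
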
    308358static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
    309359{
     
    324374        }
    325375    }
    326     if (!def)
     376    if (kvm_enabled() && strcmp(name, "host") == 0) {
     377        cpu_x86_fill_host(x86_cpu_def);
     378    } else if (!def) {
    327379        goto error;
    328     memcpy(x86_cpu_def, def, sizeof(*def));
     380    } else {
     381        memcpy(x86_cpu_def, def, sizeof(*def));
     382    }
     383
     384    add_flagname_to_bitmaps("hypervisor", &plus_features,
     385        &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
    329386
    330387    featurestr = strtok(NULL, ",");
     
    375432                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
    376433                }
     434                x86_cpu_def->vendor_override = 1;
    377435            } else if (!strcmp(featurestr, "model_id")) {
    378436                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
     
    429487        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    430488    }
     489    env->cpuid_vendor_override = def->vendor_override;
    431490    env->cpuid_level = def->level;
    432491    if (def->family > 0x0f)
     
    495554
    496555    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
    497                            DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
     556                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
     557                           DESC_R_MASK | DESC_A_MASK);
    498558    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
    499                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
     559                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
     560                           DESC_A_MASK);
    500561    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
    501                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
     562                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
     563                           DESC_A_MASK);
    502564    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
    503                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
     565                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
     566                           DESC_A_MASK);
    504567    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
    505                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
     568                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
     569                           DESC_A_MASK);
    506570    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
    507                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
     571                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
     572                           DESC_A_MASK);
    508573
    509574    env->eip = 0xfff0;
     
    595660    "SARQ",
    596661};
     662
     663static void
     664cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
     665                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
     666                       const char *name, struct SegmentCache *sc)
     667{
     668#ifdef TARGET_X86_64
     669    if (env->hflags & HF_CS64_MASK) {
     670        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
     671                    sc->selector, sc->base, sc->limit, sc->flags);
     672    } else
     673#endif
     674    {
     675        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
     676                    (uint32_t)sc->base, sc->limit, sc->flags);
     677    }
     678
     679    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
     680        goto done;
     681
     682    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
     683    if (sc->flags & DESC_S_MASK) {
     684        if (sc->flags & DESC_CS_MASK) {
     685            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
     686                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
     687            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
     688                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
     689        } else {
     690            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
     691            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
     692                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
     693        }
     694        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
     695    } else {
     696        static const char *sys_type_name[2][16] = {
     697            { /* 32 bit mode */
     698                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
     699                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
     700                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
     701                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
     702            },
     703            { /* 64 bit mode */
     704                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
     705                "Reserved", "Reserved", "Reserved", "Reserved",
     706                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
     707                "Reserved", "IntGate64", "TrapGate64"
     708            }
     709        };
     710        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
     711                                    [(sc->flags & DESC_TYPE_MASK)
     712                                     >> DESC_TYPE_SHIFT]);
     713    }
     714done:
     715    cpu_fprintf(f, "\n");
     716}
    597717
    598718void cpu_dump_state(CPUState *env, FILE *f,
     
    674794    }
    675795
     796    for(i = 0; i < 6; i++) {
     797        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
     798                               &env->segs[i]);
     799    }
     800    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
     801    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
     802
    676803#ifdef TARGET_X86_64
    677804    if (env->hflags & HF_LMA_MASK) {
    678         for(i = 0; i < 6; i++) {
    679             SegmentCache *sc = &env->segs[i];
    680             cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
    681                         seg_name[i],
    682                         sc->selector,
    683                         sc->base,
    684                         sc->limit,
    685                         sc->flags);
    686         }
    687         cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
    688                     env->ldt.selector,
    689                     env->ldt.base,
    690                     env->ldt.limit,
    691                     env->ldt.flags);
    692         cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
    693                     env->tr.selector,
    694                     env->tr.base,
    695                     env->tr.limit,
    696                     env->tr.flags);
    697805        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
    698806                    env->gdt.base, env->gdt.limit);
     
    711819#endif
    712820    {
    713         for(i = 0; i < 6; i++) {
    714             SegmentCache *sc = &env->segs[i];
    715             cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
    716                         seg_name[i],
    717                         sc->selector,
    718                         (uint32_t)sc->base,
    719                         sc->limit,
    720                         sc->flags);
    721         }
    722         cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
    723                     env->ldt.selector,
    724                     (uint32_t)env->ldt.base,
    725                     env->ldt.limit,
    726                     env->ldt.flags);
    727         cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
    728                     env->tr.selector,
    729                     (uint32_t)env->tr.base,
    730                     env->tr.limit,
    731                     env->tr.flags);
    732821        cpu_fprintf(f, "GDT=     %08x %08x\n",
    733822                    (uint32_t)env->gdt.base, env->gdt.limit);
     
    9391028/* XXX: This value should match the one returned by CPUID
    9401029 * and in exec.c */
    941 #if defined(USE_KQEMU)
     1030#if defined(CONFIG_KQEMU)
    9421031#define PHYS_ADDR_MASK 0xfffff000LL
    9431032#else
     
    14371526        prev_debug_excp_handler(env);
    14381527}
     1528
     1529
     1530#ifndef VBOX
     1531/* This should come from sysemu.h - if we could include it here... */
     1532void qemu_system_reset_request(void);
     1533
     1534void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
     1535                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
     1536{
     1537    uint64_t mcg_cap = cenv->mcg_cap;
     1538    unsigned bank_num = mcg_cap & 0xff;
     1539    uint64_t *banks = cenv->mce_banks;
     1540
     1541    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
     1542        return;
     1543
     1544    /*
     1545     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     1546     * reporting is disabled
     1547     */
     1548    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
     1549        cenv->mcg_ctl != ~(uint64_t)0)
     1550        return;
     1551    banks += 4 * bank;
     1552    /*
     1553     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     1554     * reporting is disabled for the bank
     1555     */
     1556    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
     1557        return;
     1558    if (status & MCI_STATUS_UC) {
     1559        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
     1560            !(cenv->cr[4] & CR4_MCE_MASK)) {
     1561            fprintf(stderr, "injects mce exception while previous "
     1562                    "one is in progress!\n");
     1563            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
     1564            qemu_system_reset_request();
     1565            return;
     1566        }
     1567        if (banks[1] & MCI_STATUS_VAL)
     1568            status |= MCI_STATUS_OVER;
     1569        banks[2] = addr;
     1570        banks[3] = misc;
     1571        cenv->mcg_status = mcg_status;
     1572        banks[1] = status;
     1573        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
     1574    } else if (!(banks[1] & MCI_STATUS_VAL)
     1575               || !(banks[1] & MCI_STATUS_UC)) {
     1576        if (banks[1] & MCI_STATUS_VAL)
     1577            status |= MCI_STATUS_OVER;
     1578        banks[2] = addr;
     1579        banks[3] = misc;
     1580        banks[1] = status;
     1581    } else
     1582        banks[1] |= MCI_STATUS_OVER;
     1583}
     1584#endif /* !VBOX */
    14391585#endif /* !CONFIG_USER_ONLY */
    14401586
    14411587#ifndef VBOX
     1588
     1589static void mce_init(CPUX86State *cenv)
     1590{
     1591    unsigned int bank, bank_num;
     1592
     1593    if (((cenv->cpuid_version >> 8)&0xf) >= 6
     1594        && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
     1595        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
     1596        cenv->mcg_ctl = ~(uint64_t)0;
     1597        bank_num = cenv->mcg_cap & 0xff;
     1598        cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4);
     1599        for (bank = 0; bank < bank_num; bank++)
     1600            cenv->mce_banks[bank*4] = ~(uint64_t)0;
     1601    }
     1602}
     1603
    14421604static void host_cpuid(uint32_t function, uint32_t count,
    14431605                       uint32_t *eax, uint32_t *ebx,
     
     14991661         * actual cpu, and say goodbye to migration between different vendors
     15001662         * if you use compatibility mode. */
    1501         if (kvm_enabled())
     1663        if (kvm_enabled() && !env->cpuid_vendor_override)
    15021664            host_cpuid(0, 0, NULL, ebx, ecx, edx);
    15031665        break;
     
    15071669        *ecx = env->cpuid_ext_features;
    15081670        *edx = env->cpuid_features;
    1509 
    1510         /* "Hypervisor present" bit required for Microsoft SVVP */
    1511         if (kvm_enabled())
    1512             *ecx |= (1 << 31);
    15131671        break;
    15141672    case 2:
     
    15831741        break;
    15841742    case 0x80000001:
    1585         *eax = env->cpuid_features;
     1743        *eax = env->cpuid_version;
    15861744        *ebx = 0;
    15871745        *ecx = env->cpuid_ext3_features;
     
    15891747
    15901748        if (kvm_enabled()) {
    1591             uint32_t h_eax, h_edx;
    1592 
    1593             host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);
    1594 
    1595             /* disable CPU features that the host does not support */
    1596 
    1597             /* long mode */
    1598             if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
    1599                 *edx &= ~0x20000000;
    1600             /* syscall */
    1601             if ((h_edx & 0x00000800) == 0)
    1602                 *edx &= ~0x00000800;
    1603             /* nx */
    1604             if ((h_edx & 0x00100000) == 0)
    1605                 *edx &= ~0x00100000;
    1606 
    1607             /* disable CPU features that KVM cannot support */
    1608 
    1609             /* svm */
    1610             *ecx &= ~4UL;
    1611             /* 3dnow */
    1612             *edx &= ~0xc0000000;
     1749            /* Nested SVM not yet supported in KVM */
     1750            *ecx &= ~CPUID_EXT3_SVM;
     1751        } else {
     1752            /* AMD 3DNow! is not supported in QEMU */
     1753            *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
    16131754        }
    16141755        break;
     
    16401781        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
    16411782            /* 64 bit processor */
    1642 #if defined(USE_KQEMU)
     1783#if defined(CONFIG_KQEMU)
    16431784            *eax = 0x00003020;  /* 48 bits virtual, 32 bits physical */
    16441785#else
     
    16471788#endif
    16481789        } else {
    1649 #if defined(USE_KQEMU)
     1790#if defined(CONFIG_KQEMU)
    16501791            *eax = 0x00000020;  /* 32 bits physical */
    16511792#else
     
    16751816    }
    16761817}
     1818
     1819int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
     1820                            target_ulong *base, unsigned int *limit,
     1821                            unsigned int *flags)
     1822{
     1823    SegmentCache *dt;
     1824    target_ulong ptr;
     1825    uint32_t e1, e2;
     1826    int index;
     1827
     1828    if (selector & 0x4)
     1829        dt = &env->ldt;
     1830    else
     1831        dt = &env->gdt;
     1832    index = selector & ~7;
     1833    ptr = dt->base + index;
     1834    if ((index + 7) > dt->limit
     1835        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
     1836        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
     1837        return 0;
     1838
     1839    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
     1840    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
     1841    if (e2 & DESC_G_MASK)
     1842        *limit = (*limit << 12) | 0xfff;
     1843    *flags = e2;
     1844
     1845    return 1;
     1846}
     1847
    16771848#endif /* !VBOX */
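
The new cpu_x86_get_descr_debug() above reassembles base and limit from the two descriptor words and expands a page-granular limit. A worked standalone example using a typical flat 32-bit code segment descriptor (the e1/e2 values are illustrative, not taken from the changeset):

    #include <stdio.h>
    #include <stdint.h>

    #define DESC_G_MASK (1 << 23)   /* granularity bit in the high descriptor word */

    int main(void)
    {
        uint32_t e1 = 0x0000ffff;   /* low word of the descriptor  */
        uint32_t e2 = 0x00cf9a00;   /* high word of the descriptor */

        uint32_t base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
        uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
        if (e2 & DESC_G_MASK)
            limit = (limit << 12) | 0xfff;   /* page granularity */

        /* prints base=0 limit=0xffffffff flags=0xcf9a00 */
        printf("base=%#x limit=%#x flags=%#x\n", base, limit, e2);
        return 0;
    }
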
    16781849
     
    17071878        return NULL;
    17081879    }
     1880#ifndef VBOX
     1881    mce_init(env);
     1882#endif
    17091883    cpu_reset(env);
    1710 #ifdef USE_KQEMU
     1884#ifdef CONFIG_KQEMU
    17111885    kqemu_init(env);
    17121886#endif
    1713     if (kvm_enabled())
    1714         kvm_init_vcpu(env);
     1887
     1888    qemu_init_vcpu(env);
     1889
    17151890    return env;
    17161891}
     1892
     1893#ifndef VBOX
     1894#if !defined(CONFIG_USER_ONLY)
     1895void do_cpu_init(CPUState *env)
     1896{
     1897    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
     1898    cpu_reset(env);
     1899    env->interrupt_request = sipi;
     1900    apic_init_reset(env);
     1901}
     1902
     1903void do_cpu_sipi(CPUState *env)
     1904{
     1905    apic_sipi(env);
     1906}
     1907#else
     1908void do_cpu_init(CPUState *env)
     1909{
     1910}
     1911void do_cpu_sipi(CPUState *env)
     1912{
     1913}
     1914#endif
     1915#endif /* !VBOX */
  • trunk/src/recompiler/target-i386/helper.h

    r36170 r36175  
    6363DEF_HELPER_1(mwait, void, int)
    6464DEF_HELPER_0(debug, void)
     65DEF_HELPER_0(reset_rf, void)
    6566DEF_HELPER_2(raise_interrupt, void, int, int)
    6667DEF_HELPER_1(raise_exception, void, int)
  • trunk/src/recompiler/target-i386/helper_template.h

    r36170 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
  • trunk/src/recompiler/target-i386/op_helper.c

    r36171 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    751750}
    752751
     752static int exeption_has_error_code(int intno)
     753{
     754        switch(intno) {
     755        case 8:
     756        case 10:
     757        case 11:
     758        case 12:
     759        case 13:
     760        case 14:
     761        case 17:
     762            return 1;
     763        }
     764        return 0;
     765}
     766
    753767#ifdef TARGET_X86_64
    754768#define SET_ESP(val, sp_mask)\
     
    811825
    812826    has_error_code = 0;
    813     if (!is_int && !is_hw) {
    814         switch(intno) {
    815         case 8:
    816         case 10:
    817         case 11:
    818         case 12:
    819         case 13:
    820         case 14:
    821         case 17:
    822             has_error_code = 1;
    823             break;
    824         }
    825     }
     827    if (!is_int && !is_hw)
     828        has_error_code = exeption_has_error_code(intno);
    826829    if (is_int)
    827830        old_eip = next_eip;
     
    10161019#else
    10171020     /*
    1018      * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
    1019      * gets confused by seemingly changed EFLAGS. See #3491 and
    1020      * public bug #2341.
    1021      */
     1021      * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
     1022      * gets confused by seemingly changed EFLAGS. See #3491 and
     1023      * public bug #2341.
     1024      */
    10221025    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    10231026#endif
     
    11721175
    11731176    has_error_code = 0;
    1174     if (!is_int && !is_hw) {
    1175         switch(intno) {
    1176         case 8:
    1177         case 10:
    1178         case 11:
    1179         case 12:
    1180         case 13:
    1181         case 14:
    1182         case 17:
    1183             has_error_code = 1;
    1184             break;
    1185         }
    1186     }
     1177    if (!is_int && !is_hw)
     1178        has_error_code = exeption_has_error_code(intno);
    11871179    if (is_int)
    11881180        old_eip = next_eip;
     
    14141406        cpu_x86_set_cpl(env, 3);
    14151407    }
    1416 #ifdef USE_KQEMU
     1408#ifdef CONFIG_KQEMU
    14171409    if (kqemu_is_ok(env)) {
    14181410        if (env->hflags & HF_LMA_MASK)
     
    14521444        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
    14531445                        ~CPU_INTERRUPT_EXTERNAL_EXIT);
    1454         cpu_interrupt(env, CPU_INTERRUPT_EXIT);
     1446        cpu_exit(env);
    14551447    }
    14561448    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
     
    15481540        EIP = next_eip;
    15491541}
     1542
     1543#if !defined(CONFIG_USER_ONLY)
     1544static void handle_even_inj(int intno, int is_int, int error_code,
     1545                int is_hw, int rm)
     1546{
     1547    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
     1548    if (!(event_inj & SVM_EVTINJ_VALID)) {
     1549            int type;
     1550            if (is_int)
     1551                    type = SVM_EVTINJ_TYPE_SOFT;
     1552            else
     1553                    type = SVM_EVTINJ_TYPE_EXEPT;
     1554            event_inj = intno | type | SVM_EVTINJ_VALID;
     1555            if (!rm && exeption_has_error_code(intno)) {
     1556                    event_inj |= SVM_EVTINJ_VALID_ERR;
     1557                    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
     1558            }
     1559            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
     1560    }
     1561}
     1562#endif
    15501563
    15511564/*
     
    16001613#endif
    16011614    if (env->cr[0] & CR0_PE_MASK) {
     1615#if !defined(CONFIG_USER_ONLY)
     1616        if (env->hflags & HF_SVMI_MASK)
     1617            handle_even_inj(intno, is_int, error_code, is_hw, 0);
     1618#endif
    16021619#ifdef TARGET_X86_64
    16031620        if (env->hflags & HF_LMA_MASK) {
     
    16201637        }
    16211638    } else {
     1639#if !defined(CONFIG_USER_ONLY)
     1640        if (env->hflags & HF_SVMI_MASK)
     1641            handle_even_inj(intno, is_int, error_code, is_hw, 1);
     1642#endif
    16221643        do_interrupt_real(intno, is_int, error_code, next_eip);
    16231644    }
     1645
     1646#if !defined(CONFIG_USER_ONLY)
     1647    if (env->hflags & HF_SVMI_MASK) {
     1648            uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
     1649            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
     1650    }
     1651#endif
    16241652}
    16251653
     
    29172945        EIP = offset;
    29182946    }
    2919 #ifdef USE_KQEMU
     2947#ifdef CONFIG_KQEMU
    29202948    if (kqemu_is_ok(env)) {
    29212949        env->exception_index = -1;
     
    32983326    }
    32993327    env->hflags2 &= ~HF2_NMI_MASK;
    3300 #ifdef USE_KQEMU
     3328#ifdef CONFIG_KQEMU
    33013329    if (kqemu_is_ok(env)) {
    33023330        CC_OP = CC_OP_EFLAGS;
     
    33103338{
    33113339    helper_ret_protected(shift, 0, addend);
    3312 #ifdef USE_KQEMU
     3340#ifdef CONFIG_KQEMU
    33133341    if (kqemu_is_ok(env)) {
    33143342        env->exception_index = -1;
     
    33883416    ESP = ECX;
    33893417    EIP = EDX;
    3390 #ifdef USE_KQEMU
     3418#ifdef CONFIG_KQEMU
    33913419    if (kqemu_is_ok(env)) {
    33923420        env->exception_index = -1;
     
    36693697        env->mtrr_deftype = val;
    36703698        break;
     3699    case MSR_MCG_STATUS:
     3700        env->mcg_status = val;
     3701        break;
     3702    case MSR_MCG_CTL:
     3703        if ((env->mcg_cap & MCG_CTL_P)
     3704            && (val == 0 || val == ~(uint64_t)0))
     3705            env->mcg_ctl = val;
     3706        break;
    36713707# endif /* !VBOX */
    36723708    default:
    36733709# ifndef VBOX
     3710        if ((uint32_t)ECX >= MSR_MC0_CTL
     3711            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
     3712            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
     3713            if ((offset & 0x3) != 0
     3714                || (val == 0 || val == ~(uint64_t)0))
     3715                env->mce_banks[offset] = val;
     3716            break;
     3717        }
    36743718        /* XXX: exception ? */
    36753719# endif
     
    37453789        break;
    37463790#endif
    3747 #ifdef USE_KQEMU
     3791#ifdef CONFIG_KQEMU
    37483792    case MSR_QPI_COMMBASE:
    37493793        if (env->kqemu_enabled) {
     
    38023846            val = 0;
    38033847        break;
     3848    case MSR_MCG_CAP:
     3849        val = env->mcg_cap;
     3850        break;
     3851    case MSR_MCG_CTL:
     3852        if (env->mcg_cap & MCG_CTL_P)
     3853            val = env->mcg_ctl;
     3854        else
     3855            val = 0;
     3856        break;
     3857    case MSR_MCG_STATUS:
     3858        val = env->mcg_status;
     3859        break;
    38043860# endif /* !VBOX */
    38053861    default:
    38063862# ifndef VBOX
     3863        if ((uint32_t)ECX >= MSR_MC0_CTL
     3864            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
     3865            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
     3866            val = env->mce_banks[offset];
     3867            break;
     3868        }
    38073869        /* XXX: exception ? */
    38083870        val = 0;
     
    53135375}
    53145376
     5377void helper_reset_rf(void)
     5378{
     5379    env->eflags &= ~RF_MASK;
     5380}
     5381
    53155382void helper_raise_interrupt(int intno, int next_eip_addend)
    53165383{
     
    62216288        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
    62226289        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
    6223         stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    62246290
    62256291        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
     
    65636629    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
    65646630
     6631    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
     6632             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
     6633    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
     6634             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
     6635
    65656636    env->hflags2 &= ~HF2_GIF_MASK;
    65666637    /* FIXME: Resets the current ASID register to zero (host ASID). */
  • trunk/src/recompiler/target-i386/ops_sse.h

    r36170 r36175  
    1616 *
    1717 * You should have received a copy of the GNU Lesser General Public
    18  * License along with this library; if not, write to the Free Software
    19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    2019 */
    2120
     
    3130#if SHIFT == 0
    3231#define Reg MMXReg
    33 #define XMM_ONLY(x...)
     32#define XMM_ONLY(...)
    3433#define B(n) MMX_B(n)
    3534#define W(n) MMX_W(n)
     
    3938#else
    4039#define Reg XMMReg
    41 #define XMM_ONLY(x...) x
     40#define XMM_ONLY(...) __VA_ARGS__
    4241#define B(n) XMM_B(n)
    4342#define W(n) XMM_W(n)
  • trunk/src/recompiler/target-i386/ops_sse_header.h

    r36170 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
  • trunk/src/recompiler/target-i386/translate.c

    r36171 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019
     
    3534#include <inttypes.h>
    3635#include <signal.h>
    37 #include <assert.h>
    3836#endif /* !VBOX */
    3937
     
    5553#ifdef TARGET_X86_64
    5654#define X86_64_ONLY(x) x
    57 #define X86_64_DEF(x...) x
     55#define X86_64_DEF(...)  __VA_ARGS__
    5856#define CODE64(s) ((s)->code64)
    5957#define REX_X(s) ((s)->rex_x)
     
    6563#else
    6664#define X86_64_ONLY(x) NULL
    67 #define X86_64_DEF(x...)
     65#define X86_64_DEF(...)
    6866#define CODE64(s) 0
    6967#define REX_X(s) 0
     
    17271725}
    17281726
    1729 /* XXX: add faster immediate case */
    17301727static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
    17311728                          int is_right)
     
    18241821}
    18251822
     1823static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
     1824                          int is_right)
     1825{
     1826    int mask;
     1827    int data_bits;
     1828    TCGv t0, t1, a0;
     1829
     1830    /* XXX: inefficient, but we must use local temps */
     1831    t0 = tcg_temp_local_new();
     1832    t1 = tcg_temp_local_new();
     1833    a0 = tcg_temp_local_new();
     1834
     1835    if (ot == OT_QUAD)
     1836        mask = 0x3f;
     1837    else
     1838        mask = 0x1f;
     1839
     1840    /* load */
     1841    if (op1 == OR_TMP0) {
     1842        tcg_gen_mov_tl(a0, cpu_A0);
     1843        gen_op_ld_v(ot + s->mem_index, t0, a0);
     1844    } else {
     1845        gen_op_mov_v_reg(ot, t0, op1);
     1846    }
     1847
     1848    gen_extu(ot, t0);
     1849    tcg_gen_mov_tl(t1, t0);
     1850
     1851    op2 &= mask;
     1852    data_bits = 8 << ot;
     1853    if (op2 != 0) {
     1854        int shift = op2 & ((1 << (3 + ot)) - 1);
     1855        if (is_right) {
     1856            tcg_gen_shri_tl(cpu_tmp4, t0, shift);
     1857            tcg_gen_shli_tl(t0, t0, data_bits - shift);
     1858        }
     1859        else {
     1860            tcg_gen_shli_tl(cpu_tmp4, t0, shift);
     1861            tcg_gen_shri_tl(t0, t0, data_bits - shift);
     1862        }
     1863        tcg_gen_or_tl(t0, t0, cpu_tmp4);
     1864    }
     1865
     1866    /* store */
     1867    if (op1 == OR_TMP0) {
     1868        gen_op_st_v(ot + s->mem_index, t0, a0);
     1869    } else {
     1870        gen_op_mov_reg_v(ot, op1, t0);
     1871    }
     1872
     1873    if (op2 != 0) {
     1874        /* update eflags */
     1875        if (s->cc_op != CC_OP_DYNAMIC)
     1876            gen_op_set_cc_op(s->cc_op);
     1877
     1878        gen_compute_eflags(cpu_cc_src);
     1879        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
     1880        tcg_gen_xor_tl(cpu_tmp0, t1, t0);
     1881        tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
     1882        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
     1883        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
     1884        if (is_right) {
     1885            tcg_gen_shri_tl(t0, t0, data_bits - 1);
     1886        }
     1887        tcg_gen_andi_tl(t0, t0, CC_C);
     1888        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
     1889
     1890        tcg_gen_discard_tl(cpu_cc_dst);
     1891        tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
     1892        s->cc_op = CC_OP_EFLAGS;
     1893    }
     1894
     1895    tcg_temp_free(t0);
     1896    tcg_temp_free(t1);
     1897    tcg_temp_free(a0);
     1898}
     1899
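
gen_rot_rm_im() above emits TCG ops for ROL/ROR with an immediate count: the count is masked to the operand width, the result is built from two shifts OR'ed together, and EFLAGS are only updated when the masked count is non-zero. A plain C model of that data path for the 32-bit case (the helper name is invented; flag computation is omitted):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t rot32_im(uint32_t val, int count, int is_right)
    {
        const int data_bits = 32;
        int shift = count & (data_bits - 1);   /* like op2 & ((1 << (3 + ot)) - 1) */

        if (shift == 0)
            return val;                        /* no rotation, flags left alone */
        if (is_right)
            return (val >> shift) | (val << (data_bits - shift));
        return (val << shift) | (val >> (data_bits - shift));
    }

    int main(void)
    {
        printf("%#010x\n", rot32_im(0x12345678, 8, 1));  /* 0x78123456 */
        printf("%#010x\n", rot32_im(0x12345678, 4, 0));  /* 0x23456781 */
        return 0;
    }
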
    18261900/* XXX: add faster immediate = 1 case */
    18271901static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
     
    20382112{
    20392113    switch(op) {
     2114    case OP_ROL:
     2115        gen_rot_rm_im(s1, ot, d, c, 0);
     2116        break;
     2117    case OP_ROR:
     2118        gen_rot_rm_im(s1, ot, d, c, 1);
     2119        break;
    20402120    case OP_SHL:
    20412121    case OP_SHL1:
     
    28162896    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
    28172897        gen_helper_reset_inhibit_irq();
     2898    }
     2899    if (s->tb->flags & HF_RF_MASK) {
     2900        gen_helper_reset_rf();
    28182901    }
    28192902    if (   s->singlestep_enabled
     
    70587141        if (dflag == 2) {
    70597142            gen_op_mov_TN_reg(OT_QUAD, 0, reg);
    7060             tcg_gen_bswap_i64(cpu_T[0], cpu_T[0]);
     7143            tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
    70617144            gen_op_mov_reg_T0(OT_QUAD, reg);
    70627145        } else
    7063         {
    7064             TCGv_i32 tmp0;
    7065             gen_op_mov_TN_reg(OT_LONG, 0, reg);
    7066 
    7067             tmp0 = tcg_temp_new_i32();
    7068             tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]);
    7069             tcg_gen_bswap_i32(tmp0, tmp0);
    7070             tcg_gen_extu_i32_i64(cpu_T[0], tmp0);
    7071             gen_op_mov_reg_T0(OT_LONG, reg);
    7072         }
    7073 #else
     7146#endif
    70747147        {
    70757148            gen_op_mov_TN_reg(OT_LONG, 0, reg);
    7076             tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]);
     7149            tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
     7150            tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
    70777151            gen_op_mov_reg_T0(OT_LONG, reg);
    70787152        }
    7079 #endif
    70807153        break;
    70817154    case 0xd6: /* salc */
     
    79978070    int num_insns;
    79988071    int max_insns;
     8072#ifdef VBOX
     8073    int const singlestep = env->state & CPU_EMULATE_SINGLE_STEP;
     8074#endif
    79998075
    80008076    /* generate intermediate code */
     
    80898165        if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
    80908166            TAILQ_FOREACH(bp, &env->breakpoints, entry) {
    8091                 if (bp->pc == pc_ptr) {
     8167                if (bp->pc == pc_ptr &&
     8168                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
    80928169                    gen_debug(dc, pc_ptr - dc->cs_base);
    80938170                    break;
     
    81498226            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
    81508227            num_insns >= max_insns) {
     8228            gen_jmp_im(pc_ptr - dc->cs_base);
     8229            gen_eob(dc);
     8230            break;
     8231        }
     8232        if (singlestep) {
    81518233            gen_jmp_im(pc_ptr - dc->cs_base);
    81528234            gen_eob(dc);
  • trunk/src/recompiler/tcg/README

    r36170 r36175  
    2642648, 16 or 32 bit sign/zero extension (both operands must have the same type)
    265265
    266 * bswap16_i32 t0, t1
    267 
    268 16 bit byte swap on a 32 bit value. The two high order bytes must be set
    269 to zero.
    270 
    271 * bswap_i32 t0, t1
    272 
    273 32 bit byte swap
    274 
    275 * bswap_i64 t0, t1
     266* bswap16_i32/i64 t0, t1
     267
     26816 bit byte swap on a 32/64 bit value. The two/six high order bytes must be
     269set to zero.
     270
     271* bswap32_i32/i64 t0, t1
     272
     27332 bit byte swap on a 32/64 bit value. With a 64 bit value, the four high
     274order bytes must be set to zero.
     275
     276* bswap64_i64 t0, t1
    276277
    27727864 bit byte swap
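
As a companion to the op descriptions above, a reference-semantics sketch in plain C (not TCG code): the narrower swaps assume the unused high-order bytes of the input are already zero, exactly as the README requires.

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t bswap16_ref(uint64_t x)   /* only the low 16 bits may be set */
    {
        return ((x & 0x00ffu) << 8) | ((x & 0xff00u) >> 8);
    }

    static uint64_t bswap32_ref(uint64_t x)   /* only the low 32 bits may be set */
    {
        return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
               ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
    }

    static uint64_t bswap64_ref(uint64_t x)
    {
        return (bswap32_ref(x & 0xffffffffu) << 32) | bswap32_ref(x >> 32);
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)bswap16_ref(0x1234));                /* 0x3412 */
        printf("%#llx\n", (unsigned long long)bswap32_ref(0x12345678u));           /* 0x78563412 */
        printf("%#llx\n", (unsigned long long)bswap64_ref(0x0123456789abcdefULL)); /* 0xefcdab8967452301 */
        return 0;
    }
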
  • trunk/src/recompiler/tcg/TODO

    r14542 r36175  
    1 - Add new instructions such as: andnot, ror, rol, setcond, clz, ctz,
    2   popcnt.
     1- Add new instructions such as: setcond, clz, ctz, popcnt.
    32
    4 - See if it is worth exporting mul2, mulu2, div2, divu2. 
     3- See if it is worth exporting mul2, mulu2, div2, divu2.
    54
    65- Support of globals saved in fixed registers between TBs.
  • trunk/src/recompiler/tcg/i386/tcg-target.c

    r36170 r36175  
    175175#define ARITH_CMP 7
    176176
     177#define SHIFT_ROL 0
     178#define SHIFT_ROR 1
    177179#define SHIFT_SHL 4
    178180#define SHIFT_SHR 5
     
    12051207        c = SHIFT_SAR;
    12061208        goto gen_shift32;
     1209    case INDEX_op_rotl_i32:
     1210        c = SHIFT_ROL;
     1211        goto gen_shift32;
     1212    case INDEX_op_rotr_i32:
     1213        c = SHIFT_ROR;
     1214        goto gen_shift32;
    12071215
    12081216    case INDEX_op_add2_i32:
     
    12311239    case INDEX_op_brcond2_i32:
    12321240        tcg_out_brcond2(s, args, const_args);
     1241        break;
     1242
     1243    case INDEX_op_bswap16_i32:
     1244        tcg_out8(s, 0x66);
     1245        tcg_out_modrm(s, 0xc1, SHIFT_ROL, args[0]);
     1246        tcg_out8(s, 8);
     1247        break;
     1248    case INDEX_op_bswap32_i32:
     1249        tcg_out_opc(s, (0xc8 + args[0]) | P_EXT);
     1250        break;
     1251
     1252    case INDEX_op_neg_i32:
     1253        tcg_out_modrm(s, 0xf7, 3, args[0]);
     1254        break;
     1255
     1256    case INDEX_op_not_i32:
     1257        tcg_out_modrm(s, 0xf7, 2, args[0]);
     1258        break;
     1259
     1260    case INDEX_op_ext8s_i32:
     1261        tcg_out_modrm(s, 0xbe | P_EXT, args[0], args[1]);
     1262        break;
     1263    case INDEX_op_ext16s_i32:
     1264        tcg_out_modrm(s, 0xbf | P_EXT, args[0], args[1]);
    12331265        break;
    12341266
     
    13001332    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    13011333    { INDEX_op_sar_i32, { "r", "0", "ci" } },
     1334    { INDEX_op_sar_i32, { "r", "0", "ci" } },
     1335    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
     1336    { INDEX_op_rotr_i32, { "r", "0", "ci" } },
    13021337
    13031338    { INDEX_op_brcond_i32, { "r", "ri" } },
     
    13061341    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    13071342    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
     1343
     1344    { INDEX_op_bswap16_i32, { "r", "0" } },
     1345    { INDEX_op_bswap32_i32, { "r", "0" } },
     1346
     1347    { INDEX_op_neg_i32, { "r", "0" } },
     1348
     1349    { INDEX_op_not_i32, { "r", "0" } },
     1350
     1351    { INDEX_op_ext8s_i32, { "r", "q" } },
     1352    { INDEX_op_ext16s_i32, { "r", "r" } },
    13081353
    13091354#if TARGET_LONG_BITS == 32
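
The new i386 cases above lean on standard x86 encodings, and the byte sequences they emit are easy to read off once the mod=11 form produced by tcg_out_modrm is kept in mind. The snippet below is illustrative only (not part of the changeset); it assumes args[0] is a 3-bit register number and checks the rotate-by-8 trick used for bswap16:

    #include <assert.h>
    #include <stdint.h>

    /* Emitted byte patterns (illustrative):
     *   bswap16_i32 reg: 0x66 0xc1 0xc0+reg 0x08   -> rol $8, %reg16
     *   bswap32_i32 reg: 0x0f 0xc8+reg             -> bswap %reg32
     *   neg_i32     reg: 0xf7 0xd8+reg             -> neg %reg32
     *   not_i32     reg: 0xf7 0xd0+reg             -> not %reg32
     */
    static uint16_t rol16(uint16_t v, unsigned n)
    {
        return (uint16_t)((v << n) | (v >> (16u - n)));
    }

    int main(void)
    {
        assert(rol16(0x1234, 8) == 0x3412);  /* rotating by 8 swaps the two bytes */
        return 0;
    }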
  • trunk/src/recompiler/tcg/i386/tcg-target.h

    r36170 r36175  
    4545#define TCG_TARGET_CALL_STACK_OFFSET 0
    4646
     47/* optional instructions */
     48#define TCG_TARGET_HAS_bswap16_i32
     49#define TCG_TARGET_HAS_bswap32_i32
     50#define TCG_TARGET_HAS_neg_i32
     51#define TCG_TARGET_HAS_not_i32
     52#define TCG_TARGET_HAS_ext8s_i32
     53#define TCG_TARGET_HAS_ext16s_i32
     54#define TCG_TARGET_HAS_rot_i32
     55
    4756/* Note: must be synced with dyngen-exec.h */
    4857#ifndef VBOX
     
    5059#define TCG_AREG1 TCG_REG_EBX
    5160#define TCG_AREG2 TCG_REG_ESI
    52 #define TCG_AREG3 TCG_REG_EDI
    5361#else
    5462# define TCG_AREG0 TCG_REG_ESI
  • trunk/src/recompiler/tcg/tcg-op.h

    r36170 r36175  
    319319static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg)
    320320{
    321     if (GET_TCGV_I32(ret) != GET_TCGV_I32(arg))
     321    if (!TCGV_EQUAL_I32(ret, arg))
    322322        tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg);
    323323}
     
    437437static inline void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
    438438{
    439     tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2);
     439    if (TCGV_EQUAL_I32(arg1, arg2)) {
     440        tcg_gen_mov_i32(ret, arg1);
     441    } else {
     442        tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2);
     443    }
    440444}
    441445
     
    456460static inline void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
    457461{
    458     tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2);
     462    if (TCGV_EQUAL_I32(arg1, arg2)) {
     463        tcg_gen_mov_i32(ret, arg1);
     464    } else {
     465        tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2);
     466    }
    459467}
    460468
     
    475483static inline void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
    476484{
    477     tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2);
     485    if (TCGV_EQUAL_I32(arg1, arg2)) {
     486        tcg_gen_movi_i32(ret, 0);
     487    } else {
     488        tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2);
     489    }
    478490}
    479491
     
    626638static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
    627639{
    628     if (GET_TCGV_I64(ret) != GET_TCGV_I64(arg)) {
     640    if (!TCGV_EQUAL_I64(ret, arg)) {
    629641        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
    630642        tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
     
    859871static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
    860872{
    861     if (GET_TCGV_I64(ret) != GET_TCGV_I64(arg))
     873    if (!TCGV_EQUAL_I64(ret, arg))
    862874        tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg);
    863875}
     
    944956static inline void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
    945957{
    946     tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2);
     958    if (TCGV_EQUAL_I64(arg1, arg2)) {
     959        tcg_gen_mov_i64(ret, arg1);
     960    } else {
     961        tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2);
     962    }
    947963}
    948964
     
    956972static inline void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
    957973{
    958     tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2);
     974    if (TCGV_EQUAL_I64(arg1, arg2)) {
     975        tcg_gen_mov_i64(ret, arg1);
     976    } else {
     977        tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2);
     978    }
    959979}
    960980
     
    968988static inline void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
    969989{
    970     tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2);
     990    if (TCGV_EQUAL_I64(arg1, arg2)) {
     991        tcg_gen_movi_i64(ret, 0);
     992    } else {
     993        tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2);
     994    }
    971995}
    972996
     
    11841208    tcg_gen_op2_i32(INDEX_op_bswap16_i32, ret, arg);
    11851209#else
    1186     TCGv_i32 t0, t1;
    1187     t0 = tcg_temp_new_i32();
    1188     t1 = tcg_temp_new_i32();
    1189 
    1190     tcg_gen_shri_i32(t0, arg, 8);
    1191     tcg_gen_andi_i32(t1, arg, 0x000000ff);
    1192     tcg_gen_shli_i32(t1, t1, 8);
    1193     tcg_gen_or_i32(ret, t0, t1);
     1210    TCGv_i32 t0 = tcg_temp_new_i32();
     1211
     1212    tcg_gen_ext8u_i32(t0, arg);
     1213    tcg_gen_shli_i32(t0, t0, 8);
     1214    tcg_gen_shri_i32(ret, arg, 8);
     1215    tcg_gen_or_i32(ret, ret, t0);
    11941216    tcg_temp_free_i32(t0);
    1195     tcg_temp_free_i32(t1);
    1196 #endif
    1197 }
    1198 
    1199 static inline void tcg_gen_bswap_i32(TCGv_i32 ret, TCGv_i32 arg)
    1200 {
    1201 #ifdef TCG_TARGET_HAS_bswap_i32
    1202     tcg_gen_op2_i32(INDEX_op_bswap_i32, ret, arg);
     1217#endif
     1218}
     1219
     1220static inline void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
     1221{
     1222#ifdef TCG_TARGET_HAS_bswap32_i32
     1223    tcg_gen_op2_i32(INDEX_op_bswap32_i32, ret, arg);
    12031224#else
    12041225    TCGv_i32 t0, t1;
     
    12771298}
    12781299
    1279 static inline void tcg_gen_bswap_i64(TCGv_i64 ret, TCGv_i64 arg)
     1300/* Note: we assume the six high bytes are set to zero */
     1301static inline void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
     1302{
     1303    tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
     1304    tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg));
     1305}
     1306
     1307/* Note: we assume the four high bytes are set to zero */
     1308static inline void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
     1309{
     1310    tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
     1311    tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg));
     1312}
     1313
     1314static inline void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
    12801315{
    12811316    TCGv_i32 t0, t1;
     
    12831318    t1 = tcg_temp_new_i32();
    12841319
    1285     tcg_gen_bswap_i32(t0, TCGV_LOW(arg));
    1286     tcg_gen_bswap_i32(t1, TCGV_HIGH(arg));
     1320    tcg_gen_bswap32_i32(t0, TCGV_LOW(arg));
     1321    tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg));
    12871322    tcg_gen_mov_i32(TCGV_LOW(ret), t1);
    12881323    tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
     
    13581393}
    13591394
    1360 static inline void tcg_gen_bswap_i64(TCGv_i64 ret, TCGv_i64 arg)
    1361 {
    1362 #ifdef TCG_TARGET_HAS_bswap_i64
    1363     tcg_gen_op2_i64(INDEX_op_bswap_i64, ret, arg);
    1364 #else
    1365     TCGv_i32 t0, t1;
    1366     t0 = tcg_temp_new_i32();
    1367     t1 = tcg_temp_new_i32();
     1395/* Note: we assume the six high bytes are set to zero */
     1396static inline void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg)
     1397{
     1398#ifdef TCG_TARGET_HAS_bswap16_i64
     1399    tcg_gen_op2_i64(INDEX_op_bswap16_i64, ret, arg);
     1400#else
     1401    TCGv_i64 t0 = tcg_temp_new_i64();
     1402
     1403    tcg_gen_ext8u_i64(t0, arg);
     1404    tcg_gen_shli_i64(t0, t0, 8);
     1405    tcg_gen_shri_i64(ret, arg, 8);
     1406    tcg_gen_or_i64(ret, ret, t0);
     1407    tcg_temp_free_i64(t0);
     1408#endif
     1409}
     1410
     1411/* Note: we assume the four high bytes are set to zero */
     1412static inline void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg)
     1413{
     1414#ifdef TCG_TARGET_HAS_bswap32_i64
     1415    tcg_gen_op2_i64(INDEX_op_bswap32_i64, ret, arg);
     1416#else
     1417    TCGv_i64 t0, t1;
     1418    t0 = tcg_temp_new_i64();
     1419    t1 = tcg_temp_new_i64();
     1420
     1421    tcg_gen_shli_i64(t0, arg, 24);
     1422    tcg_gen_ext32u_i64(t0, t0);
     1423
     1424    tcg_gen_andi_i64(t1, arg, 0x0000ff00);
     1425    tcg_gen_shli_i64(t1, t1, 8);
     1426    tcg_gen_or_i64(t0, t0, t1);
     1427
     1428    tcg_gen_shri_i64(t1, arg, 8);
     1429    tcg_gen_andi_i64(t1, t1, 0x0000ff00);
     1430    tcg_gen_or_i64(t0, t0, t1);
     1431
     1432    tcg_gen_shri_i64(t1, arg, 24);
     1433    tcg_gen_or_i64(ret, t0, t1);
     1434    tcg_temp_free_i64(t0);
     1435    tcg_temp_free_i64(t1);
     1436#endif
     1437}
     1438
     1439static inline void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
     1440{
     1441#ifdef TCG_TARGET_HAS_bswap64_i64
     1442    tcg_gen_op2_i64(INDEX_op_bswap64_i64, ret, arg);
     1443#else
     1444    TCGv_i64 t0 = tcg_temp_new_i64();
     1445    TCGv_i64 t1 = tcg_temp_new_i64();
    13681446
    13691447    tcg_gen_shli_i64(t0, arg, 56);
     
    13951473    tcg_gen_shri_i64(t1, arg, 56);
    13961474    tcg_gen_or_i64(ret, t0, t1);
    1397     tcg_temp_free_i32(t0);
    1398     tcg_temp_free_i32(t1);
     1475    tcg_temp_free_i64(t0);
     1476    tcg_temp_free_i64(t1);
    13991477#endif
    14001478}
     
    14261504static inline void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
    14271505{
     1506#ifdef TCG_TARGET_HAS_not_i32
     1507    tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg);
     1508#else
    14281509    tcg_gen_xori_i32(ret, arg, -1);
     1510#endif
    14291511}
    14301512
    14311513static inline void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
    14321514{
     1515#ifdef TCG_TARGET_HAS_not_i64
     1516    tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
     1517#else
    14331518    tcg_gen_xori_i64(ret, arg, -1);
     1519#endif
    14341520}
    14351521
     
    15021588static inline void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
    15031589{
    1504     TCGv_i32 t0;
    1505     t0 = tcg_temp_new_i32();
    1506     tcg_gen_xor_i32(t0, arg1, arg2);
    1507     tcg_gen_not_i32(ret, t0);
    1508     tcg_temp_free_i32(t0);
     1590    tcg_gen_xor_i32(ret, arg1, arg2);
     1591    tcg_gen_not_i32(ret, ret);
    15091592}
    15101593
    15111594static inline void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
    15121595{
    1513     TCGv_i64 t0;
    1514     t0 = tcg_temp_new_i64();
    1515     tcg_gen_xor_i64(t0, arg1, arg2);
    1516     tcg_gen_not_i64(ret, t0);
    1517     tcg_temp_free_i64(t0);
     1596    tcg_gen_xor_i64(ret, arg1, arg2);
     1597    tcg_gen_not_i64(ret, ret);
    15181598}
    15191599
    15201600static inline void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
    15211601{
    1522     TCGv_i32 t0;
    1523     t0 = tcg_temp_new_i32();
    1524     tcg_gen_and_i32(t0, arg1, arg2);
    1525     tcg_gen_not_i32(ret, t0);
    1526     tcg_temp_free_i32(t0);
     1602    tcg_gen_and_i32(ret, arg1, arg2);
     1603    tcg_gen_not_i32(ret, ret);
    15271604}
    15281605
    15291606static inline void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
    15301607{
    1531     TCGv_i64 t0;
    1532     t0 = tcg_temp_new_i64();
    1533     tcg_gen_and_i64(t0, arg1, arg2);
    1534     tcg_gen_not_i64(ret, t0);
    1535     tcg_temp_free_i64(t0);
     1608    tcg_gen_and_i64(ret, arg1, arg2);
     1609    tcg_gen_not_i64(ret, ret);
    15361610}
    15371611
    15381612static inline void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
    15391613{
    1540     TCGv_i32 t0;
    1541     t0 = tcg_temp_new_i32();
    1542     tcg_gen_or_i32(t0, arg1, arg2);
    1543     tcg_gen_not_i32(ret, t0);
    1544     tcg_temp_free_i32(t0);
     1614    tcg_gen_or_i32(ret, arg1, arg2);
     1615    tcg_gen_not_i32(ret, ret);
    15451616}
    15461617
    15471618static inline void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
    15481619{
    1549     TCGv_i64 t0;
    1550     t0 = tcg_temp_new_i64();
    1551     tcg_gen_or_i64(t0, arg1, arg2);
    1552     tcg_gen_not_i64(ret, t0);
    1553     tcg_temp_free_i64(t0);
     1620    tcg_gen_or_i64(ret, arg1, arg2);
     1621    tcg_gen_not_i64(ret, ret);
    15541622}
    15551623
     
    15741642static inline void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
    15751643{
     1644#ifdef TCG_TARGET_HAS_rot_i32
     1645    tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
     1646#else
    15761647    TCGv_i32 t0, t1;
    15771648
     
    15841655    tcg_temp_free_i32(t0);
    15851656    tcg_temp_free_i32(t1);
     1657#endif
    15861658}
    15871659
    15881660static inline void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
    15891661{
     1662#ifdef TCG_TARGET_HAS_rot_i64
     1663    tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
     1664#else
    15901665    TCGv_i64 t0, t1;
    15911666
     
    15981673    tcg_temp_free_i64(t0);
    15991674    tcg_temp_free_i64(t1);
     1675#endif
    16001676}
    16011677
     
    16061682        tcg_gen_mov_i32(ret, arg1);
    16071683    } else {
     1684#ifdef TCG_TARGET_HAS_rot_i32
     1685        TCGv_i32 t0 = tcg_const_i32(arg2);
     1686        tcg_gen_rotl_i32(ret, arg1, t0);
     1687        tcg_temp_free_i32(t0);
     1688#else
    16081689        TCGv_i32 t0, t1;
    16091690        t0 = tcg_temp_new_i32();
     
    16141695        tcg_temp_free_i32(t0);
    16151696        tcg_temp_free_i32(t1);
     1697#endif
    16161698    }
    16171699}
     
    16231705        tcg_gen_mov_i64(ret, arg1);
    16241706    } else {
     1707#ifdef TCG_TARGET_HAS_rot_i64
     1708        TCGv_i64 t0 = tcg_const_i64(arg2);
     1709        tcg_gen_rotl_i64(ret, arg1, t0);
     1710        tcg_temp_free_i64(t0);
     1711#else
    16251712        TCGv_i64 t0, t1;
    16261713        t0 = tcg_temp_new_i64();
     
    16311718        tcg_temp_free_i64(t0);
    16321719        tcg_temp_free_i64(t1);
     1720#endif
    16331721    }
    16341722}
     
    16361724static inline void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
    16371725{
     1726#ifdef TCG_TARGET_HAS_rot_i32
     1727    tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
     1728#else
    16381729    TCGv_i32 t0, t1;
    16391730
     
    16461737    tcg_temp_free_i32(t0);
    16471738    tcg_temp_free_i32(t1);
     1739#endif
    16481740}
    16491741
    16501742static inline void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
    16511743{
     1744#ifdef TCG_TARGET_HAS_rot_i64
     1745    tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
     1746#else
    16521747    TCGv_i64 t0, t1;
    16531748
    16541749    t0 = tcg_temp_new_i64();
    16551750    t1 = tcg_temp_new_i64();
    1656     tcg_gen_shl_i64(t0, arg1, arg2);
     1751    tcg_gen_shr_i64(t0, arg1, arg2);
    16571752    tcg_gen_subfi_i64(t1, 64, arg2);
    16581753    tcg_gen_shl_i64(t1, arg1, t1);
     
    16601755    tcg_temp_free_i64(t0);
    16611756    tcg_temp_free_i64(t1);
     1757#endif
    16621758}
    16631759
     
    16991795#define tcg_gen_qemu_ldst_op_i64 tcg_gen_qemu_ldst_op_i64_i32
    17001796#define TCGV_UNUSED(x) TCGV_UNUSED_I32(x)
    1701 #define TCGV_EQUAL(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
     1797#define TCGV_EQUAL(a, b) TCGV_EQUAL_I32(a, b)
    17021798#else
    17031799#define TCGv TCGv_i64
     
    17101806#define tcg_gen_qemu_ldst_op_i64 tcg_gen_qemu_ldst_op_i64_i64
    17111807#define TCGV_UNUSED(x) TCGV_UNUSED_I64(x)
    1712 #define TCGV_EQUAL(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
     1808#define TCGV_EQUAL(a, b) TCGV_EQUAL_I64(a, b)
    17131809#endif
    17141810
     
    19562052#define tcg_gen_mul_tl tcg_gen_mul_i64
    19572053#define tcg_gen_muli_tl tcg_gen_muli_i64
     2054#define tcg_gen_div_tl tcg_gen_div_i64
     2055#define tcg_gen_rem_tl tcg_gen_rem_i64
     2056#define tcg_gen_divu_tl tcg_gen_divu_i64
     2057#define tcg_gen_remu_tl tcg_gen_remu_i64
    19582058#define tcg_gen_discard_tl tcg_gen_discard_i64
    19592059#define tcg_gen_trunc_tl_i32 tcg_gen_trunc_i64_i32
     
    19692069#define tcg_gen_ext32u_tl tcg_gen_ext32u_i64
    19702070#define tcg_gen_ext32s_tl tcg_gen_ext32s_i64
     2071#define tcg_gen_bswap16_tl tcg_gen_bswap16_i64
     2072#define tcg_gen_bswap32_tl tcg_gen_bswap32_i64
     2073#define tcg_gen_bswap64_tl tcg_gen_bswap64_i64
    19712074#define tcg_gen_concat_tl_i64 tcg_gen_concat32_i64
    19722075#define tcg_gen_andc_tl tcg_gen_andc_i64
     
    20192122#define tcg_gen_mul_tl tcg_gen_mul_i32
    20202123#define tcg_gen_muli_tl tcg_gen_muli_i32
     2124#define tcg_gen_div_tl tcg_gen_div_i32
     2125#define tcg_gen_rem_tl tcg_gen_rem_i32
     2126#define tcg_gen_divu_tl tcg_gen_divu_i32
     2127#define tcg_gen_remu_tl tcg_gen_remu_i32
    20212128#define tcg_gen_discard_tl tcg_gen_discard_i32
    20222129#define tcg_gen_trunc_tl_i32 tcg_gen_mov_i32
     
    20322139#define tcg_gen_ext32u_tl tcg_gen_mov_i32
    20332140#define tcg_gen_ext32s_tl tcg_gen_mov_i32
     2141#define tcg_gen_bswap16_tl tcg_gen_bswap16_i32
     2142#define tcg_gen_bswap32_tl tcg_gen_bswap32_i32
    20342143#define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64
    20352144#define tcg_gen_andc_tl tcg_gen_andc_i32
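
The TCGV_EQUAL_I32/I64 fast paths added above rest on simple algebraic identities, and the rotate fallbacks on the usual shift/or decomposition. A tiny self-contained check of those identities (illustrative, not changeset code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t x = 0xdeadbeefu;
        unsigned n = 5;

        assert((x & x) == x);    /* and(x, x)  -> mov(x)  */
        assert((x | x) == x);    /* or(x, x)   -> mov(x)  */
        assert((x ^ x) == 0);    /* xor(x, x)  -> movi(0) */

        /* rotl fallback: shl by n, shr by (32 - n), then or; rotr undoes it */
        uint32_t r = (x << n) | (x >> (32 - n));
        assert(((r >> n) | (r << (32 - n))) == x);
        return 0;
    }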
  • trunk/src/recompiler/tcg/tcg-opc.h

    r36170 r36175  
    6868DEF2(or_i32, 1, 2, 0, 0)
    6969DEF2(xor_i32, 1, 2, 0, 0)
    70 /* shifts */
     70/* shifts/rotates */
    7171DEF2(shl_i32, 1, 2, 0, 0)
    7272DEF2(shr_i32, 1, 2, 0, 0)
    7373DEF2(sar_i32, 1, 2, 0, 0)
     74#ifdef TCG_TARGET_HAS_rot_i32
     75DEF2(rotl_i32, 1, 2, 0, 0)
     76DEF2(rotr_i32, 1, 2, 0, 0)
     77#endif
    7478
    7579DEF2(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)
     
    8690DEF2(ext16s_i32, 1, 1, 0, 0)
    8791#endif
    88 #ifdef TCG_TARGET_HAS_bswap_i32
    89 DEF2(bswap_i32, 1, 1, 0, 0)
     92#ifdef TCG_TARGET_HAS_bswap16_i32
     93DEF2(bswap16_i32, 1, 1, 0, 0)
     94#endif
     95#ifdef TCG_TARGET_HAS_bswap32_i32
     96DEF2(bswap32_i32, 1, 1, 0, 0)
     97#endif
     98#ifdef TCG_TARGET_HAS_not_i32
     99DEF2(not_i32, 1, 1, 0, 0)
     100#endif
     101#ifdef TCG_TARGET_HAS_neg_i32
     102DEF2(neg_i32, 1, 1, 0, 0)
    90103#endif
    91104
     
    121134DEF2(or_i64, 1, 2, 0, 0)
    122135DEF2(xor_i64, 1, 2, 0, 0)
    123 /* shifts */
     136/* shifts/rotates */
    124137DEF2(shl_i64, 1, 2, 0, 0)
    125138DEF2(shr_i64, 1, 2, 0, 0)
    126139DEF2(sar_i64, 1, 2, 0, 0)
     140#ifdef TCG_TARGET_HAS_rot_i64
     141DEF2(rotl_i64, 1, 2, 0, 0)
     142DEF2(rotr_i64, 1, 2, 0, 0)
     143#endif
    127144
    128145DEF2(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)
     
    136153DEF2(ext32s_i64, 1, 1, 0, 0)
    137154#endif
    138 #ifdef TCG_TARGET_HAS_bswap_i64
    139 DEF2(bswap_i64, 1, 1, 0, 0)
    140 #endif
    141 #endif
    142 #ifdef TCG_TARGET_HAS_neg_i32
    143 DEF2(neg_i32, 1, 1, 0, 0)
     155#ifdef TCG_TARGET_HAS_bswap16_i64
     156DEF2(bswap16_i64, 1, 1, 0, 0)
     157#endif
     158#ifdef TCG_TARGET_HAS_bswap32_i64
     159DEF2(bswap32_i64, 1, 1, 0, 0)
     160#endif
     161#ifdef TCG_TARGET_HAS_bswap64_i64
     162DEF2(bswap64_i64, 1, 1, 0, 0)
     163#endif
     164#ifdef TCG_TARGET_HAS_not_i64
     165DEF2(not_i64, 1, 1, 0, 0)
    144166#endif
    145167#ifdef TCG_TARGET_HAS_neg_i64
    146168DEF2(neg_i64, 1, 1, 0, 0)
     169#endif
    147170#endif
    148171
  • trunk/src/recompiler/tcg/tcg.c

    r36170 r36175  
    2323 */
    2424
     25/* define it to use liveness analysis (better code) */
     26#define USE_LIVENESS_ANALYSIS
     27
     28#include "config.h"
     29
     30#ifndef DEBUG_TCG
    2531/* define it to suppress various consistency checks (faster) */
    2632#define NDEBUG
    27 
    28 /* define it to use liveness analysis (better code) */
    29 #define USE_LIVENESS_ANALYSIS
     33#endif
    3034
    3135#ifndef VBOX
    32 #include <assert.h>
    3336#include <stdarg.h>
    3437#include <stdlib.h>
     
    3639#include <string.h>
    3740#include <inttypes.h>
     41#ifdef _WIN32
     42#include <malloc.h>
     43#endif
     44#ifdef _AIX
     45#include <alloca.h>
     46#endif
    3847#else  /* VBOX */
    3948# include <stdio.h>
    4049# include "osdep.h"
    4150#endif /* VBOX */
    42 #ifdef _WIN32
    43 #include <malloc.h>
    44 #endif
    45 #ifdef _AIX
    46 #include <alloca.h>
    47 #endif
    48 
    49 #include "config.h"
     51
    5052#include "qemu-common.h"
    5153#include "cache-utils.h"
     
    11571159                    }
    11581160
    1159                     /* globals are live (they may be used by the call) */
    1160                     memset(dead_temps, 0, s->nb_globals);
     1161                    if (!(call_flags & TCG_CALL_CONST)) {
     1162                        /* globals are live (they may be used by the call) */
     1163                        memset(dead_temps, 0, s->nb_globals);
     1164                    }
    11611165
    11621166                    /* input args are live */
     
    18601864    /* store globals and free associated registers (we assume the call
    18611865       can modify any global. */
    1862     save_globals(s, allocated_regs);
     1866    if (!(flags & TCG_CALL_CONST)) {
     1867        save_globals(s, allocated_regs);
     1868    }
    18631869
    18641870    tcg_out_op(s, opc, &func_arg, &const_func_arg);
  • trunk/src/recompiler/tcg/tcg.h

    r36170 r36175  
    2222 * THE SOFTWARE.
    2323 */
     24#include "qemu-common.h"
    2425#include "tcg-target.h"
    2526
     
    121122 */
    122123
    123 //#define DEBUG_TCGV 1
     124#ifdef DEBUG_TCG
     125#define DEBUG_TCGV 1
     126#endif
    124127
    125128#ifdef DEBUG_TCGV
     
    154157#define GET_TCGV_I32(t) (t)
    155158#define GET_TCGV_I64(t) (t)
     159
    156160#if TCG_TARGET_REG_BITS == 32
    157161#define TCGV_LOW(t) (t)
     
    160164
    161165#endif /* DEBUG_TCGV */
     166
     167#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
     168#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
    162169
    163170/* Dummy definition to avoid compiler warnings.  */
     
    171178#define TCG_CALL_TYPE_REGPARM_2 0x0002 /* i386 style regparm call (2 regs) */
    172179#define TCG_CALL_TYPE_REGPARM   0x0003 /* i386 style regparm call (3 regs) */
    173 /* A pure function only reads its arguments and globals variables and
    174    cannot raise exceptions. Hence a call to a pure function can be
     180/* A pure function only reads its arguments and TCG global variables
     181   and cannot raise exceptions. Hence a call to a pure function can be
    175182   safely suppressed if the return value is not used. */
    176183#define TCG_CALL_PURE           0x0010
     184/* A const function only reads its arguments and does not use TCG
     185   global variables. Hence a call to such a function does not
     186   save TCG global variables back to their canonical location. */
     187#define TCG_CALL_CONST          0x0020
    177188
    178189/* used to align parameters */
     
    482493#define tcg_qemu_tb_exec(tb_ptr) ((long REGPARM (*)(void *))code_gen_prologue)(tb_ptr)
    483494# endif /* !VBOX || !GCC_WITH_BUGGY_REG_PARAM */
    484 
    485 #endif
     495#endif
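
The practical effect of the new TCG_CALL_CONST flag is what the tcg.c hunks above implement: globals are neither marked live nor flushed around a call that cannot touch them. A minimal standalone sketch of that decision, using hypothetical stand-ins for tcg.c's helpers:

    #include <stdio.h>

    #define TCG_CALL_PURE  0x0010   /* reads args + TCG globals, no exceptions */
    #define TCG_CALL_CONST 0x0020   /* reads args only, never TCG globals      */

    /* Hypothetical stand-ins for save_globals()/tcg_out_op() in tcg.c. */
    static void save_globals(void) { puts("flush globals to their canonical slots"); }
    static void emit_call(void)    { puts("emit the call instruction"); }

    static void gen_call(int call_flags)
    {
        if (!(call_flags & TCG_CALL_CONST)) {
            save_globals();          /* callee may read or modify TCG globals */
        }
        emit_call();
    }

    int main(void)
    {
        gen_call(0);                 /* ordinary helper call                */
        gen_call(TCG_CALL_CONST);    /* const helper: flush safely skipped  */
        return 0;
    }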
  • trunk/src/recompiler/tcg/x86_64/tcg-target.c

    r36170 r36175  
    4545
    4646static const int tcg_target_reg_alloc_order[] = {
    47     TCG_REG_RDI,
    48     TCG_REG_RSI,
    49     TCG_REG_RDX,
    50     TCG_REG_RCX,
    51     TCG_REG_R8,
    52     TCG_REG_R9,
    53     TCG_REG_RAX,
    54     TCG_REG_R10,
    55     TCG_REG_R11,
    56 
    5747    TCG_REG_RBP,
    5848    TCG_REG_RBX,
     
    6151    TCG_REG_R14,
    6252    TCG_REG_R15,
     53    TCG_REG_R10,
     54    TCG_REG_R11,
     55    TCG_REG_R9,
     56    TCG_REG_R8,
     57    TCG_REG_RCX,
     58    TCG_REG_RDX,
     59    TCG_REG_RSI,
     60    TCG_REG_RDI,
     61    TCG_REG_RAX,
    6362};
    6463
     
    195194#define ARITH_CMP 7
    196195
     196#define SHIFT_ROL 0
     197#define SHIFT_ROR 1
    197198#define SHIFT_SHL 4
    198199#define SHIFT_SHR 5
     
    244245    if (opc & P_EXT)
    245246        tcg_out8(s, 0x0f);
    246     tcg_out8(s, opc);
     247    tcg_out8(s, opc & 0xff);
    247248}
    248249
     
    12021203        c = SHIFT_SAR;
    12031204        goto gen_shift32;
     1205    case INDEX_op_rotl_i32:
     1206        c = SHIFT_ROL;
     1207        goto gen_shift32;
     1208    case INDEX_op_rotr_i32:
     1209        c = SHIFT_ROR;
     1210        goto gen_shift32;
    12041211
    12051212    case INDEX_op_shl_i64:
     
    12231230        c = SHIFT_SAR;
    12241231        goto gen_shift64;
     1232    case INDEX_op_rotl_i64:
     1233        c = SHIFT_ROL;
     1234        goto gen_shift64;
     1235    case INDEX_op_rotr_i64:
     1236        c = SHIFT_ROR;
     1237        goto gen_shift64;
    12251238
    12261239    case INDEX_op_brcond_i32:
     
    12331246        break;
    12341247
    1235     case INDEX_op_bswap_i32:
     1248    case INDEX_op_bswap16_i32:
     1249    case INDEX_op_bswap16_i64:
     1250        tcg_out8(s, 0x66);
     1251        tcg_out_modrm(s, 0xc1, SHIFT_ROL, args[0]);
     1252        tcg_out8(s, 8);
     1253        break;
     1254    case INDEX_op_bswap32_i32:
     1255    case INDEX_op_bswap32_i64:
    12361256        tcg_out_opc(s, (0xc8 + (args[0] & 7)) | P_EXT, 0, args[0], 0);
    12371257        break;
    1238     case INDEX_op_bswap_i64:
     1258    case INDEX_op_bswap64_i64:
    12391259        tcg_out_opc(s, (0xc8 + (args[0] & 7)) | P_EXT | P_REXW, 0, args[0], 0);
    12401260        break;
     
    12451265    case INDEX_op_neg_i64:
    12461266        tcg_out_modrm(s, 0xf7 | P_REXW, 3, args[0]);
     1267        break;
     1268
     1269    case INDEX_op_not_i32:
     1270        tcg_out_modrm(s, 0xf7, 2, args[0]);
     1271        break;
     1272    case INDEX_op_not_i64:
     1273        tcg_out_modrm(s, 0xf7 | P_REXW, 2, args[0]);
    12471274        break;
    12481275
     
    13831410    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    13841411    { INDEX_op_sar_i32, { "r", "0", "ci" } },
     1412    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
     1413    { INDEX_op_rotr_i32, { "r", "0", "ci" } },
    13851414
    13861415    { INDEX_op_brcond_i32, { "r", "ri" } },
     
    14121441    { INDEX_op_shr_i64, { "r", "0", "ci" } },
    14131442    { INDEX_op_sar_i64, { "r", "0", "ci" } },
     1443    { INDEX_op_rotl_i64, { "r", "0", "ci" } },
     1444    { INDEX_op_rotr_i64, { "r", "0", "ci" } },
    14141445
    14151446    { INDEX_op_brcond_i64, { "r", "re" } },
    14161447
    1417     { INDEX_op_bswap_i32, { "r", "0" } },
    1418     { INDEX_op_bswap_i64, { "r", "0" } },
     1448    { INDEX_op_bswap16_i32, { "r", "0" } },
     1449    { INDEX_op_bswap16_i64, { "r", "0" } },
     1450    { INDEX_op_bswap32_i32, { "r", "0" } },
     1451    { INDEX_op_bswap32_i64, { "r", "0" } },
     1452    { INDEX_op_bswap64_i64, { "r", "0" } },
    14191453
    14201454    { INDEX_op_neg_i32, { "r", "0" } },
    14211455    { INDEX_op_neg_i64, { "r", "0" } },
     1456
     1457    { INDEX_op_not_i32, { "r", "0" } },
     1458    { INDEX_op_not_i64, { "r", "0" } },
    14221459
    14231460    { INDEX_op_ext8s_i32, { "r", "r"} },
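
One easy-to-miss fix in this file is the change from tcg_out8(s, opc) to tcg_out8(s, opc & 0xff): the x86_64 backend packs prefix flags such as P_EXT and P_REXW into bits above the opcode byte, so the mask keeps them from leaking into the emitted byte. A sketch of the idea with illustrative flag values (the real definitions live in tcg-target.c):

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative flag bits only; they merely have to sit above bit 7. */
    #define P_EXT   0x100   /* emit a 0x0f escape prefix   */
    #define P_REXW  0x200   /* emit REX.W (64-bit operand) */

    static uint8_t opcode_byte(int opc)
    {
        return (uint8_t)(opc & 0xff);       /* what tcg_out8(s, opc & 0xff) keeps */
    }

    int main(void)
    {
        int opc = (0xc8 + 3) | P_EXT | P_REXW;   /* e.g. bswap64 on register 3 */
        assert(opcode_byte(opc) == 0xcb);        /* flags stripped from the byte */
        return 0;
    }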
  • trunk/src/recompiler/tcg/x86_64/tcg-target.h

    r36140 r36175  
    2222 * THE SOFTWARE.
    2323 */
    24 
    2524#define TCG_TARGET_X86_64 1
    2625
     
    5857
    5958/* optional instructions */
    60 #define TCG_TARGET_HAS_bswap_i32
    61 #define TCG_TARGET_HAS_bswap_i64
     59#define TCG_TARGET_HAS_bswap16_i32
     60#define TCG_TARGET_HAS_bswap16_i64
     61#define TCG_TARGET_HAS_bswap32_i32
     62#define TCG_TARGET_HAS_bswap32_i64
     63#define TCG_TARGET_HAS_bswap64_i64
    6264#define TCG_TARGET_HAS_neg_i32
    6365#define TCG_TARGET_HAS_neg_i64
     66#define TCG_TARGET_HAS_not_i32
     67#define TCG_TARGET_HAS_not_i64
    6468#define TCG_TARGET_HAS_ext8s_i32
    6569#define TCG_TARGET_HAS_ext16s_i32
     
    6771#define TCG_TARGET_HAS_ext16s_i64
    6872#define TCG_TARGET_HAS_ext32s_i64
     73#define TCG_TARGET_HAS_rot_i32
     74#define TCG_TARGET_HAS_rot_i64
    6975
    7076/* Note: must be synced with dyngen-exec.h */
     
    7278#define TCG_AREG1 TCG_REG_R15
    7379#define TCG_AREG2 TCG_REG_R12
    74 #define TCG_AREG3 TCG_REG_R13
    7580
    7681static inline void flush_icache_range(unsigned long start, unsigned long stop)
  • trunk/src/recompiler/tests/Makefile

    r36140 r36175  
    7979
    8080# NOTE: -fomit-frame-pointer is currently needed : this is a bug in libqemu
    81 qruncom: qruncom.c ../i386-user/libqemu.a
     81qruncom: qruncom.c ../ioport-user.c ../i386-user/libqemu.a
    8282        $(CC) $(CFLAGS) -fomit-frame-pointer $(LDFLAGS) -I../target-i386 -I.. -I../i386-user -I../fpu \
    83               -o $@ $< -L../i386-user -lqemu -lm
     83              -o $@ $(filter %.c, $^) -L../i386-user -lqemu -lm
    8484
    8585# arm test
  • trunk/src/recompiler/tests/linux-test.c

    r36170 r36175  
    1515 *
    1616 *  You should have received a copy of the GNU General Public License
    17  *  along with this program; if not, write to the Free Software
    18  *  Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
    19  *  MA 02110-1301, USA.
     17 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
    2018 */
    2119
     
    7674}
    7775
    78 #define error(fmt, args...) error1(__FILE__, __LINE__, fmt, ##args)
     76#define error(fmt, ...) error1(__FILE__, __LINE__, fmt, ## __VA_ARGS__)
    7977
    8078#define chk_error(ret) __chk_error(__FILE__, __LINE__, (ret))
  • trunk/src/recompiler/tests/qruncom.c

    r36170 r36175  
    1616
    1717//#define SIGTEST
    18 
    19 void cpu_outb(CPUState *env, int addr, int val)
    20 {
    21     fprintf(stderr, "outb: port=0x%04x, data=%02x\n", addr, val);
    22 }
    23 
    24 void cpu_outw(CPUState *env, int addr, int val)
    25 {
    26     fprintf(stderr, "outw: port=0x%04x, data=%04x\n", addr, val);
    27 }
    28 
    29 void cpu_outl(CPUState *env, int addr, int val)
    30 {
    31     fprintf(stderr, "outl: port=0x%04x, data=%08x\n", addr, val);
    32 }
    33 
    34 int cpu_inb(CPUState *env, int addr)
    35 {
    36     fprintf(stderr, "inb: port=0x%04x\n", addr);
    37     return 0;
    38 }
    39 
    40 int cpu_inw(CPUState *env, int addr)
    41 {
    42     fprintf(stderr, "inw: port=0x%04x\n", addr);
    43     return 0;
    44 }
    45 
    46 int cpu_inl(CPUState *env, int addr)
    47 {
    48     fprintf(stderr, "inl: port=0x%04x\n", addr);
    49     return 0;
    50 }
    5118
    5219int cpu_get_pic_interrupt(CPUState *env)
  • trunk/src/recompiler/tests/test-i386.c

    r36170 r36175  
    1515 *
    1616 *  You should have received a copy of the GNU General Public License
    17  *  along with this program; if not, write to the Free Software
    18  *  Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
    19  *  MA 02110-1301, USA.
     17 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
    2018 */
    2119
  • trunk/src/recompiler/tests/test-mmap.c

    r36170 r36175  
    2020 *
    2121 * You should have received a copy of the GNU General Public License
    22  * along with this program; if not, write to the Free Software
    23  * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
    24  *  MA 02110-1301, USA.
     22 * along with this program; if not, see <http://www.gnu.org/licenses/>.
    2523 */
    2624
  • trunk/src/recompiler/translate-all.c

    r36170 r36175  
    1515 *
    1616 * You should have received a copy of the GNU Lesser General Public
    17  * License along with this library; if not, write to the Free Software
    18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
    1918 */
    2019