- Timestamp: Jun 29, 2011 7:07:14 AM
- Location: trunk/src/recompiler
- Files: 3 added, 3 deleted, 47 edited
trunk/src/recompiler/Makefile.kmk
--- trunk/src/recompiler/Makefile.kmk	(r36768)
+++ trunk/src/recompiler/Makefile.kmk	(r37675)
@@ -19 +19 @@
 SUB_DEPTH = ../..
 include $(KBUILD_PATH)/subheader.kmk
-
-ifn1of ($(KBUILD_TARGET).$(KBUILD_TARGET_ARCH), darwin.x86 darwin.amd64 linux.amd64)
- include $(PATH_SUB_CURRENT)/Makefile-old.kmk
-else # new stuff
 
 #
@@ -63 +59 @@
 VBoxRemPrimary_DEFS        += IN_REM_R3 REM_INCLUDE_CPU_H NEED_CPU_H
 #VBoxRemPrimary_DEFS       += REM_PHYS_ADDR_IN_TLB
-#VBoxRemPrimary_DEFS       += DEBUG_ALL_LOGGING DEBUG_DISAS DEBUG_PCALL DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging.
-#VBoxRemPrimary_DEFS       += DEBUG_TMP_LOGGING # log qemu parts to "/tmp/vbox-qemu.log" - does not work with VBoxREM2.
+#VBoxRemPrimary_DEFS       += DEBUG_ALL_LOGGING DEBUG_DISAS DEBUG_PCALL CONFIG_DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging.
 ifdef IEM_VERIFICATION_MODE
 VBoxRemPrimary_DEFS        += IEM_VERIFICATION_MODE
@@ -70 +65 @@
 VBoxRemPrimary_DEFS.linux   = _GNU_SOURCE
 ifdef VBOX_SOLARIS_10
- VBoxRemPrimary_DEFS.solaris = HOST_SOLARIS=10
-else
- VBoxRemPrimary_DEFS.solaris = HOST_SOLARIS=11
+ VBoxRemPrimary_DEFS.solaris = CONFIG_SOLARIS_VERSION=10
+else
+ VBoxRemPrimary_DEFS.solaris = CONFIG_SOLARIS_VERSION=11
 endif
 VBoxRemPrimary_DEFS.freebsd += _BSD
@@ -101 +96 @@
 	host-utils.c \
 	cutils.c \
+	tcg-runtime.c \
 	tcg/tcg.c \
 	tcg/tcg-dyngen.c \
-	tcg/tcg-runtime.c \
 	fpu/softfloat-native.c \
 	target-i386/op_helper.c \
@@ -273 +268 @@
 
 
-endif # new stuff
 include $(KBUILD_PATH)/subfooter.kmk
 
trunk/src/recompiler/Sun/config-host.h
--- trunk/src/recompiler/Sun/config-host.h	(r36170)
+++ trunk/src/recompiler/Sun/config-host.h	(r37675)
@@ -38 +38 @@
 # endif
 #endif
-#define QEMU_VERSION "0.8.1"
+#define QEMU_VERSION "0.12.5"
 #define CONFIG_UNAME_RELEASE ""
 #define CONFIG_QEMU_SHAREDIR "."
trunk/src/recompiler/Sun/kvm.h
--- trunk/src/recompiler/Sun/kvm.h	(r36211)
+++ trunk/src/recompiler/Sun/kvm.h	(r37675)
@@ -23 +23 @@
 #define kvm_set_phys_mem(a, b, c) AssertFailed()
 #define kvm_arch_get_registers(a) AssertFailed()
+#define cpu_synchronize_state(a)  do { } while (0)
 
 #endif
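Editor's note: the added cpu_synchronize_state() stub uses the classic do { } while (0) no-op idiom, which swallows the trailing semicolon and keeps the macro behaving like a single statement wherever the real call would appear. A minimal standalone illustration (toy names, not from the changeset):

    #include <stdio.h>

    /* No-op stub, as in the changeset... */
    #define sync_state(cpu)      do { } while (0)

    /* ...and the general form the idiom protects: multi-statement macros. */
    #define trace_and_sync(cpu)  do { puts("sync"); sync_state(cpu); } while (0)

    int main(void)
    {
        int up = 1;
        if (up)
            trace_and_sync(NULL);  /* expands to exactly one statement; a bare
                                      { } block here would break the else */
        else
            puts("halted");
        return 0;
    }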
trunk/src/recompiler/VBoxRecompiler.c
--- trunk/src/recompiler/VBoxRecompiler.c	(r36811)
+++ trunk/src/recompiler/VBoxRecompiler.c	(r37675)
@@ -310 +310 @@
     CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
 
+    cpu_reset(&pVM->rem.s.Env);
+
     /* allocate code buffer for single instruction emulation. */
     pVM->rem.s.Env.cbCodeBuffer = 4096;
@@ -315 +317 @@
     AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
 
-    /* finally, set the cpu_single_env global. */
+    /* Finally, set the cpu_single_env global. */
     cpu_single_env = &pVM->rem.s.Env;
@@ -416 +418 @@
 #ifdef DEBUG_ALL_LOGGING
     loglevel = ~0;
-# ifdef DEBUG_TMP_LOGGING
-    logfile = fopen("/tmp/vbox-qemu.log", "w");
-# endif
 #endif
+    //loglevel = CPU_LOG_EXEC | CPU_LOG_INT | CPU_LOG_PCALL | CPU_LOG_TB_CPU; /// DONT COMMIT ME
 
@@ -999 +999 @@
     CPUBreakpoint *pBP;
     RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
-    TAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
+    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
         if (pBP->pc == GCPtrPC)
             break;
[... the identical TAILQ_FOREACH -> QTAILQ_FOREACH change repeats at lines 1184 and 1342 ...]
@@ -1531 +1531 @@
     }
 
-    if (!TAILQ_EMPTY(&env->breakpoints))
+    if (!QTAILQ_EMPTY(&env->breakpoints))
     {
         //Log2(("raw mode refused: Breakpoints\n"));
@@ -1539 +1539 @@
-    if (!TAILQ_EMPTY(&env->watchpoints))
+    if (!QTAILQ_EMPTY(&env->watchpoints))
     {
         //Log2(("raw mode refused: Watchpoints\n"));
@@ -4002 +4002 @@
 void disas(FILE *phFile, void *pvCode, unsigned long cb)
 {
-#ifdef DEBUG_TMP_LOGGING
-# define DISAS_PRINTF(x...) fprintf(phFile, x)
-#else
-# define DISAS_PRINTF(x...) RTLogPrintf(x)
     if (LogIs2Enabled())
-#endif
     {
         unsigned off = 0;
@@ -4020 +4015 @@
 #endif
 
-        DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
+        RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
         while (off < cb)
         {
             uint32_t cbInstr;
             if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
-                DISAS_PRINTF("%s", szOutput);
+                RTLogPrintf("%s", szOutput);
             else
             {
-                DISAS_PRINTF("disas error\n");
+                RTLogPrintf("disas error\n");
                 cbInstr = 1;
 #ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
@@ -4037 +4032 @@
         }
     }
-
-#undef DISAS_PRINTF
 }
@@ -4052 +4045 @@
 void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
 {
-#ifdef DEBUG_TMP_LOGGING
-# define DISAS_PRINTF(x...) fprintf(phFile, x)
-#else
-# define DISAS_PRINTF(x...) RTLogPrintf(x)
     if (LogIs2Enabled())
-#endif
     {
         PVM pVM = cpu_single_env->pVM;
@@ -4074 +4062 @@
         /*
          * Do the disassembling.
          */
-        DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
+        RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
         cs = cpu_single_env->segs[R_CS].selector;
         eip = uCode - cpu_single_env->segs[R_CS].base;
@@ -4089 +4077 @@
                        &cbInstr);
         if (RT_SUCCESS(rc))
-            DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
+            RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
         else
         {
-            DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
+            RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
             cbInstr = 1;
         }
@@ -4104 +4092 @@
         }
     }
-#undef DISAS_PRINTF
 }
trunk/src/recompiler/bswap.h
--- trunk/src/recompiler/bswap.h	(r36175)
+++ trunk/src/recompiler/bswap.h	(r37675)
@@ -6 +6 @@
 #include <inttypes.h>
 
-#ifdef HAVE_MACHINE_BSWAP_H
+#ifdef CONFIG_MACHINE_BSWAP_H
 #include <sys/endian.h>
 #include <sys/types.h>
@@ -12 +12 @@
 #else
 
-#ifdef HAVE_BYTESWAP_H
+#ifdef CONFIG_BYTESWAP_H
 #include <byteswap.h>
 #else
@@ -48 +48 @@
 })
 
-#endif /* !HAVE_BYTESWAP_H */
+#endif /* !CONFIG_BYTESWAP_H */
 
 static inline uint16_t bswap16(uint16_t x)
@@ -65 +65 @@
 }
 
-#endif /* !HAVE_MACHINE_BSWAP_H */
+#endif /* ! CONFIG_MACHINE_BSWAP_H */
 
 static inline void bswap16s(uint16_t *s)
@@ -82 +82 @@
 }
 
-#if defined(WORDS_BIGENDIAN)
+#if defined(HOST_WORDS_BIGENDIAN)
 #define be_bswap(v, size) (v)
 #define le_bswap(v, size) bswap ## size(v)
@@ -204 +204 @@
 #endif
 
-#ifdef WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
 #define cpu_to_32wu cpu_to_be32wu
 #else
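Editor's note: this header selects between system byte-swap routines and portable fallbacks, then builds cpu_to_be/cpu_to_le converters on top: on a big-endian host be_bswap is the identity and le_bswap swaps, and vice versa. A standalone sketch of the fallback path and what the converters must do (my own fallback bodies, equivalent in spirit to the header's):

    #include <stdint.h>
    #include <stdio.h>

    /* Portable fallbacks, used when neither <byteswap.h> nor
       <machine/bswap.h> is available. */
    static inline uint16_t my_bswap16(uint16_t x)
    {
        return (uint16_t)((x >> 8) | (x << 8));
    }

    static inline uint32_t my_bswap32(uint32_t x)
    {
        return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
               ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
    }

    int main(void)
    {
        /* On a little-endian host cpu_to_be32() must swap and
           cpu_to_le32() is the identity; on big endian it is reversed. */
        uint32_t x = 0x12345678u;
        printf("%08x -> %08x\n", x, my_bswap32(x));        /* 78563412 */
        printf("%04x -> %04x\n", 0x1234u, my_bswap16(0x1234u));
        return 0;
    }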
trunk/src/recompiler/cpu-all.h
--- trunk/src/recompiler/cpu-all.h	(r36175)
+++ trunk/src/recompiler/cpu-all.h	(r37675)
@@ -45 +45 @@
  * memory accesses.
  *
- * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
+ * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
  * otherwise little endian.
  *
@@ -55 +55 @@
 #include "softfloat.h"
 
-#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
 #define BSWAP_NEEDED
 #endif
@@ -141 +141 @@
 typedef union {
     float64 d;
-#if defined(WORDS_BIGENDIAN) \
+#if defined(HOST_WORDS_BIGENDIAN) \
     || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
     struct {
@@ -159 +159 @@
 typedef union {
     float128 q;
-#if defined(WORDS_BIGENDIAN) \
+#if defined(HOST_WORDS_BIGENDIAN) \
     || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
     struct {
@@ -240 +240 @@
[... whitespace-only tweaks around '# ifndef REM_PHYS_ADDR_IN_TLB' / '# endif' (line 242) and '# undef VBOX_CHECK_ADDR' (line 310) ...]
@@ -368 +368 @@
    kernel handles unaligned load/stores may give better results, but
    it is a system wide setting : bad */
-#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
+#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
 
 /* conservative code for little endian unaligned accesses */
@@ -546 +546 @@
 #endif /* !VBOX */
 
-#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
+#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
 
 static inline int lduw_be_p(const void *ptr)
@@ -772 +772 @@
  * This allows the guest address space to be offset to a convenient location.
  */
-//#define GUEST_BASE 0x20000000
-#define GUEST_BASE 0
+#if defined(CONFIG_USE_GUEST_BASE)
+extern unsigned long guest_base;
+extern int have_guest_base;
+#define GUEST_BASE guest_base
+#else
+#define GUEST_BASE 0ul
+#endif
 
 /* All direct uses of g2h and h2g need to go away for usermode softmmu. */
@@ -1042 +1047 @@
 #define VGA_DIRTY_FLAG       0x01
 #define CODE_DIRTY_FLAG      0x02
-#define KQEMU_DIRTY_FLAG     0x04
 #define MIGRATION_DIRTY_FLAG 0x08
@@ -1211 +1215 @@
 }
 
-#elif defined(__mips__)
+#elif defined(__mips__) && \
+    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
+/*
+ * binutils wants to use rdhwr only on mips32r2
+ * but as linux kernel emulate it, it's fine
+ * to use it.
+ *
+ */
+#define MIPS_RDHWR(rd, value) { \
+    __asm__ __volatile__ (".set push\n\t" \
+                          ".set mips32r2\n\t" \
+                          "rdhwr %0, "rd"\n\t" \
+                          ".set pop" \
+                          : "=r" (value)); \
+}
 
 static inline int64_t cpu_get_real_ticks(void)
 {
-#if __mips_isa_rev >= 2
+    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
     uint32_t count;
     static uint32_t cyc_per_count = 0;
 
     if (!cyc_per_count)
-        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));
-
-    __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
+        MIPS_RDHWR("$3", cyc_per_count);
+
+    MIPS_RDHWR("$2", count);
     return (int64_t)(count * cyc_per_count);
-#else
-    /* FIXME */
-    static int64_t ticks = 0;
-    return ticks++;
-#endif
 }
@@ -1249 +1263 @@
 }
 
-extern int64_t kqemu_time, kqemu_time_start;
 extern int64_t qemu_time, qemu_time_start;
 extern int64_t tlb_flush_time;
-extern int64_t kqemu_exec_count;
 extern int64_t dev_time;
-extern int64_t kqemu_ret_int_count;
-extern int64_t kqemu_ret_excp_count;
-extern int64_t kqemu_ret_intr_count;
 #endif
 
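Editor's note: the GUEST_BASE hunk turns the guest-address offset into a runtime variable when CONFIG_USE_GUEST_BASE is set, and folds it to a compile-time 0 otherwise. For context, the g2h/h2g translation macros mentioned in the diff are built on it roughly as sketched below (the upstream pattern, reduced to a self-contained toy; the 32-bit guest type is illustrative):

    #include <stdint.h>

    /* With CONFIG_USE_GUEST_BASE this is set at startup (e.g. -B on the
       qemu command line); without it the addition optimizes away. */
    unsigned long guest_base = 0;
    #define GUEST_BASE guest_base

    typedef uint32_t guest_addr_t;   /* illustrative 32-bit guest */

    /* guest virtual address -> host pointer, and back */
    #define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
    #define h2g(x) ((guest_addr_t)((unsigned long)(x) - GUEST_BASE))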
trunk/src/recompiler/cpu-common.h
--- trunk/src/recompiler/cpu-common.h	(r36175)
+++ trunk/src/recompiler/cpu-common.h	(r37675)
@@ -11 +11 @@
 
 /* address in the RAM (different from a physical address) */
-#ifdef CONFIG_KQEMU
-/* FIXME: This is wrong. */
-typedef uint32_t ram_addr_t;
-#else
 typedef unsigned long ram_addr_t;
-#endif
 
 /* memory API */
@@ -42 +37 @@
 ram_addr_t qemu_ram_addr_from_host(void *ptr);
 
-int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
-                           CPUWriteMemoryFunc **mem_write,
+int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
+                           CPUWriteMemoryFunc * const *mem_write,
                            void *opaque);
 void cpu_unregister_io_memory(int table_address);
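Editor's note: the '* const' change lets callers declare their read/write handler tables as arrays of const function pointers, which the compiler may place in read-only data; a '* const *' parameter accepts both const and non-const tables. A minimal standalone sketch of the pattern (all names here are illustrative, not from the changeset):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t (*ReadFunc)(void *opaque, uint32_t addr);

    static uint32_t dummy_readb(void *opaque, uint32_t addr) { return 0xff; }
    static uint32_t dummy_readw(void *opaque, uint32_t addr) { return 0xffff; }
    static uint32_t dummy_readl(void *opaque, uint32_t addr) { return ~0u; }

    /* Array of const function pointers: eligible for .rodata. */
    static ReadFunc const dummy_read[3] = { dummy_readb, dummy_readw, dummy_readl };

    /* 'ReadFunc const *' is the same shape as 'CPUReadMemoryFunc * const *'. */
    static void register_io(ReadFunc const *mem_read, void *opaque)
    {
        printf("byte read -> 0x%x\n", mem_read[0](opaque, 0));
    }

    int main(void) { register_io(dummy_read, NULL); return 0; }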
trunk/src/recompiler/cpu-defs.h
--- trunk/src/recompiler/cpu-defs.h	(r36177)
+++ trunk/src/recompiler/cpu-defs.h	(r37675)
@@ -41 +41 @@
 #endif
 #include "osdep.h"
-#include "sys-queue.h"
+#include "qemu-queue.h"
 #include "targphys.h"
 
@@ -125 +125 @@
 } CPUTLBEntry;
 
-#ifdef WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
 typedef struct icount_decr_u16 {
     uint16_t high;
@@ -143 +143 @@
     target_ulong pc;
     int flags; /* BP_* */
-    TAILQ_ENTRY(CPUBreakpoint) entry;
+    QTAILQ_ENTRY(CPUBreakpoint) entry;
 } CPUBreakpoint;
 
@@ -150 +150 @@
     target_ulong len_mask;
     int flags; /* BP_* */
-    TAILQ_ENTRY(CPUWatchpoint) entry;
+    QTAILQ_ENTRY(CPUWatchpoint) entry;
 } CPUWatchpoint;
 
@@ -171 +171 @@
     /* The meaning of the MMU modes is defined in the target code. */     \
     CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                    \
-    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];
+    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                 \
     /** addends for HVA -> GPA translations */                            \
     VBOX_ONLY(target_phys_addr_t phys_addends[NB_MMU_MODES][CPU_TLB_SIZE]); \
@@ -190 +190 @@
     /* from this point: preserved by CPU reset */                         \
     /* ice debug support */                                               \
-    TAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;              \
+    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;             \
     int singlestep_enabled;                                               \
                                                                           \
-    TAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;              \
+    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;             \
     CPUWatchpoint *watchpoint_hit;                                        \
                                                                           \
@@ -206 +206 @@
     uint32_t host_tid; /* host thread ID */                               \
     int numa_node; /* NUMA node this cpu is belonging to */               \
+    int nr_cores;  /* number of cores within this CPU package */          \
+    int nr_threads;/* number of threads within this CPU */                \
     int running; /* Nonzero if cpu is currently running(usermode). */     \
     /* user data */                                                       \
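Editor's note: the TAILQ_* -> QTAILQ_* rename comes with the new qemu-queue.h, which mirrors the BSD <sys/queue.h> macros under a prefix that cannot clash with system headers. A self-contained sketch of the intrusive tail-queue pattern used for breakpoints, written against the system <sys/queue.h> (same shape as the QTAILQ_* calls in this changeset):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>   /* QTAILQ_* in qemu-queue.h mirrors these */

    typedef struct Breakpoint {
        unsigned long pc;
        int flags;
        TAILQ_ENTRY(Breakpoint) entry;   /* intrusive link inside the node */
    } Breakpoint;

    int main(void)
    {
        TAILQ_HEAD(bp_head, Breakpoint) breakpoints;
        TAILQ_INIT(&breakpoints);

        for (unsigned long pc = 0x1000; pc <= 0x3000; pc += 0x1000) {
            Breakpoint *bp = calloc(1, sizeof(*bp));
            bp->pc = pc;
            TAILQ_INSERT_TAIL(&breakpoints, bp, entry);
        }

        /* Same lookup shape as the QTAILQ_FOREACH scans in VBoxRecompiler.c;
           bp is NULL if the loop runs to the end without matching. */
        Breakpoint *bp;
        TAILQ_FOREACH(bp, &breakpoints, entry)
            if (bp->pc == 0x2000)
                break;
        printf("found bp at %#lx\n", bp ? bp->pc : 0ul);
        return 0;
    }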
trunk/src/recompiler/cpu-exec.c
--- trunk/src/recompiler/cpu-exec.c	(r36768)
+++ trunk/src/recompiler/cpu-exec.c	(r37675)
@@ -49 +49 @@
 #endif
 
-#if defined(__sparc__) && !defined(HOST_SOLARIS)
+#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
 // Work around ugly bugs in glibc that mangle global register contents
 #undef env
@@ -57 +57 @@
 int tb_invalidated_flag;
 
-//#define DEBUG_EXEC
+//#define CONFIG_DEBUG_EXEC
 //#define DEBUG_SIGNAL
 
@@ -217 +217 @@
 
     if (!env->watchpoint_hit)
-        TAILQ_FOREACH(wp, &env->watchpoints, entry)
+        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
             wp->flags &= ~BP_WATCHPOINT_HIT;
 
@@ -425 +425 @@
     RAWEx_ProfileStop(env, STATS_RAW_CHECK);
 
+    {
+        RTGCPTR mypc = env->eip + env->segs[R_CS].base;
+        if (mypc == 0x00fe0d2 || mypc == 0x00f19e9 || mypc == 0x000f0827 || mypc == 0x000fe090) {
+            RTLogFlags(NULL, "enabled");
+            loglevel = ~0;
+            Log(("BANG CRASH!\n"));
+        }
+    }
+#ifdef CONFIG_DEBUG_EXEC
+    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
+        /* restore flags in standard format */
+        regs_to_env();
+        env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
+        log_cpu_state(env, X86_DUMP_CCOP);
+        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+    }
+#endif
     RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
     spin_lock(&tb_lock);
@@ -437 +454 @@
         tb_invalidated_flag = 0;
     }
+#ifdef CONFIG_DEBUG_EXEC
+    qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s [sp=%RGv, bp=%RGv\n",
+                  (long)tb->tc_ptr, tb->pc, lookup_symbol(tb->pc), (RTGCPTR)env->regs[R_ESP], (RTGCPTR)env->regs[R_EBP]);
+#endif
+
 
     /* see if we can patch the calling TB. When the TB
@@ -562 +584 @@
     env_to_regs();
 #if defined(TARGET_I386)
-    /* put eflags in CPU temporary format */
-    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
-    DF = 1 - (2 * ((env->eflags >> 10) & 1));
-    CC_OP = CC_OP_EFLAGS;
-    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+    if (!kvm_enabled()) {
+        /* put eflags in CPU temporary format */
+        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+        DF = 1 - (2 * ((env->eflags >> 10) & 1));
+        CC_OP = CC_OP_EFLAGS;
+        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+    }
 #elif defined(TARGET_SPARC)
 #elif defined(TARGET_M68K)
@@ -579 +603 @@
 #elif defined(TARGET_SH4)
 #elif defined(TARGET_CRIS)
+#elif defined(TARGET_S390X)
     /* XXXXX */
 #else
[... HOST_SOLARIS -> CONFIG_SOLARIS in the three other "__sparc__ glibc workaround" blocks at lines 588, 744 and 974 ...]
@@ -651 +676 @@
                 env->exception_index = -1;
             }
-#ifdef CONFIG_KQEMU
-            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
-                int ret;
-                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
-                ret = kqemu_cpu_exec(env);
-                /* put eflags in CPU temporary format */
-                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
-                DF = 1 - (2 * ((env->eflags >> 10) & 1));
-                CC_OP = CC_OP_EFLAGS;
-                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
-                if (ret == 1) {
-                    /* exception */
-                    longjmp(env->jmp_env, 1);
-                } else if (ret == 2) {
-                    /* softmmu execution needed */
-                } else {
-                    if (env->interrupt_request != 0 || env->exit_request != 0) {
-                        /* hardware interrupt will be executed just after */
-                    } else {
-                        /* otherwise, we restart */
-                        longjmp(env->jmp_env, 1);
-                    }
-                }
-            }
-#endif
 
             if (kvm_enabled()) {
@@ -771 +771 @@
 #if 0
                     if ((interrupt_request & CPU_INTERRUPT_RESET)) {
-                        cpu_ppc_reset(env);
+                        cpu_reset(env);
                     }
 #endif
@@ -815 +815 @@
                         do_interrupt(env);
                         env->interrupt_index = 0;
-#if !defined(CONFIG_USER_ONLY)
-                        cpu_check_irqs(env);
-#endif
                         next_tb = 0;
                     }
@@ -898 +895 @@
                     cpu_loop_exit();
                 }
-#ifdef DEBUG_EXEC
+#ifdef CONFIG_DEBUG_EXEC
                 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                     /* restore flags in standard format */
@@ -944 +941 @@
                     tb_invalidated_flag = 0;
                 }
-#ifdef DEBUG_EXEC
+#ifdef CONFIG_DEBUG_EXEC
                 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                               (long)tb->tc_ptr, tb->pc,
@@ -953 +950 @@
                    jump. */
                 {
-                    if (next_tb != 0 &&
-#ifdef CONFIG_KQEMU
-                        (env->kqemu_enabled != 2) &&
-#endif
-                        tb->page_addr[1] == -1) {
+                    if (next_tb != 0 && tb->page_addr[1] == -1) {
                         tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                     }
@@ -1011 +1004 @@
                 /* reset soft MMU for next block (it can currently
                    only be set by a memory fault) */
-#if defined(CONFIG_KQEMU)
-#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
-                if (kqemu_is_ok(env) &&
-                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
-                    cpu_loop_exit();
-                }
-#endif
             } /* for(;;) */
         } else {
@@ -1042 +1028 @@
 #elif defined(TARGET_ALPHA)
 #elif defined(TARGET_CRIS)
+#elif defined(TARGET_S390X)
     /* XXXXX */
 #else
@@ -1117 +1104 @@
 
 #if defined(TARGET_I386)
+#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
+#else
+#define EXCEPTION_ACTION cpu_loop_exit()
+#endif
 
 /* 'pc' is the host PC at which the exception was raised. 'address' is ...
@@ -1141 +1132 @@
 
     /* see if it is an MMU fault */
-    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
+    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
@@ -1153 +1144 @@
         cpu_restore_state(tb, env, pc, puc);
     }
-    if (ret == 1) {
-#if 0
-        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
-               env->eip, env->cr[2], env->error_code);
-#endif
-        /* we restore the process signal mask as the sigreturn should
-           do it (XXX: use sigsetjmp) */
-        sigprocmask(SIG_SETMASK, old_set, NULL);
-        raise_exception_err(env->exception_index, env->error_code);
-    } else {
-        /* activate soft MMU for this block */
-        env->hflags |= HF_SOFTMMU_MASK;
-        cpu_resume_from_signal(env, puc);
-    }
-    /* never comes here */
-    return 1;
-}
-
-#elif defined(TARGET_ARM)
-[... near-identical per-target handle_cpu_signal() copies for ARM, SPARC,
-     PPC, M68K, MIPS, MicroBlaze, SH4, Alpha and CRIS deleted: each printed
-     a DEBUG_SIGNAL trace, tried page_unprotect(), called its own
-     cpu_<target>_handle_mmu_fault(), restored the signal mask and either
-     raised the fault or resumed from the signal ...]
-#else
-#error unsupported target CPU
-#endif
+
     /* we restore the process signal mask as the sigreturn should
        do it (XXX: use sigsetjmp) */
     sigprocmask(SIG_SETMASK, old_set, NULL);
-    cpu_loop_exit();
+    EXCEPTION_ACTION;
+
     /* never comes here */
     return 1;
 }
@@ -1578 +1162 @@
 # define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
 # define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
+# define MASK_sig(context)    ((context)->uc_sigmask)
+#elif defined (__NetBSD__)
+# include <ucontext.h>
+
+# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
+# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
+# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
+# define MASK_sig(context)    ((context)->uc_sigmask)
+#elif defined (__FreeBSD__) || defined(__DragonFly__)
+# include <ucontext.h>
+
+# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
+# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
+# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
 # define MASK_sig(context)    ((context)->uc_sigmask)
 #elif defined(__OpenBSD__)
@@ -1595 +1193 @@
 {
     siginfo_t *info = pinfo;
-#if defined(__OpenBSD__)
+#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
+    ucontext_t *uc = puc;
+#elif defined(__OpenBSD__)
     struct sigcontext *uc = puc;
 #else
@@ -1629 +1229 @@
 #define ERROR_sig(context)    ((context)->sc_err)
 #define MASK_sig(context)     ((context)->sc_mask)
+#elif defined (__FreeBSD__) || defined(__DragonFly__)
+#include <ucontext.h>
+
+#define PC_sig(context)       (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
+#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
+#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
+#define MASK_sig(context)     ((context)->uc_sigmask)
 #else
 #define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
@@ -1641 +1248 @@
     siginfo_t *info = pinfo;
     unsigned long pc;
-#ifdef __NetBSD__
+#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
     ucontext_t *uc = puc;
 #elif defined(__OpenBSD__)
@@ -1767 +1374 @@
     int is_write;
     uint32_t insn;
-#if !defined(__arch64__) || defined(HOST_SOLARIS)
+#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
     uint32_t *regs = (uint32_t *)(info + 1);
     void *sigmask = (regs + 20);
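Editor's note: the main cleanup in this file collapses nine per-target copies of handle_cpu_signal() into one generic routine, parameterized only by the EXCEPTION_ACTION macro and the target-agnostic cpu_handle_mmu_fault() entry point. A condensed sketch of the resulting control flow, simplified from the hunks above (page_unprotect(), tb_find_pc(), cpu_restore_state() and friends are QEMU internals, not redefined here):

    /* Unified user-mode SIGSEGV handler after this changeset (sketch). */
    static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                        int is_write, sigset_t *old_set, void *puc)
    {
        TranslationBlock *tb;
        int ret;

        if (cpu_single_env)
            env = cpu_single_env;       /* XXX: not multithread-safe */

        /* Write to a page we protected for TB invalidation? Fix and retry. */
        if (is_write && page_unprotect(h2g(address), pc, puc))
            return 1;

        /* One generic call replaces cpu_<target>_handle_mmu_fault(). */
        ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
        if (ret < 0)
            return 0;                   /* not an MMU fault: host's problem */
        if (ret == 0)
            return 1;                   /* handled without a guest fault */

        /* Real guest fault: resync guest CPU state from the host PC. */
        tb = tb_find_pc(pc);
        if (tb)
            cpu_restore_state(tb, env, pc, puc);

        sigprocmask(SIG_SETMASK, old_set, NULL);
        EXCEPTION_ACTION;   /* raise_exception_err() on x86, else cpu_loop_exit() */
        return 1;           /* never reached */
    }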
trunk/src/recompiler/cutils.c
--- trunk/src/recompiler/cutils.c	(r36175)
+++ trunk/src/recompiler/cutils.c	(r37675)
@@ -26 +26 @@
 
 #ifdef VBOX
[... whitespace-only tweak to '# include "osdep.h"' (line 28) ...]
@@ -612 +612 @@
 
 #ifndef VBOX
+/*
+ * Make sure data goes on disk, but if possible do not bother to
+ * write out the inode just for timestamp updates.
+ *
+ * Unfortunately even in 2009 many operating systems do not support
+ * fdatasync and have to fall back to fsync.
+ */
+int qemu_fdatasync(int fd)
+{
+#ifdef CONFIG_FDATASYNC
+    return fdatasync(fd);
+#else
+    return fsync(fd);
+#endif
+}
+
 /* io vectors */
 
@@ -646 +662 @@
     qiov->size += len;
     ++qiov->niov;
 }
+
+/*
+ * Copies iovecs from src to the end dst until src is completely copied or the
+ * total size of the copied iovec reaches size. The size of the last copied
+ * iovec is changed in order to fit the specified total size if it isn't a
+ * perfect fit already.
+ */
+void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size)
+{
+    int i;
+    size_t done;
+
+    assert(dst->nalloc != -1);
+
+    done = 0;
+    for (i = 0; (i < src->niov) && (done != size); i++) {
+        if (done + src->iov[i].iov_len > size) {
+            qemu_iovec_add(dst, src->iov[i].iov_base, size - done);
+            break;
+        } else {
+            qemu_iovec_add(dst, src->iov[i].iov_base, src->iov[i].iov_len);
+        }
+        done += src->iov[i].iov_len;
+    }
+}
 
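Editor's note: qemu_iovec_concat() copies scatter/gather elements up to a byte cap, clipping the last element so the copied total never exceeds 'size'. A standalone illustration of the same capped-copy logic on plain struct iovec, with a toy fixed-capacity container standing in for QEMUIOVector:

    #include <assert.h>
    #include <stdio.h>
    #include <sys/uio.h>

    /* Toy stand-in for QEMUIOVector: fixed capacity, tracks total size. */
    typedef struct {
        struct iovec iov[8];
        int niov;
        size_t size;
    } IOVec;

    static void iovec_add(IOVec *q, void *base, size_t len)
    {
        assert(q->niov < 8);
        q->iov[q->niov].iov_base = base;
        q->iov[q->niov].iov_len = len;
        q->size += len;
        q->niov++;
    }

    /* Same shape as qemu_iovec_concat(): clip the last copied element
       so the total never exceeds 'size'. */
    static void iovec_concat(IOVec *dst, const IOVec *src, size_t size)
    {
        size_t done = 0;
        for (int i = 0; i < src->niov && done != size; i++) {
            size_t len = src->iov[i].iov_len;
            if (done + len > size)
                len = size - done;
            iovec_add(dst, src->iov[i].iov_base, len);
            done += len;
        }
    }

    int main(void)
    {
        char a[100], b[100];
        IOVec src = { .niov = 0 }, dst = { .niov = 0 };
        iovec_add(&src, a, sizeof(a));
        iovec_add(&src, b, sizeof(b));
        iovec_concat(&dst, &src, 150);  /* all of a, clipped 50 bytes of b */
        printf("dst: %d iovecs, %zu bytes\n", dst.niov, dst.size);
        return 0;
    }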
trunk/src/recompiler/def-helper.h
--- trunk/src/recompiler/def-helper.h	(r36170)
+++ trunk/src/recompiler/def-helper.h	(r37675)
@@ -79 +79 @@
 #define dh_retvar_decl0_i32 TCGv_i32 retval
 #define dh_retvar_decl0_i64 TCGv_i64 retval
-#define dh_retvar_decl0_ptr TCGv_iptr retval
+#define dh_retvar_decl0_ptr TCGv_ptr retval
 #define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))
 
@@ -85 +85 @@
 #define dh_retvar_decl_i32 TCGv_i32 retval,
 #define dh_retvar_decl_i64 TCGv_i64 retval,
-#define dh_retvar_decl_ptr TCGv_iptr retval,
+#define dh_retvar_decl_ptr TCGv_ptr retval,
 #define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
trunk/src/recompiler/disas.h
--- trunk/src/recompiler/disas.h	(r36175)
+++ trunk/src/recompiler/disas.h	(r37675)
@@ -4 +4 @@
 #include "qemu-common.h"
 
+#ifdef NEED_CPU_H
 /* Disassemble this for me please... (debugging). */
 void disas(FILE *out, void *code, unsigned long size);
@@ -14 +15 @@
                   target_ulong pc, int nb_insn, int is_physical, int flags);
 #endif
-#endif
+#endif /*!VBOX*/
 
 /* Look up symbol for debugging purpose. Returns "" if unknown. */
 const char *lookup_symbol(target_ulong orig_addr);
+#endif
 
 struct syminfo;
@@ -23 +25 @@
 struct elf64_sym;
 
-typedef const char *(*lookup_symbol_t)(struct syminfo *s, target_ulong orig_addr);
+typedef const char *(*lookup_symbol_t)(struct syminfo *s, target_phys_addr_t orig_addr);
 
 struct syminfo {
trunk/src/recompiler/dyngen-exec.h
--- trunk/src/recompiler/dyngen-exec.h	(r36176)
+++ trunk/src/recompiler/dyngen-exec.h	(r37675)
@@ -46 +46 @@
 #include <stdint.h>
 
-
 #ifdef __OpenBSD__
 #include <sys/types.h>
@@ -54 +53 @@
 typedef void * host_reg_t;
 
-#ifdef HOST_BSD
+#ifdef CONFIG_BSD
 typedef struct __sFILE FILE;
 #else
@@ -62 +61 @@
 extern int fputs(const char *, FILE *);
 extern int printf(const char *, ...);
-#undef NULL
-#define NULL 0
 
 #else /* VBOX */
@@ -101 +98 @@
 #define AREG2 "r15"
 #elif defined(__mips__)
-#define AREG0 "fp"
-#define AREG1 "s0"
-#define AREG2 "s1"
+#define AREG0 "s0"
+#define AREG1 "s1"
+#define AREG2 "fp"
 #elif defined(__sparc__)
-#ifdef HOST_SOLARIS
+#ifdef CONFIG_SOLARIS
 #define AREG0 "g2"
 #define AREG1 "g3"
@@ -149 +146 @@
 /* The return address may point to the start of the next instruction.
    Subtracting one gets us the call instruction itself. */
-#if defined(__s390__)
+#if defined(__s390__) && !defined(__s390x__)
 # define GETPC() ((void*)(((unsigned long)__builtin_return_address(0) & 0x7fffffffUL) - 1))
 #elif defined(__arm__)
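Editor's note: AREG0/AREG1/AREG2 name host registers that the recompiler reserves for itself; the emulator then pins hot globals (most importantly the CPU state pointer) into them with GCC's global register variable extension. A hedged sketch of that pattern, compilable with GCC on x86-64, where this header picks callee-saved registers (the struct and function here are invented for illustration):

    #include <stdio.h>

    typedef struct CPUState { long eip; } CPUState;

    /* Dedicate a callee-saved host register to 'env' for the whole
       translation unit, i.e. register CPUState *env asm(AREG0); */
    register CPUState *env asm("r14");

    /* Helpers read guest state through 'env' with no memory reload. */
    static long get_pc(void) { return env->eip; }

    int main(void)
    {
        CPUState cpu = { .eip = 0x1000 };
        env = &cpu;
        printf("pc=%#lx\n", get_pc());
        return 0;
    }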
trunk/src/recompiler/elf.h
--- trunk/src/recompiler/elf.h	(r36175)
+++ trunk/src/recompiler/elf.h	(r37675)
@@ -455 +455 @@
 #define R_PPC_SECTOFF_HA	36
 /* Keep this the last entry. */
+#ifndef R_PPC_NUM
 #define R_PPC_NUM		37
+#endif
 
 /* ARM specific declarations */
trunk/src/recompiler/exec-all.h
--- trunk/src/recompiler/exec-all.h	(r36175)
+++ trunk/src/recompiler/exec-all.h	(r37675)
@@ -60 +60 @@
 /* A Call op needs up to 6 + 2N parameters (N = number of arguments).  */
 #define MAX_OPC_PARAM 10
-#define OPC_BUF_SIZE 512
+#define OPC_BUF_SIZE 640
 #define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
 
 /* Maximum size a TCG op can expand to.  This is complicated because a
-   single op may require several host instructions and regirster reloads.
-   For now take a wild guess at 128 bytes, which should allow at least
+   single op may require several host instructions and register reloads.
+   For now take a wild guess at 192 bytes, which should allow at least
    a couple of fixup instructions per argument. */
-#define TCG_MAX_OP_SIZE 128
+#define TCG_MAX_OP_SIZE 192
 
 #define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
@@ -137 +137 @@
 #endif
 
-#if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__)
-#define USE_DIRECT_JUMP
-#endif
-#if defined(__i386__) && !defined(_WIN32)
+#if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__) || defined(__i386__)
 #define USE_DIRECT_JUMP
 #endif
@@ -400 +397 @@
 #endif
 
-#ifdef CONFIG_KQEMU
-#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
-
-#define MSR_QPI_COMMBASE 0xfabe0010
-
-int kqemu_init(CPUState *env);
-int kqemu_cpu_exec(CPUState *env);
-void kqemu_flush_page(CPUState *env, target_ulong addr);
-void kqemu_flush(CPUState *env, int global);
-void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
-void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
-void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
-                        ram_addr_t phys_offset);
-void kqemu_cpu_interrupt(CPUState *env);
-void kqemu_record_dump(void);
-
-extern uint32_t kqemu_comm_base;
-
-extern ram_addr_t kqemu_phys_ram_size;
-extern uint8_t *kqemu_phys_ram_base;
-
-static inline int kqemu_is_ok(CPUState *env)
-{
-    return(env->kqemu_enabled &&
-           (env->cr[0] & CR0_PE_MASK) &&
-           !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
-           (env->eflags & IF_MASK) &&
-           !(env->eflags & VM_MASK) &&
-           (env->kqemu_enabled == 2 ||
-            ((env->hflags & HF_CPL_MASK) == 3 &&
-             (env->eflags & IOPL_MASK) != IOPL_MASK)));
-}
-
-#endif
-
 typedef void (CPUDebugExcpHandler)(CPUState *env);
 
@@ -442 +404 @@
 #ifndef VBOX
 extern int singlestep;
-#endif
-
-#endif
+#endif /*!VBOX*/
+
+#endif
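Editor's note: OPC_BUF_SIZE caps how many TCG ops are buffered for one translation block and TCG_MAX_OP_SIZE is the guessed maximum host bytes a single op can expand to, so reading the two as a product gives a worst-case bound on generated code per block, growing here from 512 x 128 = 64 KiB to 640 x 192 = 120 KiB. That product reading is my plain interpretation of the macros, not a statement from the changeset; a quick check of the arithmetic:

    #include <stdio.h>

    #define OPC_BUF_SIZE    640   /* max buffered TCG ops per translation block */
    #define TCG_MAX_OP_SIZE 192   /* guessed max host bytes per op */

    int main(void)
    {
        printf("old bound: %d KiB\n", 512 * 128 / 1024);                      /* 64  */
        printf("new bound: %d KiB\n", OPC_BUF_SIZE * TCG_MAX_OP_SIZE / 1024); /* 120 */
        return 0;
    }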
trunk/src/recompiler/exec.c
r36490 r37675 92 92 #elif defined(TARGET_PPC64) 93 93 #define TARGET_PHYS_ADDR_SPACE_BITS 42 94 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)94 #elif defined(TARGET_X86_64) 95 95 #define TARGET_PHYS_ADDR_SPACE_BITS 42 96 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)96 #elif defined(TARGET_I386) 97 97 #define TARGET_PHYS_ADDR_SPACE_BITS 36 98 98 #else 99 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */100 99 #define TARGET_PHYS_ADDR_SPACE_BITS 32 101 100 #endif … … 261 260 typedef struct subpage_t { 262 261 target_phys_addr_t base; 263 CPUReadMemoryFunc * *mem_read[TARGET_PAGE_SIZE][4];264 CPUWriteMemoryFunc * *mem_write[TARGET_PAGE_SIZE][4];262 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4]; 263 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4]; 265 264 void *opaque[TARGET_PAGE_SIZE][2][4]; 266 265 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4]; … … 514 513 515 514 #ifdef VBOX /* We don't need such huge codegen buffer size, as execute 516 most of the code 515 most of the code in raw or hwacc mode. */ 517 516 #define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024) 518 517 #else /* !VBOX */ … … 554 553 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE; 555 554 #else 556 /* XXX: needs a justments */557 code_gen_buffer_size = (unsigned long)( phys_ram_size / 4);555 /* XXX: needs adjustments */ 556 code_gen_buffer_size = (unsigned long)(ram_size / 4); 558 557 #endif 559 558 } … … 604 603 } 605 604 } 606 #elif defined(__FreeBSD__) || defined(__ DragonFly__)605 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) 607 606 { 608 607 int flags; … … 632 631 # endif /* !VBOX */ 633 632 #endif /* !USE_STATIC_CODE_GEN_BUFFER */ 634 #ifndef VBOX 633 #ifndef VBOX /** @todo r=bird: why are we different? */ 635 634 map_exec(code_gen_prologue, sizeof(code_gen_prologue)); 636 635 #else … … 660 659 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) 661 660 662 #define CPU_COMMON_SAVE_VERSION 1 663 664 static void cpu_common_save(QEMUFile *f, void *opaque) 661 static void cpu_common_pre_save(void *opaque) 665 662 { 666 663 CPUState *env = opaque; 667 664 668 cpu_synchronize_state(env, 0); 669 670 qemu_put_be32s(f, &env->halted); 671 qemu_put_be32s(f, &env->interrupt_request); 672 } 673 674 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id) 665 cpu_synchronize_state(env); 666 } 667 668 static int cpu_common_pre_load(void *opaque) 675 669 { 676 670 CPUState *env = opaque; 677 671 678 if (version_id != CPU_COMMON_SAVE_VERSION) 679 return -EINVAL; 680 681 qemu_get_be32s(f, &env->halted); 682 qemu_get_be32s(f, &env->interrupt_request); 672 cpu_synchronize_state(env); 673 return 0; 674 } 675 676 static int cpu_common_post_load(void *opaque, int version_id) 677 { 678 CPUState *env = opaque; 679 683 680 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the 684 681 version_id is increased. 
*/ 685 682 env->interrupt_request &= ~0x01; 686 683 tlb_flush(env, 1); 687 cpu_synchronize_state(env, 1);688 684 689 685 return 0; 690 686 } 687 688 static const VMStateDescription vmstate_cpu_common = { 689 .name = "cpu_common", 690 .version_id = 1, 691 .minimum_version_id = 1, 692 .minimum_version_id_old = 1, 693 .pre_save = cpu_common_pre_save, 694 .pre_load = cpu_common_pre_load, 695 .post_load = cpu_common_post_load, 696 .fields = (VMStateField []) { 697 VMSTATE_UINT32(halted, CPUState), 698 VMSTATE_UINT32(interrupt_request, CPUState), 699 VMSTATE_END_OF_LIST() 700 } 701 }; 691 702 #endif 692 703 … … 723 734 env->cpu_index = cpu_index; 724 735 env->numa_node = 0; 725 TAILQ_INIT(&env->breakpoints);726 TAILQ_INIT(&env->watchpoints);736 QTAILQ_INIT(&env->breakpoints); 737 QTAILQ_INIT(&env->watchpoints); 727 738 *penv = env; 728 739 #ifndef VBOX … … 731 742 #endif 732 743 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) 733 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION, 734 cpu_common_save, cpu_common_load, env); 744 vmstate_register(cpu_index, &vmstate_cpu_common, env); 735 745 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION, 736 746 cpu_save, cpu_load, env); … … 1562 1572 /* keep all GDB-injected watchpoints in front */ 1563 1573 if (flags & BP_GDB) 1564 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);1574 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry); 1565 1575 else 1566 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);1576 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry); 1567 1577 1568 1578 tlb_flush_page(env, addr); … … 1580 1590 CPUWatchpoint *wp; 1581 1591 1582 TAILQ_FOREACH(wp, &env->watchpoints, entry) {1592 QTAILQ_FOREACH(wp, &env->watchpoints, entry) { 1583 1593 if (addr == wp->vaddr && len_mask == wp->len_mask 1584 1594 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) { … … 1597 1607 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint) 1598 1608 { 1599 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);1609 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry); 1600 1610 1601 1611 tlb_flush_page(env, watchpoint->vaddr); … … 1609 1619 CPUWatchpoint *wp, *next; 1610 1620 1611 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {1621 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) { 1612 1622 if (wp->flags & mask) 1613 1623 cpu_watchpoint_remove_by_ref(env, wp); … … 1629 1639 /* keep all GDB-injected breakpoints in front */ 1630 1640 if (flags & BP_GDB) 1631 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);1641 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry); 1632 1642 else 1633 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);1643 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry); 1634 1644 1635 1645 breakpoint_invalidate(env, pc); … … 1649 1659 CPUBreakpoint *bp; 1650 1660 1651 TAILQ_FOREACH(bp, &env->breakpoints, entry) {1661 QTAILQ_FOREACH(bp, &env->breakpoints, entry) { 1652 1662 if (bp->pc == pc && bp->flags == flags) { 1653 1663 cpu_breakpoint_remove_by_ref(env, bp); … … 1669 1679 { 1670 1680 #if defined(TARGET_HAS_ICE) 1671 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);1681 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry); 1672 1682 1673 1683 breakpoint_invalidate(env, breakpoint->pc); … … 1683 1693 CPUBreakpoint *bp, *next; 1684 1694 1685 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {1695 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) { 1686 1696 if (bp->flags & mask) 1687 1697 cpu_breakpoint_remove_by_ref(env, bp); … … 1726 1736 setvbuf(logfile, logfile_buf, _IOLBF, 
sizeof(logfile_buf)); 1727 1737 } 1728 #else 1738 #elif !defined(_WIN32) 1739 /* Win32 doesn't support line-buffering and requires size >= 2 */ 1729 1740 setvbuf(logfile, NULL, _IOLBF, 0); 1730 1741 #endif … … 1751 1762 static void cpu_unlink_tb(CPUState *env) 1752 1763 { 1753 #if defined( USE_NPTL)1764 #if defined(CONFIG_USE_NPTL) 1754 1765 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the 1755 1766 problem and hope the cpu will stop of its own accord. For userspace … … 1937 1948 #endif /* !VBOX */ 1938 1949 1939 #ifndef VBOX 1950 #ifndef VBOX /* not needed */ 1940 1951 CPUState *cpu_copy(CPUState *env) 1941 1952 { … … 1957 1968 Note: Once we support ptrace with hw-debug register access, make sure 1958 1969 BP_CPU break/watchpoints are handled correctly on clone. */ 1959 TAILQ_INIT(&env->breakpoints);1960 TAILQ_INIT(&env->watchpoints);1970 QTAILQ_INIT(&env->breakpoints); 1971 QTAILQ_INIT(&env->watchpoints); 1961 1972 #if defined(TARGET_HAS_ICE) 1962 TAILQ_FOREACH(bp, &env->breakpoints, entry) {1973 QTAILQ_FOREACH(bp, &env->breakpoints, entry) { 1963 1974 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL); 1964 1975 } 1965 TAILQ_FOREACH(wp, &env->watchpoints, entry) {1976 QTAILQ_FOREACH(wp, &env->watchpoints, entry) { 1966 1977 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1, 1967 1978 wp->flags, NULL); … … 2024 2035 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); 2025 2036 2026 #ifdef CONFIG_KQEMU2027 if (env->kqemu_enabled) {2028 kqemu_flush(env, flush_global);2029 }2030 #endif2031 2037 #ifdef VBOX 2032 2038 /* inform raw mode about TLB flush */ … … 2066 2072 2067 2073 tlb_flush_jmp_cache(env, addr); 2068 2069 #ifdef CONFIG_KQEMU2070 if (env->kqemu_enabled) {2071 kqemu_flush_page(env, addr);2072 }2073 #endif2074 2074 } 2075 2075 … … 2131 2131 return; 2132 2132 len = length >> TARGET_PAGE_BITS; 2133 #ifdef CONFIG_KQEMU2134 /* XXX: should not depend on cpu context */2135 env = first_cpu;2136 if (env->kqemu_enabled) {2137 ram_addr_t addr;2138 addr = start;2139 for(i = 0; i < len; i++) {2140 kqemu_set_notdirty(env, addr);2141 addr += TARGET_PAGE_SIZE;2142 }2143 }2144 #endif2145 2133 mask = ~dirty_flags; 2146 2134 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS); … … 2367 2355 /* Make accesses to pages with watchpoints go via the 2368 2356 watchpoint trap routines. */ 2369 TAILQ_FOREACH(wp, &env->watchpoints, entry) {2357 QTAILQ_FOREACH(wp, &env->watchpoints, entry) { 2370 2358 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) { 2371 2359 iotlb = io_mem_watch + paddr; … … 2672 2660 } while (0) 2673 2661 2674 /* register physical memory. 'size' must be a multiple of the target 2675 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an 2662 /* register physical memory. 2663 For RAM, 'size' must be a multiple of the target page size. 2664 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an 2676 2665 io memory page. The address used when calling the IO function is 2677 2666 the offset from the start of the region, plus region_offset. 
Both … … 2690 2679 void *subpage; 2691 2680 2692 #ifdef CONFIG_KQEMU2693 /* XXX: should not depend on cpu context */2694 env = first_cpu;2695 if (env->kqemu_enabled) {2696 kqemu_set_phys_mem(start_addr, size, phys_offset);2697 }2698 #endif2699 2681 if (kvm_enabled()) 2700 2682 kvm_set_phys_mem(start_addr, size, phys_offset); … … 2792 2774 } 2793 2775 2794 #ifdef CONFIG_KQEMU2795 /* XXX: better than nothing */2796 static ram_addr_t kqemu_ram_alloc(ram_addr_t size)2797 {2798 ram_addr_t addr;2799 if ((last_ram_offset + size) > kqemu_phys_ram_size) {2800 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",2801 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);2802 abort();2803 }2804 addr = last_ram_offset;2805 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);2806 return addr;2807 }2808 #endif2809 2810 2776 ram_addr_t qemu_ram_alloc(ram_addr_t size) 2811 2777 { 2812 2778 RAMBlock *new_block; 2813 2814 #ifdef CONFIG_KQEMU2815 if (kqemu_phys_ram_base) {2816 return kqemu_ram_alloc(size);2817 }2818 #endif2819 2779 2820 2780 size = TARGET_PAGE_ALIGN(size); 2821 2781 new_block = qemu_malloc(sizeof(*new_block)); 2822 2782 2783 #if defined(TARGET_S390X) && defined(CONFIG_KVM) 2784 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */ 2785 new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE, 2786 MAP_SHARED | MAP_ANONYMOUS, -1, 0); 2787 #else 2823 2788 new_block->host = qemu_vmalloc(size); 2789 #endif 2790 #ifdef MADV_MERGEABLE 2791 madvise(new_block->host, size, MADV_MERGEABLE); 2792 #endif 2824 2793 new_block->offset = last_ram_offset; 2825 2794 new_block->length = size; … … 2859 2828 RAMBlock **prevp; 2860 2829 RAMBlock *block; 2861 2862 #ifdef CONFIG_KQEMU2863 if (kqemu_phys_ram_base) {2864 return kqemu_phys_ram_base + addr;2865 }2866 #endif2867 2830 2868 2831 prev = NULL; … … 2898 2861 uint8_t *host = ptr; 2899 2862 2900 #ifdef CONFIG_KQEMU2901 if (kqemu_phys_ram_base) {2902 return host - kqemu_phys_ram_base;2903 }2904 #endif2905 2906 2863 prev = NULL; 2907 2864 prevp = &ram_blocks; … … 2928 2885 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 2929 2886 #endif 2930 #if defined(TARGET_SPARC) 2887 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) 2931 2888 do_unassigned_access(addr, 0, 0, 0, 1); 2932 2889 #endif … … 2939 2896 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 2940 2897 #endif 2941 #if defined(TARGET_SPARC) 2898 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) 2942 2899 do_unassigned_access(addr, 0, 0, 0, 2); 2943 2900 #endif … … 2950 2907 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 2951 2908 #endif 2952 #if defined(TARGET_SPARC) 2909 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) 2953 2910 do_unassigned_access(addr, 0, 0, 0, 4); 2954 2911 #endif … … 2961 2918 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 2962 2919 #endif 2963 #if defined(TARGET_SPARC) 2920 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) 2964 2921 do_unassigned_access(addr, 1, 0, 0, 1); 2965 2922 #endif … … 2971 2928 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 2972 2929 #endif 2973 #if defined(TARGET_SPARC) 2930 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) 2974 2931 do_unassigned_access(addr, 1, 0, 0, 2); 2975 2932 #endif … … 2981 2938 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 2982 2939 #endif 2983 #if defined(TARGET_SPARC) 2940 #if defined(TARGET_SPARC) || 
defined(TARGET_MICROBLAZE) 2984 2941 do_unassigned_access(addr, 1, 0, 0, 4); 2985 2942 #endif 2986 2943 } 2987 2944 2988 static CPUReadMemoryFunc * unassigned_mem_read[3] = {2945 static CPUReadMemoryFunc * const unassigned_mem_read[3] = { 2989 2946 unassigned_mem_readb, 2990 2947 unassigned_mem_readw, … … 2992 2949 }; 2993 2950 2994 static CPUWriteMemoryFunc * unassigned_mem_write[3] = {2951 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = { 2995 2952 unassigned_mem_writeb, 2996 2953 unassigned_mem_writew, … … 3066 3023 stw_p(qemu_get_ram_ptr(ram_addr), val); 3067 3024 #endif 3068 #ifdef CONFIG_KQEMU3069 if (cpu_single_env->kqemu_enabled &&3070 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)3071 kqemu_modify_page(cpu_single_env, ram_addr);3072 #endif3073 3025 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 3074 3026 #ifdef VBOX … … 3108 3060 stl_p(qemu_get_ram_ptr(ram_addr), val); 3109 3061 #endif 3110 #ifdef CONFIG_KQEMU3111 if (cpu_single_env->kqemu_enabled &&3112 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)3113 kqemu_modify_page(cpu_single_env, ram_addr);3114 #endif3115 3062 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 3116 3063 #ifdef VBOX … … 3124 3071 } 3125 3072 3126 static CPUReadMemoryFunc * error_mem_read[3] = {3073 static CPUReadMemoryFunc * const error_mem_read[3] = { 3127 3074 NULL, /* never used */ 3128 3075 NULL, /* never used */ … … 3130 3077 }; 3131 3078 3132 static CPUWriteMemoryFunc * notdirty_mem_write[3] = {3079 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = { 3133 3080 notdirty_mem_writeb, 3134 3081 notdirty_mem_writew, … … 3154 3101 } 3155 3102 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset; 3156 TAILQ_FOREACH(wp, &env->watchpoints, entry) {3103 QTAILQ_FOREACH(wp, &env->watchpoints, entry) { 3157 3104 if ((vaddr == (wp->vaddr & len_mask) || 3158 3105 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) { … … 3223 3170 } 3224 3171 3225 static CPUReadMemoryFunc * watch_mem_read[3] = {3172 static CPUReadMemoryFunc * const watch_mem_read[3] = { 3226 3173 watch_mem_readb, 3227 3174 watch_mem_readw, … … 3229 3176 }; 3230 3177 3231 static CPUWriteMemoryFunc * watch_mem_write[3] = {3178 static CPUWriteMemoryFunc * const watch_mem_write[3] = { 3232 3179 watch_mem_writeb, 3233 3180 watch_mem_writew, … … 3321 3268 } 3322 3269 3323 static CPUReadMemoryFunc * subpage_read[] = {3270 static CPUReadMemoryFunc * const subpage_read[] = { 3324 3271 &subpage_readb, 3325 3272 &subpage_readw, … … 3327 3274 }; 3328 3275 3329 static CPUWriteMemoryFunc * subpage_write[] = {3276 static CPUWriteMemoryFunc * const subpage_write[] = { 3330 3277 &subpage_writeb, 3331 3278 &subpage_writew, … … 3408 3355 returned if error. 
*/ 3409 3356 static int cpu_register_io_memory_fixed(int io_index, 3410 CPUReadMemoryFunc * *mem_read,3411 CPUWriteMemoryFunc * *mem_write,3357 CPUReadMemoryFunc * const *mem_read, 3358 CPUWriteMemoryFunc * const *mem_write, 3412 3359 void *opaque) 3413 3360 { … … 3434 3381 } 3435 3382 3436 int cpu_register_io_memory(CPUReadMemoryFunc * *mem_read,3437 CPUWriteMemoryFunc * *mem_write,3383 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read, 3384 CPUWriteMemoryFunc * const *mem_write, 3438 3385 void *opaque) 3439 3386 { … … 3466 3413 io_mem_watch = cpu_register_io_memory(watch_mem_read, 3467 3414 watch_mem_write, NULL); 3468 #ifdef CONFIG_KQEMU3469 if (kqemu_phys_ram_base) {3470 /* alloc dirty bits array */3471 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);3472 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);3473 }3474 #endif3475 3415 } 3476 3416 … … 3704 3644 void *opaque; 3705 3645 void (*callback)(void *opaque); 3706 LIST_ENTRY(MapClient) link;3646 QLIST_ENTRY(MapClient) link; 3707 3647 } MapClient; 3708 3648 3709 static LIST_HEAD(map_client_list, MapClient) map_client_list3710 = LIST_HEAD_INITIALIZER(map_client_list);3649 static QLIST_HEAD(map_client_list, MapClient) map_client_list 3650 = QLIST_HEAD_INITIALIZER(map_client_list); 3711 3651 3712 3652 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)) … … 3716 3656 client->opaque = opaque; 3717 3657 client->callback = callback; 3718 LIST_INSERT_HEAD(&map_client_list, client, link);3658 QLIST_INSERT_HEAD(&map_client_list, client, link); 3719 3659 return client; 3720 3660 } … … 3724 3664 MapClient *client = (MapClient *)_client; 3725 3665 3726 LIST_REMOVE(client, link);3666 QLIST_REMOVE(client, link); 3727 3667 qemu_free(client); 3728 3668 } … … 3732 3672 MapClient *client; 3733 3673 3734 while (! LIST_EMPTY(&map_client_list)) {3735 client = LIST_FIRST(&map_client_list);3674 while (!QLIST_EMPTY(&map_client_list)) { 3675 client = QLIST_FIRST(&map_client_list); 3736 3676 client->callback(client->opaque); 3737 3677 cpu_unregister_map_client(client); … … 3832 3772 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len); 3833 3773 } 3834 qemu_ free(bounce.buffer);3774 qemu_vfree(bounce.buffer); 3835 3775 bounce.buffer = NULL; 3836 3776 cpu_notify_map_clients(); -
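Side note on the exec.c hunk above: the I/O callback tables become "CPUReadMemoryFunc * const" arrays of three entries, indexed by log2 of the access width (byte/word/long). A minimal standalone sketch of that dispatch pattern, assuming hypothetical names (mmio_read_cb, demo_read, demo_region_read) that are not part of this changeset:

    /* Size-indexed I/O dispatch sketch; not VirtualBox/QEMU code. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t (*mmio_read_cb)(void *opaque, uint32_t addr);

    static uint32_t demo_readb(void *opaque, uint32_t addr) { return 0x11; }
    static uint32_t demo_readw(void *opaque, uint32_t addr) { return 0x2222; }
    static uint32_t demo_readl(void *opaque, uint32_t addr) { return 0x33333333; }

    /* 'const' on the table (the change made above) lets it live in .rodata. */
    static mmio_read_cb const demo_read[3] = { demo_readb, demo_readw, demo_readl };

    static uint32_t demo_region_read(uint32_t addr, unsigned size_bytes)
    {
        unsigned idx = (size_bytes == 1) ? 0 : (size_bytes == 2) ? 1 : 2;
        return demo_read[idx](NULL, addr);
    }

    int main(void)
    {
        printf("%x %x %x\n", demo_region_read(0, 1),
               demo_region_read(0, 2), demo_region_read(0, 4));
        return 0;
    }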
trunk/src/recompiler/fpu/softfloat-native.c
r36175 r37675 3 3 #include "softfloat.h" 4 4 #include <math.h> 5 #if defined( HOST_SOLARIS)5 #if defined(CONFIG_SOLARIS) 6 6 #include <fenv.h> 7 7 #endif … … 10 10 { 11 11 STATUS(float_rounding_mode) = val; 12 #if defined(HOST_BSD) && !defined(__APPLE__) ||\13 (defined( HOST_SOLARIS) && (HOST_SOLARIS < 10 || HOST_SOLARIS== 11)) /* VBOX adds sol 11 */12 #if (defined(CONFIG_BSD) && !defined(__APPLE__) && !defined(__GLIBC__)) || \ 13 (defined(CONFIG_SOLARIS) && (CONFIG_SOLARIS_VERSION < 10 || CONFIG_SOLARIS_VERSION == 11)) /* VBOX adds sol 11 */ 14 14 fpsetround(val); 15 15 #elif defined(__arm__) … … 27 27 #endif 28 28 29 #if defined(HOST_BSD) || (defined(HOST_SOLARIS) && HOST_SOLARIS < 10) 29 #if defined(CONFIG_BSD) || \ 30 (defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10) 30 31 #define lrint(d) ((int32_t)rint(d)) 31 32 #define llrint(d) ((int64_t)rint(d)) … … 43 44 # define remainderl(fa, fb) (remainder(fa, fb)) 44 45 # endif /* VBOX && _BSD */ 45 46 #if !defined(__sparc__) && defined(HOST_SOLARIS) && HOST_SOLARIS < 10 46 #if !defined(__sparc__) && \ 47 (defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10) 47 48 extern long double rintl(long double); 48 49 extern long double scalbnl(long double, int); … … 359 360 | Software IEC/IEEE double-precision operations. 360 361 *----------------------------------------------------------------------------*/ 361 #if defined(__sun__) && defined(HOST_SOLARIS) && HOST_SOLARIS < 10 362 #if defined(__sun__) && \ 363 (defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10) 362 364 static inline float64 trunc(float64 x) 363 365 { -
trunk/src/recompiler/fpu/softfloat-native.h
r36414 r37675 20 20 * are defined in <iso/math_c99.h> with a compiler directive 21 21 */ 22 #if defined(HOST_SOLARIS) && (( HOST_SOLARIS <= 9 ) || ((HOST_SOLARIS >= 10) \ 23 && (__GNUC__ < 4))) \ 22 #if defined(CONFIG_SOLARIS) && \ 23 ((CONFIG_SOLARIS_VERSION <= 9 ) || \ 24 ((CONFIG_SOLARIS_VERSION >= 10) && (__GNUC__ < 4))) \ 24 25 || (defined(__OpenBSD__) && (OpenBSD < 200811)) 25 26 /* … … 62 63 #endif 63 64 64 #if defined(__sun__) && !defined( NEED_LIBSUNMATH)65 #if defined(__sun__) && !defined(CONFIG_NEEDS_LIBSUNMATH) 65 66 66 67 #ifndef isnan … … 112 113 | Software IEC/IEEE floating-point rounding mode. 113 114 *----------------------------------------------------------------------------*/ 114 #if (defined(HOST_BSD) && !defined(__APPLE__)) || defined(HOST_SOLARIS) 115 #if (defined(CONFIG_BSD) && !defined(__APPLE__) && !defined(__GLIBC__)) \ 116 || defined(CONFIG_SOLARIS) 115 117 #if defined(__OpenBSD__) 116 118 #define FE_RM FP_RM -
trunk/src/recompiler/fpu/softfloat.c
r36170 r37675 2456 2456 return roundAndPackFloat32( aSign, aExp, zSig STATUS_VAR ); 2457 2457 2458 } 2459 2460 2461 /*---------------------------------------------------------------------------- 2462 | Packs the sign `zSign', exponent `zExp', and significand `zSig' into a 2463 | half-precision floating-point value, returning the result. After being 2464 | shifted into the proper positions, the three fields are simply added 2465 | together to form the result. This means that any integer portion of `zSig' 2466 | will be added into the exponent. Since a properly normalized significand 2467 | will have an integer portion equal to 1, the `zExp' input should be 1 less 2468 | than the desired result exponent whenever `zSig' is a complete, normalized 2469 | significand. 2470 *----------------------------------------------------------------------------*/ 2471 static bits16 packFloat16(flag zSign, int16 zExp, bits16 zSig) 2472 { 2473 return (((bits32)zSign) << 15) + (((bits32)zExp) << 10) + zSig; 2474 } 2475 2476 /* Half precision floats come in two formats: standard IEEE and "ARM" format. 2477 The latter gains extra exponent range by omitting the NaN/Inf encodings. */ 2478 2479 float32 float16_to_float32( bits16 a, flag ieee STATUS_PARAM ) 2480 { 2481 flag aSign; 2482 int16 aExp; 2483 bits32 aSig; 2484 2485 aSign = a >> 15; 2486 aExp = (a >> 10) & 0x1f; 2487 aSig = a & 0x3ff; 2488 2489 if (aExp == 0x1f && ieee) { 2490 if (aSig) { 2491 /* Make sure correct exceptions are raised. */ 2492 float32ToCommonNaN(a STATUS_VAR); 2493 aSig |= 0x200; 2494 } 2495 return packFloat32(aSign, 0xff, aSig << 13); 2496 } 2497 if (aExp == 0) { 2498 int8 shiftCount; 2499 2500 if (aSig == 0) { 2501 return packFloat32(aSign, 0, 0); 2502 } 2503 2504 shiftCount = countLeadingZeros32( aSig ) - 21; 2505 aSig = aSig << shiftCount; 2506 aExp = -shiftCount; 2507 } 2508 return packFloat32( aSign, aExp + 0x70, aSig << 13); 2509 } 2510 2511 bits16 float32_to_float16( float32 a, flag ieee STATUS_PARAM) 2512 { 2513 flag aSign; 2514 int16 aExp; 2515 bits32 aSig; 2516 bits32 mask; 2517 bits32 increment; 2518 int8 roundingMode; 2519 2520 aSig = extractFloat32Frac( a ); 2521 aExp = extractFloat32Exp( a ); 2522 aSign = extractFloat32Sign( a ); 2523 if ( aExp == 0xFF ) { 2524 if (aSig) { 2525 /* Make sure correct exceptions are raised. */ 2526 float32ToCommonNaN(a STATUS_VAR); 2527 aSig |= 0x00400000; 2528 } 2529 return packFloat16(aSign, 0x1f, aSig >> 13); 2530 } 2531 if (aExp == 0 && aSign == 0) { 2532 return packFloat16(aSign, 0, 0); 2533 } 2534 /* Decimal point between bits 22 and 23. */ 2535 aSig |= 0x00800000; 2536 aExp -= 0x7f; 2537 if (aExp < -14) { 2538 mask = 0x007fffff; 2539 if (aExp < -24) { 2540 aExp = -25; 2541 } else { 2542 mask >>= 24 + aExp; 2543 } 2544 } else { 2545 mask = 0x00001fff; 2546 } 2547 if (aSig & mask) { 2548 float_raise( float_flag_underflow STATUS_VAR ); 2549 roundingMode = STATUS(float_rounding_mode); 2550 switch (roundingMode) { 2551 case float_round_nearest_even: 2552 increment = (mask + 1) >> 1; 2553 if ((aSig & mask) == increment) { 2554 increment = aSig & (increment << 1); 2555 } 2556 break; 2557 case float_round_up: 2558 increment = aSign ? 0 : mask; 2559 break; 2560 case float_round_down: 2561 increment = aSign ? 
mask : 0; 2562 break; 2563 default: /* round_to_zero */ 2564 increment = 0; 2565 break; 2566 } 2567 aSig += increment; 2568 if (aSig >= 0x01000000) { 2569 aSig >>= 1; 2570 aExp++; 2571 } 2572 } else if (aExp < -14 2573 && STATUS(float_detect_tininess) == float_tininess_before_rounding) { 2574 float_raise( float_flag_underflow STATUS_VAR); 2575 } 2576 2577 if (ieee) { 2578 if (aExp > 15) { 2579 float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR); 2580 return packFloat16(aSign, 0x1f, 0); 2581 } 2582 } else { 2583 if (aExp > 16) { 2584 float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR); 2585 return packFloat16(aSign, 0x1f, 0x3ff); 2586 } 2587 } 2588 if (aExp < -24) { 2589 return packFloat16(aSign, 0, 0); 2590 } 2591 if (aExp < -14) { 2592 aSig >>= -14 - aExp; 2593 aExp = -14; 2594 } 2595 return packFloat16(aSign, aExp + 14, aSig >> 13); 2458 2596 } 2459 2597 -
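Side note on the softfloat.c hunk above: packFloat16() assembles a half-precision value as sign<<15 + exp<<10 + frac, and (as its comment explains) any integer part of the significand intentionally carries into the exponent. A self-contained sketch of just that packing step; pack_float16 is a hypothetical name and the real rounding/NaN handling stays in the hunk above:

    #include <stdint.h>
    #include <stdio.h>

    /* float16 layout: 1 sign bit, 5 exponent bits, 10 fraction bits. */
    static uint16_t pack_float16(unsigned sign, int exp, uint16_t frac)
    {
        /* Addition (not OR) lets a significand overflow bump the exponent. */
        return (uint16_t)(((uint32_t)sign << 15) + ((uint32_t)exp << 10) + frac);
    }

    int main(void)
    {
        /* 1.0 in IEEE half precision: sign 0, biased exponent 15, frac 0. */
        printf("0x%04x\n", pack_float16(0, 15, 0)); /* prints 0x3c00 */
        return 0;
    }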
trunk/src/recompiler/fpu/softfloat.h
r36175 r37675 37 37 #endif 38 38 39 #if defined( HOST_SOLARIS) && defined(NEEDS_LIBSUNMATH)39 #if defined(CONFIG_SOLARIS) && defined(CONFIG_NEEDS_LIBSUNMATH) 40 40 #include <sunmath.h> 41 41 #endif … … 95 95 #else 96 96 /* native float support */ 97 #if (defined(__i386__) || defined(__x86_64__)) && (!defined( HOST_BSD) || defined(VBOX)) /** @todo VBOX: not correct on windows */97 #if (defined(__i386__) || defined(__x86_64__)) && (!defined(CONFIG_BSD) || defined(VBOX)) /** @todo VBOX: not correct on windows */ 98 98 #define FLOATX80 99 99 #endif … … 155 155 #ifdef FLOAT128 156 156 typedef struct { 157 #ifdef WORDS_BIGENDIAN157 #ifdef HOST_WORDS_BIGENDIAN 158 158 uint64_t high, low; 159 159 #else … … 250 250 float128 int64_to_float128( int64_t STATUS_PARAM ); 251 251 #endif 252 253 /*---------------------------------------------------------------------------- 254 | Software half-precision conversion routines. 255 *----------------------------------------------------------------------------*/ 256 bits16 float32_to_float16( float32, flag STATUS_PARAM ); 257 float32 float16_to_float32( bits16, flag STATUS_PARAM ); 252 258 253 259 /*---------------------------------------------------------------------------- -
trunk/src/recompiler/gen-icount.h
r36170 r37675
12  12
13  13      icount_label = gen_new_label();
14          /* FIXME: This generates lousy code.  We can't use tcg_new_temp because
15             count needs to live over the conditional branch.  To workaround this
16             we allow the target to supply a convenient register temporary.  */
17          #ifndef ICOUNT_TEMP
18  14      count = tcg_temp_local_new_i32();
19          #else
20          count = ICOUNT_TEMP;
21          #endif
22  15      tcg_gen_ld_i32(count, cpu_env, offsetof(CPUState, icount_decr.u32));
23  16      /* This is a horrid hack to allow fixing up the value later.  */
…  …
27  20      tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
28  21      tcg_gen_st16_i32(count, cpu_env, offsetof(CPUState, icount_decr.u16.low));
29          #ifndef ICOUNT_TEMP
30  22      tcg_temp_free_i32(count);
31          #endif
32  23  }
trunk/src/recompiler/host-utils.c
r17040 r37675
24  24   */
25  25
26          #include "exec.h"
    26  #include <stdlib.h>
    27  #ifndef VBOX
    28  #include <stdint.h>
    29  #else
    30  # include <iprt/types.h>
    31  #endif
27  32  #include "host-utils.h"
28  33
trunk/src/recompiler/host-utils.h
r36125 r37675 28 28 #if defined(__x86_64__) 29 29 #define __HAVE_FAST_MULU64__ 30 static always_inline void mulu64(uint64_t *plow, uint64_t *phigh,31 30 static inline void mulu64(uint64_t *plow, uint64_t *phigh, 31 uint64_t a, uint64_t b) 32 32 { 33 33 __asm__ ("mul %0\n\t" … … 36 36 } 37 37 #define __HAVE_FAST_MULS64__ 38 static always_inline void muls64(uint64_t *plow, uint64_t *phigh,39 38 static inline void muls64(uint64_t *plow, uint64_t *phigh, 39 int64_t a, int64_t b) 40 40 { 41 41 __asm__ ("imul %0\n\t" … … 50 50 /* Binary search for leading zeros. */ 51 51 52 static always_inline int clz32(uint32_t val)52 static inline int clz32(uint32_t val) 53 53 { 54 54 #if QEMU_GNUC_PREREQ(3, 4) … … 87 87 } 88 88 89 static always_inline int clo32(uint32_t val)89 static inline int clo32(uint32_t val) 90 90 { 91 91 return clz32(~val); 92 92 } 93 93 94 static always_inline int clz64(uint64_t val)94 static inline int clz64(uint64_t val) 95 95 { 96 96 #if QEMU_GNUC_PREREQ(3, 4) … … 112 112 } 113 113 114 static always_inline int clo64(uint64_t val)114 static inline int clo64(uint64_t val) 115 115 { 116 116 return clz64(~val); 117 117 } 118 118 119 static always_inline int ctz32(uint32_t val)119 static inline int ctz32(uint32_t val) 120 120 { 121 121 #if QEMU_GNUC_PREREQ(3, 4) … … 129 129 cnt = 0; 130 130 if (!(val & 0x0000FFFFUL)) { 131 131 cnt += 16; 132 132 val >>= 16; 133 133 } 134 134 if (!(val & 0x000000FFUL)) { 135 135 cnt += 8; 136 136 val >>= 8; 137 137 } 138 138 if (!(val & 0x0000000FUL)) { 139 139 cnt += 4; 140 140 val >>= 4; 141 141 } 142 142 if (!(val & 0x00000003UL)) { 143 143 cnt += 2; 144 144 val >>= 2; 145 145 } 146 146 if (!(val & 0x00000001UL)) { 147 147 cnt++; 148 148 val >>= 1; 149 149 } 150 150 if (!(val & 0x00000001UL)) { 151 152 153 154 155 #endif 156 157 158 static always_inline int cto32(uint32_t val)151 cnt++; 152 } 153 154 return cnt; 155 #endif 156 } 157 158 static inline int cto32(uint32_t val) 159 159 { 160 160 return ctz32(~val); 161 161 } 162 162 163 static always_inline int ctz64(uint64_t val)164 { 165 #if QEMU_GNUC_PREREQ(3, 4) 166 if (val) 167 return __builtin_ctz (val);163 static inline int ctz64(uint64_t val) 164 { 165 #if QEMU_GNUC_PREREQ(3, 4) 166 if (val) 167 return __builtin_ctzll(val); 168 168 else 169 169 return 64; … … 181 181 } 182 182 183 static always_inline int cto64(uint64_t val)183 static inline int cto64(uint64_t val) 184 184 { 185 185 return ctz64(~val); 186 186 } 187 187 188 static always_inline int ctpop8(uint8_t val)188 static inline int ctpop8(uint8_t val) 189 189 { 190 190 val = (val & 0x55) + ((val >> 1) & 0x55); … … 195 195 } 196 196 197 static always_inline int ctpop16(uint16_t val)197 static inline int ctpop16(uint16_t val) 198 198 { 199 199 val = (val & 0x5555) + ((val >> 1) & 0x5555); … … 205 205 } 206 206 207 static always_inline int ctpop32(uint32_t val)207 static inline int ctpop32(uint32_t val) 208 208 { 209 209 #if QEMU_GNUC_PREREQ(3, 4) … … 220 220 } 221 221 222 static always_inline int ctpop64(uint64_t val)222 static inline int ctpop64(uint64_t val) 223 223 { 224 224 #if QEMU_GNUC_PREREQ(3, 4) -
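Side note on the host-utils.h hunk above: each bit-twiddling helper keeps a portable fallback beside the QEMU_GNUC_PREREQ(3, 4) builtin path; clz32, for example, binary-searches for the highest set bit. A standalone variant of that fallback (the exact loop in the header is partly elided above, so treat this as an assumption-level reconstruction):

    #include <stdint.h>
    #include <stdio.h>

    static int clz32_fallback(uint32_t val)
    {
        int cnt = 0;
        if (!(val & 0xFFFF0000U)) { cnt += 16; val <<= 16; }
        if (!(val & 0xFF000000U)) { cnt += 8;  val <<= 8;  }
        if (!(val & 0xF0000000U)) { cnt += 4;  val <<= 4;  }
        if (!(val & 0xC0000000U)) { cnt += 2;  val <<= 2;  }
        if (!(val & 0x80000000U)) { cnt += 1;  val <<= 1;  }
        if (!(val & 0x80000000U)) { cnt += 1; } /* val was 0 -> 32 */
        return cnt;
    }

    int main(void)
    {
        printf("%d %d %d\n", clz32_fallback(0), clz32_fallback(1),
               clz32_fallback(0x80000000U)); /* 32 31 0 */
        return 0;
    }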
trunk/src/recompiler/hostregs_helper.h
r36175 r37675
1  1  /*
2          * Save/restore host registrs.
    2   * Save/restore host registers.
3  3   *
4  4   * Copyright (c) 2007 CodeSourcery
trunk/src/recompiler/ioport.h
r36175 r37675
44  44
45  45
46          /* NOTE: as these functions may be even used when there is an isa
47             brige on non x86 targets, we always defined them */
48          #if !defined(NO_CPU_IO_DEFS) && defined(NEED_CPU_H)
    46  #ifndef VBOX
    47  void cpu_outb(pio_addr_t addr, uint8_t val);
    48  void cpu_outw(pio_addr_t addr, uint16_t val);
    49  void cpu_outl(pio_addr_t addr, uint32_t val);
    50  uint8_t cpu_inb(pio_addr_t addr);
    51  uint16_t cpu_inw(pio_addr_t addr);
    52  uint32_t cpu_inl(pio_addr_t addr);
    53  #else
49  54  void cpu_outb(CPUState *env, pio_addr_t addr, uint8_t val);
50  55  void cpu_outw(CPUState *env, pio_addr_t addr, uint16_t val);
trunk/src/recompiler/osdep.h
r36172 r37675 20 20 #define qemu_vprintf(pszFormat, args) \ 21 21 RTLogPrintfV((pszFormat), (args)) 22 #define qemu_printf RTLogPrintf 22 23 /**@todo the following macros belongs elsewhere */ 23 24 #define qemu_malloc(cb) RTMemAlloc(cb) 24 25 #define qemu_mallocz(cb) RTMemAllocZ(cb) 25 26 #define qemu_realloc(ptr, cb) RTMemRealloc(ptr, cb) 26 27 27 #define qemu_free(pv) RTMemFree(pv) 28 28 #define qemu_strdup(psz) RTStrDup(psz) 29 29 30 #define qemu_vmalloc(cb) RTMemPageAlloc(cb) 31 #define qemu_vfree(pv) RTMemPageFree(pv, missing_size_parameter) 32 33 #ifndef NULL 34 # define NULL 0 35 #endif 36 30 /* Misc wrappers */ 37 31 #define fflush(file) RTLogFlush(NULL) 38 32 #define printf(...) LogIt(LOG_INSTANCE, 0, LOG_GROUP_REM_PRINTF, (__VA_ARGS__)) … … 42 36 #endif 43 37 44 #define assert(cond) Assert(cond)38 #define assert(cond) Assert(cond) 45 39 46 40 #else /* !VBOX */ 47 41 48 42 #include <stdarg.h> 43 #include <stddef.h> 49 44 50 #define VBOX_ONLY(x) 51 45 #define VBOX_ONLY(x) /* nike */ 52 46 #define qemu_snprintf snprintf /* bird */ 53 47 #define qemu_vsnprintf vsnprintf /* bird */ 54 48 #define qemu_vprintf vprintf /* bird */ 55 56 #define qemu_printf printf57 58 void *qemu_malloc(size_t size);59 void *qemu_mallocz(size_t size);60 void qemu_free(void *ptr);61 char *qemu_strdup(const char *str);62 63 void *qemu_vmalloc(size_t size);64 void qemu_vfree(void *ptr);65 66 void *get_mmap_addr(unsigned long size);67 49 68 50 #endif /* !VBOX */ … … 95 77 #endif 96 78 97 #if ndef offsetof79 #ifdef CONFIG_NEED_OFFSETOF 98 80 #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *) 0)->MEMBER) 99 81 #endif … … 103 85 (type *) ((char *) __mptr - offsetof(type, member));}) 104 86 #endif 87 88 /* Convert from a base type to a parent type, with compile time checking. */ 89 #ifdef __GNUC__ 90 #define DO_UPCAST(type, field, dev) ( __extension__ ( { \ 91 char __attribute__((unused)) offset_must_be_zero[ \ 92 -offsetof(type, field)]; \ 93 container_of(dev, type, field);})) 94 #else 95 #define DO_UPCAST(type, field, dev) container_of(dev, type, field) 96 #endif 97 98 #define typeof_field(type, field) typeof(((type *)0)->field) 99 #define type_check(t1,t2) ((t1*)0 - (t2*)0) 105 100 106 101 #ifndef MIN … … 116 111 117 112 #ifndef always_inline 118 #if (__GNUC__ < 3) || defined(__APPLE__) 119 #define always_inline inline 120 #else 121 #define always_inline __attribute__ (( always_inline )) __inline__ 113 #if !((__GNUC__ < 3) || defined(__APPLE__)) 122 114 #ifdef __OPTIMIZE__ 123 #define inline always_inline115 #define inline __attribute__ (( always_inline )) __inline__ 124 116 #endif 125 117 #endif … … 136 128 #ifndef VBOX 137 129 #define qemu_printf printf 138 #endif 130 #else /*VBOX*/ 131 #define qemu_printf RTLogPrintf 132 #endif /*VBOX*/ 139 133 140 134 #if defined (__GNUC__) && defined (__GNUC_MINOR__) … … 165 159 #endif /* !_WIN32 */ 166 160 #else /* VBOX */ 167 # define qemu_memalign(alignment, size) ( (alignment) <= PAGE_SIZE ? RTMemPageAlloc((size)) : NULL ) 161 # define qemu_memalign(alignment, size) ( (alignment) <= PAGE_SIZE ? RTMemPageAlloc((size)) : NULL ) 162 # define qemu_vfree(pv) RTMemPageFree(pv, missing_size_parameter) 163 # define qemu_vmalloc(cb) RTMemPageAlloc(cb) 168 164 #endif /* VBOX */ 169 165 -
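Side note on the osdep.h hunk above: container_of() recovers the address of an enclosing structure from a pointer to one of its members, and DO_UPCAST() adds a compile-time check that the member sits at offset zero. A usage sketch with the portable (non-__GNUC__) form; struct demo_device is a hypothetical example type:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct device      { int id; };
    struct demo_device { struct device parent; int irq; };

    int main(void)
    {
        struct demo_device d = { { 7 }, 5 };
        struct device *base = &d.parent;   /* only the base type is handed around */
        struct demo_device *up = container_of(base, struct demo_device, parent);
        printf("id=%d irq=%d\n", up->parent.id, up->irq);
        return 0;
    }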
trunk/src/recompiler/qemu-common.h
r36175 r37675 3 3 #define QEMU_COMMON_H 4 4 5 #include "config-host.h" 6 5 7 #ifdef VBOX 6 8 7 # include < string.h>8 # include <i nttypes.h>9 # include <iprt/string.h> 10 # include <iprt/types.h> 9 11 # include <iprt/ctype.h> 10 11 #define QEMU_NORETURN __attribute__ ((__noreturn__))12 12 13 13 void pstrcpy(char *buf, int buf_size, const char *str); 14 14 char *pstrcat(char *buf, int buf_size, const char *s); 15 # define snprintf RTStrPrintf 16 17 #define qemu_isalnum(c) RT_C_IS_ALNUM((unsigned char)(c)) 18 #define qemu_isalpha(c) RT_C_IS_ALPHA((unsigned char)(c)) 19 #define qemu_iscntrl(c) RT_C_IS_CNTRL((unsigned char)(c)) 20 #define qemu_isdigit(c) RT_C_IS_DIGIT((unsigned char)(c)) 21 #define qemu_isgraph(c) RT_C_IS_GRAPH((unsigned char)(c)) 22 #define qemu_islower(c) RT_C_IS_LOWER((unsigned char)(c)) 23 #define qemu_isprint(c) RT_C_IS_PRINT((unsigned char)(c)) 24 #define qemu_ispunct(c) RT_C_IS_PUNCT((unsigned char)(c)) 25 #define qemu_isspace(c) RT_C_IS_SPACE((unsigned char)(c)) 26 #define qemu_isupper(c) RT_C_IS_UPPER((unsigned char)(c)) 27 #define qemu_isxdigit(c) RT_C_IS_XDIGIT((unsigned char)(c)) 28 #define qemu_tolower(c) RT_C_TO_LOWER((unsigned char)(c)) 29 #define qemu_toupper(c) RT_C_TO_UPPER((unsigned char)(c)) 30 #define qemu_isascii(c) RT_C_IS_ASCII((unsigned char)(c)) 31 #define qemu_toascii(c) RT_C_TO_ASCII((unsigned char)(c)) 32 33 #define qemu_init_vcpu(env) do { } while (0) /* we don't need this :-) */ 34 15 # define snprintf RTStrPrintf 16 17 # define qemu_isalnum(c) RT_C_IS_ALNUM((unsigned char)(c)) 18 # define qemu_isalpha(c) RT_C_IS_ALPHA((unsigned char)(c)) 19 # define qemu_iscntrl(c) RT_C_IS_CNTRL((unsigned char)(c)) 20 # define qemu_isdigit(c) RT_C_IS_DIGIT((unsigned char)(c)) 21 # define qemu_isgraph(c) RT_C_IS_GRAPH((unsigned char)(c)) 22 # define qemu_islower(c) RT_C_IS_LOWER((unsigned char)(c)) 23 # define qemu_isprint(c) RT_C_IS_PRINT((unsigned char)(c)) 24 # define qemu_ispunct(c) RT_C_IS_PUNCT((unsigned char)(c)) 25 # define qemu_isspace(c) RT_C_IS_SPACE((unsigned char)(c)) 26 # define qemu_isupper(c) RT_C_IS_UPPER((unsigned char)(c)) 27 # define qemu_isxdigit(c) RT_C_IS_XDIGIT((unsigned char)(c)) 28 # define qemu_tolower(c) RT_C_TO_LOWER((unsigned char)(c)) 29 # define qemu_toupper(c) RT_C_TO_UPPER((unsigned char)(c)) 30 # define qemu_isascii(c) RT_C_IS_ASCII((unsigned char)(c)) 31 # define qemu_toascii(c) RT_C_TO_ASCII((unsigned char)(c)) 32 33 # define qemu_init_vcpu(env) do { } while (0) /* we don't need this :-) */ 34 35 # define QEMU_NORETURN __attribute__((__noreturn__)) 36 # ifdef CONFIG_GCC_ATTRIBUTE_WARN_UNUSED_RESULT 37 # define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) 38 # else 39 # define QEMU_WARN_UNUSED_RESULT 40 # endif 35 41 36 42 #else /* !VBOX */ … … 63 69 #include <sys/stat.h> 64 70 #include <assert.h> 65 #include "config-host.h"66 71 67 72 #ifndef O_LARGEFILE … … 71 76 #define O_BINARY 0 72 77 #endif 73 78 #ifndef MAP_ANONYMOUS 79 #define MAP_ANONYMOUS MAP_ANON 80 #endif 74 81 #ifndef ENOMEDIUM 75 82 #define ENOMEDIUM ENODEV 76 83 #endif 77 78 #ifndef HAVE_IOVEC 79 #define HAVE_IOVEC 84 #if !defined(ENOTSUP) 85 #define ENOTSUP 4096 86 #endif 87 88 #ifndef CONFIG_IOVEC 89 #define CONFIG_IOVEC 80 90 struct iovec { 81 91 void *iov_base; 82 92 size_t iov_len; 83 93 }; 94 /* 95 * Use the same value as Linux for now. 
96 */ 97 #define IOV_MAX 1024 84 98 #else 85 99 #include <sys/uio.h> … … 89 103 #define fsync _commit 90 104 #define lseek _lseeki64 91 #define ENOTSUP 409692 105 extern int qemu_ftruncate64(int, int64_t); 93 106 #define ftruncate qemu_ftruncate64 94 95 107 96 108 static inline char *realpath(const char *path, char *resolved_path) … … 123 135 124 136 typedef void QEMUBHFunc(void *opaque); 137 138 void async_context_push(void); 139 void async_context_pop(void); 140 int get_async_context_id(void); 125 141 126 142 QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque); … … 136 152 void qemu_bh_delete(QEMUBH *bh); 137 153 int qemu_bh_poll(void); 154 void qemu_bh_update_timeout(int *timeout); 138 155 139 156 uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c); … … 150 167 time_t mktimegm(struct tm *tm); 151 168 int qemu_fls(int i); 169 int qemu_fdatasync(int fd); 170 171 /* path.c */ 172 void init_paths(const char *prefix); 173 const char *path(const char *pathname); 152 174 153 175 #define qemu_isalnum(c) isalnum((unsigned char)(c)) … … 177 199 178 200 201 void qemu_mutex_lock_iothread(void); 202 void qemu_mutex_unlock_iothread(void); 203 204 int qemu_open(const char *name, int flags, ...); 205 void qemu_set_cloexec(int fd); 206 207 #ifndef _WIN32 208 int qemu_pipe(int pipefd[2]); 209 #endif 210 179 211 /* Error handling. */ 180 212 … … 208 240 typedef TextConsole QEMUConsole; 209 241 typedef struct CharDriverState CharDriverState; 242 typedef struct MACAddr MACAddr; 210 243 typedef struct VLANState VLANState; 244 typedef struct VLANClientState VLANClientState; 211 245 typedef struct QEMUFile QEMUFile; 212 246 typedef struct i2c_bus i2c_bus; … … 214 248 typedef struct SMBusDevice SMBusDevice; 215 249 typedef struct QEMUTimer QEMUTimer; 250 typedef struct PCIHostState PCIHostState; 251 typedef struct PCIExpressHost PCIExpressHost; 216 252 typedef struct PCIBus PCIBus; 217 253 typedef struct PCIDevice PCIDevice; … … 255 291 void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov); 256 292 void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len); 293 void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size); 257 294 void qemu_iovec_destroy(QEMUIOVector *qiov); 258 295 void qemu_iovec_reset(QEMUIOVector *qiov); … … 263 300 typedef struct Monitor Monitor; 264 301 302 /* Convert a byte between binary and BCD. */ 303 static inline uint8_t to_bcd(uint8_t val) 304 { 305 return ((val / 10) << 4) | (val % 10); 306 } 307 308 static inline uint8_t from_bcd(uint8_t val) 309 { 310 return ((val >> 4) * 10) + (val & 0x0f); 311 } 312 265 313 #include "module.h" 266 314 -
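Side note on the qemu-common.h hunk above: to_bcd()/from_bcd() convert between a 0..99 binary value and a packed-BCD byte (one decimal digit per nibble), the encoding RTC-style devices expose. A round-trip demo using the same two expressions:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t to_bcd(uint8_t val)   { return ((val / 10) << 4) | (val % 10); }
    static uint8_t from_bcd(uint8_t val) { return ((val >> 4) * 10) + (val & 0x0f); }

    int main(void)
    {
        uint8_t seconds = 59;
        uint8_t bcd = to_bcd(seconds);               /* 0x59 */
        printf("0x%02x -> %u\n", bcd, from_bcd(bcd));
        return 0;
    }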
trunk/src/recompiler/qemu-lock.h
r36175 r37675
33  33     than physical CPUs (the extreme case being a single CPU host) a spinlock
34  34     simply wastes CPU until the OS decides to preempt it.  */
35          #if defined(USE_NPTL)
    35  #if defined(CONFIG_USE_NPTL)
36  36
37  37  #include <pthread.h>
trunk/src/recompiler/qemu-log.h
r36170 r37675 16 16 /* Returns true if qemu_log() will really write somewhere 17 17 */ 18 #ifndef VBOX 18 19 #define qemu_log_enabled() (logfile != NULL) 20 #else 21 # define qemu_log_enabled() LogIsEnabled() 22 #endif 19 23 20 24 /* Returns true if a bit is set in the current loglevel mask … … 23 27 24 28 29 25 30 /* Logging functions: */ 26 31 27 32 /* main logging function 28 33 */ 34 #ifndef VBOX 29 35 #define qemu_log(...) do { \ 30 36 if (logfile) \ 31 37 fprintf(logfile, ## __VA_ARGS__); \ 32 38 } while (0) 39 #else 40 # define qemu_log(...) Log((__VA_ARGS__)) 41 #endif 33 42 34 43 /* vfprintf-like logging function 35 44 */ 45 #ifndef VBOX 36 46 #define qemu_log_vprintf(fmt, va) do { \ 37 47 if (logfile) \ 38 48 vfprintf(logfile, fmt, va); \ 39 49 } while (0) 50 #else 51 # define qemu_log_vprintf(fmt, va) do { \ 52 if (LogIsEnabled()) \ 53 RTLogLoggerExV(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP, fmt, va); \ 54 } while (0) 55 #endif 40 56 41 57 /* log only if a bit is set on the current loglevel mask 42 58 */ 59 #ifndef VBOX 43 60 #define qemu_log_mask(b, ...) do { \ 44 61 if (loglevel & (b)) \ 45 62 fprintf(logfile, ## __VA_ARGS__); \ 46 63 } while (0) 64 #else 65 # define qemu_log_mask(b, ...) do { \ 66 if (loglevel & (b)) \ 67 Log((__VA_ARGS__)); \ 68 } while (0) 69 #endif 47 70 48 71 … … 52 75 53 76 /* cpu_dump_state() logging functions: */ 77 #ifndef VBOX 54 78 #define log_cpu_state(env, f) cpu_dump_state((env), logfile, fprintf, (f)); 79 #else 80 #define log_cpu_state(env, f) cpu_dump_state((env), NULL, NULL, (f)); 81 #endif 55 82 #define log_cpu_state_mask(b, env, f) do { \ 56 83 if (loglevel & (b)) log_cpu_state((env), (f)); \ … … 71 98 72 99 /* fflush() the log file */ 100 #ifndef VBOX 73 101 #define qemu_log_flush() fflush(logfile) 102 #else 103 # define qemu_log_flush() RTLogFlush(LOG_INSTANCE) 104 #endif 74 105 75 106 /* Close the log file */ 107 #ifndef VBOX 76 108 #define qemu_log_close() do { \ 77 109 fclose(logfile); \ 78 110 logfile = NULL; \ 79 111 } while (0) 112 #else 113 # define qemu_log_close() do { } while (0) 114 #endif 80 115 81 116 /* Set up a new log file */ 117 #ifndef VBOX 82 118 #define qemu_log_set_file(f) do { \ 83 119 logfile = (f); \ 84 120 } while (0) 121 #else 122 # define qemu_log_set_file(f) do { } while (0) 123 #endif 85 124 86 125 /* Set up a new log file, only if none is set */ 126 #ifndef VBOX 87 127 #define qemu_log_try_set_file(f) do { \ 88 128 if (!logfile) \ 89 129 logfile = (f); \ 90 130 } while (0) 131 #else 132 #define qemu_log_try_set_file(f) do { } while (0) 133 #endif 91 134 92 135 -
trunk/src/recompiler/softmmu_template.h
r36175 r37675
98   98   #endif
99   99   #endif /* SHIFT > 2 */
100            #ifdef CONFIG_KQEMU
101                env->last_io_time = cpu_get_time_fast();
102            #endif
103  100      return res;
104  101  }
…  …
252  249  #endif
253  250  #endif /* SHIFT > 2 */
254            #ifdef CONFIG_KQEMU
255                env->last_io_time = cpu_get_time_fast();
256            #endif
257  251  }
trunk/src/recompiler/target-i386/cpu.h
r36299 r37675 347 347 #define MSR_GSBASE 0xc0000101 348 348 #define MSR_KERNELGSBASE 0xc0000102 349 #define MSR_TSC_AUX 0xc0000103 349 350 350 351 #define MSR_VM_HSAVE_PA 0xc0010117 … … 557 558 } MMXReg; 558 559 559 #ifdef WORDS_BIGENDIAN560 #ifdef HOST_WORDS_BIGENDIAN 560 561 #define XMM_B(n) _b[15 - (n)] 561 562 #define XMM_W(n) _w[7 - (n)] … … 584 585 #define MMX_Q(n) q 585 586 587 typedef union { 588 #ifdef USE_X86LDOUBLE 589 CPU86_LDouble d __attribute__((aligned(16))); 590 #else 591 CPU86_LDouble d; 592 #endif 593 MMXReg mmx; 594 } FPReg; 595 596 typedef struct { 597 uint64_t base; 598 uint64_t mask; 599 } MTRRVar; 600 601 #define CPU_NB_REGS64 16 602 #define CPU_NB_REGS32 8 603 586 604 #ifdef TARGET_X86_64 587 #define CPU_NB_REGS 16605 #define CPU_NB_REGS CPU_NB_REGS64 588 606 #else 589 #define CPU_NB_REGS 8607 #define CPU_NB_REGS CPU_NB_REGS32 590 608 #endif 591 609 … … 617 635 618 636 target_ulong cr[5]; /* NOTE: cr1 is unused */ 619 uint64_t a20_mask;637 int32_t a20_mask; 620 638 621 639 /* FPU state */ 622 640 unsigned int fpstt; /* top of stack index */ 623 u nsigned int fpus;624 u nsigned int fpuc;641 uint16_t fpus; 642 uint16_t fpuc; 625 643 uint8_t fptags[8]; /* 0 = valid, 1 = empty */ 626 union { 627 #ifdef USE_X86LDOUBLE 628 CPU86_LDouble d __attribute__((aligned(16))); 629 #else 630 CPU86_LDouble d; 631 #endif 632 MMXReg mmx; 633 } fpregs[8]; 644 FPReg fpregs[8]; 634 645 635 646 /* emulator internal variables */ … … 678 689 target_ulong kernelgsbase; 679 690 #endif 691 uint64_t system_time_msr; 692 uint64_t wall_clock_msr; 680 693 681 694 uint64_t tsc; … … 733 746 uint64_t mtrr_fixed[11]; 734 747 uint64_t mtrr_deftype; 735 struct { 736 uint64_t base; 737 uint64_t mask; 738 } mtrr_var[8]; 739 740 #ifdef CONFIG_KQEMU 741 int kqemu_enabled; 742 int last_io_time; 743 #endif 748 MTRRVar mtrr_var[8]; 744 749 745 750 /* For KVM */ 746 uint64_t interrupt_bitmap[256 / 64];747 751 uint32_t mp_state; 752 int32_t exception_injected; 753 int32_t interrupt_injected; 754 uint8_t soft_interrupt; 755 uint8_t nmi_injected; 756 uint8_t nmi_pending; 757 uint8_t has_error_code; 758 uint32_t sipi_vector; 748 759 749 760 /* in order to simplify APIC support, we leave this pointer to the … … 754 765 uint64 mcg_status; 755 766 uint64 mcg_ctl; 756 uint64 *mce_banks; 767 uint64 mce_banks[MCE_BANKS_DEF*4]; 768 769 uint64_t tsc_aux; 770 771 /* vmstate */ 772 uint16_t fpus_vmstate; 773 uint16_t fptag_vmstate; 774 uint16_t fpregs_format_vmstate; 757 775 #else /* VBOX */ 758 776 … … 956 974 } 957 975 976 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, 977 target_ulong *base, unsigned int *limit, 978 unsigned int *flags); 979 958 980 /* wrapper, just in case memory mappings must be changed */ 959 981 static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl) … … 987 1009 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 988 1010 int is_write, int mmu_idx, int is_softmmu); 1011 #define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault 989 1012 void cpu_x86_set_a20(CPUX86State *env, int a20_state); 990 1013 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, … … 999 1022 static inline int hw_breakpoint_type(unsigned long dr7, int index) 1000 1023 { 1001 return (dr7 >> (DR7_TYPE_SHIFT + (index * 2))) & 3;1024 return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3; 1002 1025 } 1003 1026 1004 1027 static inline int hw_breakpoint_len(unsigned long dr7, int index) 1005 1028 { 1006 int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 2))) & 3);1029 int len = ((dr7 >> 
(DR7_LEN_SHIFT + (index * 4))) & 3); 1007 1030 return (len == 2) ? 8 : len + 1; 1008 1031 } … … 1032 1055 #define X86_DUMP_FPU 0x0001 /* dump FPU state too */ 1033 1056 #define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */ 1034 1035 #ifdef CONFIG_KQEMU1036 static inline int cpu_get_time_fast(void)1037 {1038 int low, high;1039 asm volatile("rdtsc" : "=a" (low), "=d" (high));1040 return low;1041 }1042 #endif1043 1057 1044 1058 #ifdef VBOX … … 1070 1084 #define cpu_list x86_cpu_list 1071 1085 1072 #define CPU_SAVE_VERSION 1 01086 #define CPU_SAVE_VERSION 11 1073 1087 1074 1088 /* MMU modes definitions */ -
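Side note on the cpu.h hunk above: the hw_breakpoint_type()/hw_breakpoint_len() stride changes from index * 2 to index * 4 because each of the four DR7 breakpoint slots owns a 2-bit R/W field plus a 2-bit LEN field in DR7[31:16]. A standalone sketch of the corrected extraction:

    #include <stdio.h>

    #define DR7_TYPE_SHIFT 16
    #define DR7_LEN_SHIFT  18

    static int hw_bp_type(unsigned long dr7, int index)
    {
        return (int)(dr7 >> (DR7_TYPE_SHIFT + index * 4)) & 3;
    }

    static int hw_bp_len(unsigned long dr7, int index)
    {
        int len = (int)(dr7 >> (DR7_LEN_SHIFT + index * 4)) & 3;
        return (len == 2) ? 8 : len + 1;  /* LEN encoding: 0->1, 1->2, 2->8, 3->4 */
    }

    int main(void)
    {
        unsigned long dr7 = 0x9UL << 16;  /* slot 0: R/W=01 (write), LEN=10 (8 bytes) */
        printf("type=%d len=%d\n", hw_bp_type(dr7, 0), hw_bp_len(dr7, 0));
        return 0;
    }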
trunk/src/recompiler/target-i386/exec.h
r36175 r37675 44 44 #include "qemu-log.h" 45 45 46 #undef EAX 46 47 #define EAX (env->regs[R_EAX]) 48 #undef ECX 47 49 #define ECX (env->regs[R_ECX]) 50 #undef EDX 48 51 #define EDX (env->regs[R_EDX]) 52 #undef EBX 49 53 #define EBX (env->regs[R_EBX]) 54 #undef ESP 50 55 #define ESP (env->regs[R_ESP]) 56 #undef EBP 51 57 #define EBP (env->regs[R_EBP]) 58 #undef ESI 52 59 #define ESI (env->regs[R_ESI]) 60 #undef EDI 53 61 #define EDI (env->regs[R_EDI]) 62 #undef EIP 54 63 #define EIP (env->eip) 55 64 #define DF (env->df) … … 116 125 #define floatx_compare floatx80_compare 117 126 #define floatx_compare_quiet floatx80_compare_quiet 118 #ifdef VBOX119 #undef sin120 #undef cos121 #undef sqrt122 #undef pow123 #undef log124 #undef tan125 #undef atan2126 #undef floor127 #undef ceil128 #undef ldexp129 #endif /* !VBOX */130 #if !defined(VBOX) || !defined(_MSC_VER)131 #define sin sinl132 #define cos cosl133 #define sqrt sqrtl134 #define pow powl135 #define log logl136 #define tan tanl137 #define atan2 atan2l138 #define floor floorl139 #define ceil ceill140 #define ldexp ldexpl141 #endif142 127 #else 143 128 #define floatx_to_int32 float64_to_int32 … … 158 143 #endif 159 144 160 #ifdef VBOX161 extern CPU86_LDouble sin(CPU86_LDouble x);162 extern CPU86_LDouble cos(CPU86_LDouble x);163 extern CPU86_LDouble sqrt(CPU86_LDouble x);164 extern CPU86_LDouble pow(CPU86_LDouble, CPU86_LDouble);165 extern CPU86_LDouble log(CPU86_LDouble x);166 extern CPU86_LDouble tan(CPU86_LDouble x);167 extern CPU86_LDouble atan2(CPU86_LDouble, CPU86_LDouble);168 extern CPU86_LDouble floor(CPU86_LDouble x);169 extern CPU86_LDouble ceil(CPU86_LDouble x);170 #endif /* VBOX */171 172 145 #define RC_MASK 0xc00 173 146 #define RC_NEAR 0x000 … … 202 175 typedef union { 203 176 double d; 204 #if !defined( WORDS_BIGENDIAN) && !defined(__arm__)177 #if !defined(HOST_WORDS_BIGENDIAN) && !defined(__arm__) 205 178 struct { 206 179 uint32_t lower; -
trunk/src/recompiler/target-i386/helper.c
r36175 r37675 135 135 { 136 136 .name = "qemu64", 137 .level = 2,137 .level = 4, 138 138 .vendor1 = CPUID_VENDOR_AMD_1, 139 139 .vendor2 = CPUID_VENDOR_AMD_2, … … 147 147 /* this feature is needed for Solaris and isn't fully implemented */ 148 148 CPUID_PSE36, 149 .ext_features = CPUID_EXT_SSE3 ,149 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT, 150 150 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | 151 151 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 152 .ext3_features = CPUID_EXT3_SVM, 152 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 153 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 153 154 .xlevel = 0x8000000A, 154 155 .model_id = "QEMU Virtual CPU version " QEMU_VERSION, … … 167 168 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 168 169 CPUID_PSE36, 169 /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */170 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,170 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 171 CPUID_EXT_POPCNT, 171 172 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 172 173 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | … … 174 175 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 175 176 CPUID_EXT2_FFXSR, 176 /* Missing: CPUID_EXT3_ LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,177 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,177 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 178 CPUID_EXT3_CR8LEG, 178 179 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 179 180 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 180 .ext3_features = CPUID_EXT3_SVM, 181 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 182 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 181 183 .xlevel = 0x8000001A, 182 184 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" … … 199 201 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3, 200 202 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 201 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */203 .ext3_features = CPUID_EXT3_LAHF_LM, 202 204 .xlevel = 0x80000008, 203 205 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 204 206 }, 207 { 208 .name = "kvm64", 209 .level = 5, 210 .vendor1 = CPUID_VENDOR_INTEL_1, 211 .vendor2 = CPUID_VENDOR_INTEL_2, 212 .vendor3 = CPUID_VENDOR_INTEL_3, 213 .family = 15, 214 .model = 6, 215 .stepping = 1, 216 /* Missing: CPUID_VME, CPUID_HT */ 217 .features = PPRO_FEATURES | 218 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 219 CPUID_PSE36, 220 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 221 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16, 222 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 223 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | 224 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 225 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 226 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 227 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 228 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 229 .ext3_features = 0, 230 .xlevel = 0x80000008, 231 .model_id = "Common KVM processor" 232 }, 205 233 #endif 206 234 { 207 235 .name = "qemu32", 208 .level = 2,236 .level = 4, 209 237 .family = 6, 210 238 .model = 3, 211 239 .stepping = 3, 212 240 .features = PPRO_FEATURES, 213 .ext_features = CPUID_EXT_SSE3 ,241 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT, 214 242 .xlevel = 0, 215 243 .model_id = "QEMU Virtual CPU version " QEMU_VERSION, … … 365 393 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0; 366 394 uint32_t minus_features = 
0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0; 367 int family = -1, model = -1, stepping = -1;395 uint32_t numvalue; 368 396 369 397 def = NULL; … … 397 425 if (!strcmp(featurestr, "family")) { 398 426 char *err; 399 family = strtol(val, &err, 10);400 if (!*val || *err || family < 0) {427 numvalue = strtoul(val, &err, 0); 428 if (!*val || *err) { 401 429 fprintf(stderr, "bad numerical value %s\n", val); 402 430 goto error; 403 431 } 404 x86_cpu_def->family = family;432 x86_cpu_def->family = numvalue; 405 433 } else if (!strcmp(featurestr, "model")) { 406 434 char *err; 407 model = strtol(val, &err, 10);408 if (!*val || *err || model < 0 || model> 0xff) {435 numvalue = strtoul(val, &err, 0); 436 if (!*val || *err || numvalue > 0xff) { 409 437 fprintf(stderr, "bad numerical value %s\n", val); 410 438 goto error; 411 439 } 412 x86_cpu_def->model = model;440 x86_cpu_def->model = numvalue; 413 441 } else if (!strcmp(featurestr, "stepping")) { 414 442 char *err; 415 stepping = strtol(val, &err, 10);416 if (!*val || *err || stepping < 0 || stepping> 0xf) {443 numvalue = strtoul(val, &err, 0); 444 if (!*val || *err || numvalue > 0xf) { 417 445 fprintf(stderr, "bad numerical value %s\n", val); 418 446 goto error; 419 447 } 420 x86_cpu_def->stepping = stepping; 448 x86_cpu_def->stepping = numvalue ; 449 } else if (!strcmp(featurestr, "level")) { 450 char *err; 451 numvalue = strtoul(val, &err, 0); 452 if (!*val || *err) { 453 fprintf(stderr, "bad numerical value %s\n", val); 454 goto error; 455 } 456 x86_cpu_def->level = numvalue; 457 } else if (!strcmp(featurestr, "xlevel")) { 458 char *err; 459 numvalue = strtoul(val, &err, 0); 460 if (!*val || *err) { 461 fprintf(stderr, "bad numerical value %s\n", val); 462 goto error; 463 } 464 if (numvalue < 0x80000000) { 465 numvalue += 0x80000000; 466 } 467 x86_cpu_def->xlevel = numvalue; 421 468 } else if (!strcmp(featurestr, "vendor")) { 422 469 if (strlen(val) != 12) { … … 594 641 cpu_breakpoint_remove_all(env, BP_CPU); 595 642 cpu_watchpoint_remove_all(env, BP_CPU); 643 644 #ifndef VBOX 645 env->mcg_status = 0; 646 #endif 596 647 } 597 648 … … 666 717 const char *name, struct SegmentCache *sc) 667 718 { 719 #ifdef VBOX 720 # define cpu_fprintf(f, ...) RTLogPrintf(__VA_ARGS__) 721 #endif 668 722 #ifdef TARGET_X86_64 669 723 if (env->hflags & HF_CS64_MASK) { … … 714 768 done: 715 769 cpu_fprintf(f, "\n"); 770 #ifdef VBOX 771 # undef cpu_fprintf 772 #endif 716 773 } 717 774 … … 724 781 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; 725 782 726 if (kvm_enabled()) 727 kvm_arch_get_registers(env); 783 #ifdef VBOX 784 # define cpu_fprintf(f, ...) 
RTLogPrintf(__VA_ARGS__) 785 #endif 786 cpu_synchronize_state(env); 728 787 729 788 eflags = env->eflags; … … 762 821 env->hflags & HF_CPL_MASK, 763 822 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, 764 ( int)(env->a20_mask >> 20) & 1,823 (env->a20_mask >> 20) & 1, 765 824 (env->hflags >> HF_SMM_SHIFT) & 1, 766 825 env->halted); … … 789 848 env->hflags & HF_CPL_MASK, 790 849 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, 791 ( int)(env->a20_mask >> 20) & 1,850 (env->a20_mask >> 20) & 1, 792 851 (env->hflags >> HF_SMM_SHIFT) & 1, 793 852 env->halted); … … 900 959 } 901 960 } 961 #ifdef VBOX 962 # undef cpu_fprintf 963 #endif 902 964 } 903 965 … … 920 982 we must flush everything */ 921 983 tlb_flush(env, 1); 922 env->a20_mask = (~0x100000) | (a20_state << 20);984 env->a20_mask = ~(1 << 20) | (a20_state << 20); 923 985 } 924 986 } … … 1028 1090 /* XXX: This value should match the one returned by CPUID 1029 1091 * and in exec.c */ 1030 #if defined(CONFIG_KQEMU)1031 #define PHYS_ADDR_MASK 0xfffff000LL1032 #else1033 1092 # if defined(TARGET_X86_64) 1034 1093 # define PHYS_ADDR_MASK 0xfffffff000LL … … 1036 1095 # define PHYS_ADDR_MASK 0xffffff000LL 1037 1096 # endif 1038 #endif1039 1097 1040 1098 /* return value: … … 1514 1572 } 1515 1573 } else { 1516 TAILQ_FOREACH(bp, &env->breakpoints, entry)1574 QTAILQ_FOREACH(bp, &env->breakpoints, entry) 1517 1575 if (bp->pc == env->eip) { 1518 1576 if (bp->flags & BP_CPU) { … … 1595 1653 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF; 1596 1654 cenv->mcg_ctl = ~(uint64_t)0; 1597 bank_num = cenv->mcg_cap & 0xff; 1598 cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4); 1655 bank_num = MCE_BANKS_DEF; 1599 1656 for (bank = 0; bank < bank_num; bank++) 1600 1657 cenv->mce_banks[bank*4] = ~(uint64_t)0; … … 1637 1694 } 1638 1695 1696 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx, 1697 uint32_t *ecx, uint32_t *edx) 1698 { 1699 *ebx = env->cpuid_vendor1; 1700 *edx = env->cpuid_vendor2; 1701 *ecx = env->cpuid_vendor3; 1702 1703 /* sysenter isn't supported on compatibility mode on AMD, syscall 1704 * isn't supported in compatibility mode on Intel. 1705 * Normally we advertise the actual cpu vendor, but you can override 1706 * this if you want to use KVM's sysenter/syscall emulation 1707 * in compatibility mode and when doing cross vendor migration 1708 */ 1709 if (kvm_enabled() && env->cpuid_vendor_override) { 1710 host_cpuid(0, 0, NULL, ebx, ecx, edx); 1711 } 1712 } 1713 1639 1714 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 1640 1715 uint32_t *eax, uint32_t *ebx, … … 1653 1728 case 0: 1654 1729 *eax = env->cpuid_level; 1655 *ebx = env->cpuid_vendor1; 1656 *edx = env->cpuid_vendor2; 1657 *ecx = env->cpuid_vendor3; 1658 1659 /* sysenter isn't supported on compatibility mode on AMD. and syscall 1660 * isn't supported in compatibility mode on Intel. so advertise the 1661 * actuall cpu, and say goodbye to migration between different vendors 1662 * is you use compatibility mode. 
*/ 1663 if (kvm_enabled() && !env->cpuid_vendor_override) 1664 host_cpuid(0, 0, NULL, ebx, ecx, edx); 1730 get_cpuid_vendor(env, ebx, ecx, edx); 1665 1731 break; 1666 1732 case 1: … … 1669 1735 *ecx = env->cpuid_ext_features; 1670 1736 *edx = env->cpuid_features; 1737 if (env->nr_cores * env->nr_threads > 1) { 1738 *ebx |= (env->nr_cores * env->nr_threads) << 16; 1739 *edx |= 1 << 28; /* HTT bit */ 1740 } 1671 1741 break; 1672 1742 case 2: … … 1679 1749 case 4: 1680 1750 /* cache info: needed for Core compatibility */ 1751 if (env->nr_cores > 1) { 1752 *eax = (env->nr_cores - 1) << 26; 1753 } else { 1754 *eax = 0; 1755 } 1681 1756 switch (count) { 1682 1757 case 0: /* L1 dcache info */ 1683 *eax = 0x0000121;1758 *eax |= 0x0000121; 1684 1759 *ebx = 0x1c0003f; 1685 1760 *ecx = 0x000003f; … … 1687 1762 break; 1688 1763 case 1: /* L1 icache info */ 1689 *eax = 0x0000122;1764 *eax |= 0x0000122; 1690 1765 *ebx = 0x1c0003f; 1691 1766 *ecx = 0x000003f; … … 1693 1768 break; 1694 1769 case 2: /* L2 cache info */ 1695 *eax = 0x0000143; 1770 *eax |= 0x0000143; 1771 if (env->nr_threads > 1) { 1772 *eax |= (env->nr_threads - 1) << 14; 1773 } 1696 1774 *ebx = 0x3c0003f; 1697 1775 *ecx = 0x0000fff; … … 1746 1824 *edx = env->cpuid_ext2_features; 1747 1825 1826 /* The Linux kernel checks for the CMPLegacy bit and 1827 * discards multiple thread information if it is set. 1828 * So dont set it here for Intel to make Linux guests happy. 1829 */ 1830 if (env->nr_cores * env->nr_threads > 1) { 1831 uint32_t tebx, tecx, tedx; 1832 get_cpuid_vendor(env, &tebx, &tecx, &tedx); 1833 if (tebx != CPUID_VENDOR_INTEL_1 || 1834 tedx != CPUID_VENDOR_INTEL_2 || 1835 tecx != CPUID_VENDOR_INTEL_3) { 1836 *ecx |= 1 << 1; /* CmpLegacy bit */ 1837 } 1838 } 1839 1748 1840 if (kvm_enabled()) { 1749 /* Nested SVM not yet supported in KVM*/1841 /* Nested SVM not yet supported in upstream QEMU */ 1750 1842 *ecx &= ~CPUID_EXT3_SVM; 1751 } else {1752 /* AMD 3DNow! is not supported in QEMU */1753 *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);1754 1843 } 1755 1844 break; … … 1781 1870 if (env->cpuid_ext2_features & CPUID_EXT2_LM) { 1782 1871 /* 64 bit processor */ 1783 #if defined(CONFIG_KQEMU)1784 *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */1785 #else1786 1872 /* XXX: The physical address space is limited to 42 bits in exec.c. */ 1787 1873 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */ 1788 #endif1789 1874 } else { 1790 #if defined(CONFIG_KQEMU)1791 *eax = 0x00000020; /* 32 bits physical */1792 #else1793 1875 if (env->cpuid_features & CPUID_PSE36) 1794 1876 *eax = 0x00000024; /* 36 bits physical */ 1795 1877 else 1796 1878 *eax = 0x00000020; /* 32 bits physical */ 1797 #endif1798 1879 } 1799 1880 *ebx = 0; 1800 1881 *ecx = 0; 1801 1882 *edx = 0; 1883 if (env->nr_cores * env->nr_threads > 1) { 1884 *ecx |= (env->nr_cores * env->nr_threads) - 1; 1885 } 1802 1886 break; 1803 1887 case 0x8000000A: … … 1816 1900 } 1817 1901 } 1902 1818 1903 1819 1904 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, … … 1881 1966 mce_init(env); 1882 1967 #endif 1883 cpu_reset(env);1884 #ifdef CONFIG_KQEMU1885 kqemu_init(env);1886 #endif1887 1968 1888 1969 qemu_init_vcpu(env); -
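Side note on the helper.c hunk above: with multi-core/multi-thread guests, CPUID leaf 1 now reports the logical processor count in EBX[23:16] and sets the HTT bit (EDX bit 28) whenever nr_cores * nr_threads exceeds one. A minimal sketch of that encoding with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned nr_cores = 2, nr_threads = 2;    /* illustrative topology */
        uint32_t ebx = 0, edx = 0;

        if (nr_cores * nr_threads > 1) {
            ebx |= (nr_cores * nr_threads) << 16; /* logical CPU count */
            edx |= 1u << 28;                      /* HTT */
        }
        printf("ebx=%08x edx=%08x\n", ebx, edx);  /* ebx=00040000 edx=10000000 */
        return 0;
    }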
trunk/src/recompiler/target-i386/helper.h
r36175 r37675 81 81 DEF_HELPER_0(cpuid, void) 82 82 DEF_HELPER_0(rdtsc, void) 83 DEF_HELPER_0(rdtscp, void) 83 84 DEF_HELPER_0(rdpmc, void) 84 85 DEF_HELPER_0(rdmsr, void) … … 209 210 DEF_HELPER_1(bsf, tl, tl) 210 211 DEF_HELPER_1(bsr, tl, tl) 212 DEF_HELPER_2(lzcnt, tl, tl, int) 211 213 212 214 /* MMX/SSE */ … … 237 239 DEF_HELPER_0(cli_vme, void) 238 240 DEF_HELPER_0(sti_vme, void) 239 DEF_HELPER_0(rdtscp, void)240 241 DEF_HELPER_0(check_external_event, void) 241 242 DEF_HELPER_0(dump_state, void) -
trunk/src/recompiler/target-i386/op_helper.c
r36765 r37675 675 675 676 676 #ifdef VBOX 677 677 678 /* Keep in sync with gen_check_external_event() */ 678 679 void helper_check_external_event() … … 695 696 sync_seg(env, reg, env->segs[reg].newselector); 696 697 } 698 697 699 #endif /* VBOX */ 698 700 … … 714 716 void helper_outb(uint32_t port, uint32_t data) 715 717 { 718 #ifndef VBOX 719 cpu_outb(port, data & 0xff); 720 #else 716 721 cpu_outb(env, port, data & 0xff); 722 #endif 717 723 } 718 724 719 725 target_ulong helper_inb(uint32_t port) 720 726 { 727 #ifndef VBOX 728 return cpu_inb(port); 729 #else 721 730 return cpu_inb(env, port); 731 #endif 722 732 } 723 733 724 734 void helper_outw(uint32_t port, uint32_t data) 725 735 { 736 #ifndef VBOX 737 cpu_outw(port, data & 0xffff); 738 #else 726 739 cpu_outw(env, port, data & 0xffff); 740 #endif 727 741 } 728 742 729 743 target_ulong helper_inw(uint32_t port) 730 744 { 745 #ifndef VBOX 746 return cpu_inw(port); 747 #else 731 748 return cpu_inw(env, port); 749 #endif 732 750 } 733 751 734 752 void helper_outl(uint32_t port, uint32_t data) 735 753 { 754 #ifndef VBOX 755 cpu_outl(port, data); 756 #else 736 757 cpu_outl(env, port, data); 758 #endif 737 759 } 738 760 739 761 target_ulong helper_inl(uint32_t port) 740 762 { 763 #ifndef VBOX 764 return cpu_inl(port); 765 #else 741 766 return cpu_inl(env, port); 767 #endif 742 768 } 743 769 … … 1406 1432 cpu_x86_set_cpl(env, 3); 1407 1433 } 1408 #ifdef CONFIG_KQEMU1409 if (kqemu_is_ok(env)) {1410 if (env->hflags & HF_LMA_MASK)1411 CC_OP = CC_OP_EFLAGS;1412 env->exception_index = -1;1413 cpu_loop_exit();1414 }1415 #endif1416 1434 } 1417 1435 #endif 1418 1436 1419 1437 #ifdef VBOX 1438 1420 1439 /** 1421 1440 * Checks and processes external VMM events. … … 1459 1478 } 1460 1479 } 1480 1461 1481 /* helper for recording call instruction addresses for later scanning */ 1462 1482 void helper_record_call() … … 1467 1487 remR3RecordCall(env); 1468 1488 } 1489 1469 1490 #endif /* VBOX */ 1470 1491 … … 2945 2966 EIP = offset; 2946 2967 } 2947 #ifdef CONFIG_KQEMU2948 if (kqemu_is_ok(env)) {2949 env->exception_index = -1;2950 cpu_loop_exit();2951 }2952 #endif2953 2968 } 2954 2969 … … 3094 3109 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0)) 3095 3110 { 3096 # ifdef DEBUG3111 # ifdef DEBUG 3097 3112 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc); 3098 # endif3113 # endif 3099 3114 new_cs = new_cs & 0xfffc; 3100 3115 } … … 3326 3341 } 3327 3342 env->hflags2 &= ~HF2_NMI_MASK; 3328 #ifdef CONFIG_KQEMU3329 if (kqemu_is_ok(env)) {3330 CC_OP = CC_OP_EFLAGS;3331 env->exception_index = -1;3332 cpu_loop_exit();3333 }3334 #endif3335 3343 } 3336 3344 … … 3338 3346 { 3339 3347 helper_ret_protected(shift, 0, addend); 3340 #ifdef CONFIG_KQEMU3341 if (kqemu_is_ok(env)) {3342 env->exception_index = -1;3343 cpu_loop_exit();3344 }3345 #endif3346 3348 } 3347 3349 … … 3416 3418 ESP = ECX; 3417 3419 EIP = EDX; 3418 #ifdef CONFIG_KQEMU3419 if (kqemu_is_ok(env)) {3420 env->exception_index = -1;3421 cpu_loop_exit();3422 }3423 #endif3424 3420 } 3425 3421 … … 3536 3532 } 3537 3533 3538 #ifdef VBOX3539 3534 void helper_rdtscp(void) 3540 3535 { 3536 #ifndef VBOX 3537 helper_rdtsc(); 3538 ECX = (uint32_t)(env->tsc_aux); 3539 #else 3541 3540 uint64_t val; 3542 3541 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { … … 3551 3550 else 3552 3551 ECX = 0; 3553 }3554 3552 #endif /* VBOX */ 3553 } 3555 3554 3556 3555 void helper_rdpmc(void) … … 3705 3704 env->mcg_ctl = val; 3706 3705 break; 3706 case MSR_TSC_AUX: 3707 env->tsc_aux = val; 
3708 break; 3707 3709 # endif /* !VBOX */ 3708 3710 default: … … 3788 3790 val = env->kernelgsbase; 3789 3791 break; 3790 #endif 3791 #ifdef CONFIG_KQEMU 3792 case MSR_QPI_COMMBASE: 3793 if (env->kqemu_enabled) { 3794 val = kqemu_comm_base; 3795 } else { 3796 val = 0; 3797 } 3798 break; 3792 # ifndef VBOX 3793 case MSR_TSC_AUX: 3794 val = env->tsc_aux; 3795 break; 3796 # endif /*!VBOX*/ 3799 3797 #endif 3800 3798 # ifndef VBOX … … 5010 5008 target_ulong addr; 5011 5009 5010 /* The operand must be 16 byte aligned */ 5011 if (ptr & 0xf) { 5012 raise_exception(EXCP0D_GPF); 5013 } 5014 5012 5015 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; 5013 5016 fptag = 0; … … 5065 5068 CPU86_LDouble tmp; 5066 5069 target_ulong addr; 5070 5071 /* The operand must be 16 byte aligned */ 5072 if (ptr & 0xf) { 5073 raise_exception(EXCP0D_GPF); 5074 } 5067 5075 5068 5076 env->fpuc = lduw(ptr); … … 6732 6740 } 6733 6741 6734 target_ulong helper_ bsr(target_ulong t0)6742 target_ulong helper_lzcnt(target_ulong t0, int wordsize) 6735 6743 { 6736 6744 int count; 6737 6745 target_ulong res, mask; 6738 6746 6747 if (wordsize > 0 && t0 == 0) { 6748 return wordsize; 6749 } 6739 6750 res = t0; 6740 6751 count = TARGET_LONG_BITS - 1; … … 6744 6755 res <<= 1; 6745 6756 } 6757 if (wordsize > 0) { 6758 return wordsize - 1 - count; 6759 } 6746 6760 return count; 6747 6761 } 6748 6762 6763 target_ulong helper_bsr(target_ulong t0) 6764 { 6765 return helper_lzcnt(t0, 0); 6766 } 6749 6767 6750 6768 static int compute_all_eflags(void) -
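Side note on the op_helper.c hunk above: helper_bsr() becomes a wrapper around the new helper_lzcnt(). With wordsize == 0 the function returns the BSR result (index of the highest set bit); with wordsize > 0 it returns LZCNT semantics for that operand width, including the defined result for a zero input. A 32-bit illustration; the shift-loop condition is partly elided in the hunk, so this body is a reconstruction:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned lzcnt_or_bsr(uint32_t t0, int wordsize)
    {
        unsigned count = 31;
        uint32_t res = t0;

        if (wordsize > 0 && t0 == 0)
            return (unsigned)wordsize;        /* LZCNT of 0 is the operand width */
        while (count > 0 && !(res & 0x80000000u)) {
            count--;
            res <<= 1;
        }
        return wordsize > 0 ? (unsigned)wordsize - 1 - count : count;
    }

    int main(void)
    {
        printf("bsr=%u lzcnt=%u lzcnt0=%u\n",
               lzcnt_or_bsr(0x10, 0),   /* 4  */
               lzcnt_or_bsr(0x10, 32),  /* 27 */
               lzcnt_or_bsr(0, 32));    /* 32 */
        return 0;
    }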
trunk/src/recompiler/target-i386/ops_sse.h
r36175 r37675 811 811 { 812 812 d->XMM_S(0) = approx_rcp(s->XMM_S(0)); 813 } 814 815 static inline uint64_t helper_extrq(uint64_t src, int shift, int len) 816 { 817 uint64_t mask; 818 819 if (len == 0) { 820 mask = ~0LL; 821 } else { 822 mask = (1ULL << len) - 1; 823 } 824 return (src >> shift) & mask; 825 } 826 827 void helper_extrq_r(XMMReg *d, XMMReg *s) 828 { 829 d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), s->XMM_B(1), s->XMM_B(0)); 830 } 831 832 void helper_extrq_i(XMMReg *d, int index, int length) 833 { 834 d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), index, length); 835 } 836 837 static inline uint64_t helper_insertq(uint64_t src, int shift, int len) 838 { 839 uint64_t mask; 840 841 if (len == 0) { 842 mask = ~0ULL; 843 } else { 844 mask = (1ULL << len) - 1; 845 } 846 return (src & ~(mask << shift)) | ((src & mask) << shift); 847 } 848 849 void helper_insertq_r(XMMReg *d, XMMReg *s) 850 { 851 d->XMM_Q(0) = helper_insertq(s->XMM_Q(0), s->XMM_B(9), s->XMM_B(8)); 852 } 853 854 void helper_insertq_i(XMMReg *d, int index, int length) 855 { 856 d->XMM_Q(0) = helper_insertq(d->XMM_Q(0), index, length); 813 857 } 814 858 … … 906 950 SSE_HELPER_CMP(cmpord, FPU_CMPORD) 907 951 908 const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};952 static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; 909 953 910 954 void helper_ucomiss(Reg *d, Reg *s) -
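Side note on the ops_sse.h hunk above: the new SSE4a helpers (helper_extrq/helper_insertq) treat a field length of 0 as "all 64 bits" and otherwise build a (1 << len) - 1 mask applied at bit position shift. A standalone demo of the extract step:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t extrq(uint64_t src, int shift, int len)
    {
        uint64_t mask = (len == 0) ? ~0ULL : (1ULL << len) - 1;
        return (src >> shift) & mask;
    }

    int main(void)
    {
        /* Take 8 bits starting at bit 16. */
        printf("0x%" PRIx64 "\n", extrq(0x1122334455667788ULL, 16, 8)); /* 0x66 */
        return 0;
    }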
trunk/src/recompiler/target-i386/ops_sse_header.h
r36175 r37675 198 198 DEF_HELPER_2(rcpps, void, XMMReg, XMMReg) 199 199 DEF_HELPER_2(rcpss, void, XMMReg, XMMReg) 200 DEF_HELPER_2(extrq_r, void, XMMReg, XMMReg) 201 DEF_HELPER_3(extrq_i, void, XMMReg, int, int) 202 DEF_HELPER_2(insertq_r, void, XMMReg, XMMReg) 203 DEF_HELPER_3(insertq_i, void, XMMReg, int, int) 200 204 DEF_HELPER_2(haddps, void, XMMReg, XMMReg) 201 205 DEF_HELPER_2(haddpd, void, XMMReg, XMMReg) -
trunk/src/recompiler/target-i386/translate.c
r36266 r37675 75 75 static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp; 76 76 static TCGv_i32 cpu_cc_op; 77 static TCGv cpu_regs[CPU_NB_REGS]; 77 78 /* local temps */ 78 79 static TCGv cpu_T[2], cpu_T3; … … 82 83 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32; 83 84 static TCGv_i64 cpu_tmp1_i64; 84 static TCGv cpu_tmp5 , cpu_tmp6;85 static TCGv cpu_tmp5; 85 86 86 87 #include "gen-icount.h" … … 306 307 #endif /* !TARGET_X86_64 */ 307 308 308 #if defined( WORDS_BIGENDIAN)309 #if defined(HOST_WORDS_BIGENDIAN) 309 310 #define REG_B_OFFSET (sizeof(target_ulong) - 1) 310 311 #define REG_H_OFFSET (sizeof(target_ulong) - 2) … … 322 323 static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0) 323 324 { 325 TCGv tmp; 326 327 switch(ot) { 328 case OT_BYTE: 329 tmp = tcg_temp_new(); 330 tcg_gen_ext8u_tl(tmp, t0); 331 if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) { 332 tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xff); 333 tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp); 334 } else { 335 tcg_gen_shli_tl(tmp, tmp, 8); 336 tcg_gen_andi_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], ~0xff00); 337 tcg_gen_or_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], tmp); 338 } 339 tcg_temp_free(tmp); 340 break; 341 case OT_WORD: 342 tmp = tcg_temp_new(); 343 tcg_gen_ext16u_tl(tmp, t0); 344 tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff); 345 tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp); 346 tcg_temp_free(tmp); 347 break; 348 default: /* XXX this shouldn't be reached; abort? */ 349 case OT_LONG: 350 /* For x86_64, this sets the higher half of register to zero. 351 For i386, this is equivalent to a mov. */ 352 tcg_gen_ext32u_tl(cpu_regs[reg], t0); 353 break; 354 #ifdef TARGET_X86_64 355 case OT_QUAD: 356 tcg_gen_mov_tl(cpu_regs[reg], t0); 357 break; 358 #endif 359 } 360 } 361 362 static inline void gen_op_mov_reg_T0(int ot, int reg) 363 { 364 gen_op_mov_reg_v(ot, reg, cpu_T[0]); 365 } 366 367 static inline void gen_op_mov_reg_T1(int ot, int reg) 368 { 369 gen_op_mov_reg_v(ot, reg, cpu_T[1]); 370 } 371 372 static inline void gen_op_mov_reg_A0(int size, int reg) 373 { 374 TCGv tmp; 375 376 switch(size) { 377 case 0: 378 tmp = tcg_temp_new(); 379 tcg_gen_ext16u_tl(tmp, cpu_A0); 380 tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff); 381 tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp); 382 tcg_temp_free(tmp); 383 break; 384 default: /* XXX this shouldn't be reached; abort? */ 385 case 1: 386 /* For x86_64, this sets the higher half of register to zero. 387 For i386, this is equivalent to a mov. 
*/ 388 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0); 389 break; 390 #ifdef TARGET_X86_64 391 case 2: 392 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0); 393 break; 394 #endif 395 } 396 } 397 398 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg) 399 { 324 400 switch(ot) { 325 401 case OT_BYTE: 326 402 if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) { 327 tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);403 goto std_case; 328 404 } else { 329 tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET); 330 } 331 break; 332 case OT_WORD: 333 tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET); 334 break; 335 #ifdef TARGET_X86_64 336 case OT_LONG: 337 tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); 338 /* high part of register set to zero */ 339 tcg_gen_movi_tl(cpu_tmp0, 0); 340 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET); 405 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8); 406 tcg_gen_ext8u_tl(t0, t0); 407 } 341 408 break; 342 409 default: 343 case OT_QUAD: 344 tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, regs[reg])); 345 break; 346 #else 347 default: 348 case OT_LONG: 349 tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); 350 break; 351 #endif 410 std_case: 411 tcg_gen_mov_tl(t0, cpu_regs[reg]); 412 break; 352 413 } 353 414 } 354 415 355 static inline void gen_op_mov_reg_T0(int ot, int reg)356 {357 gen_op_mov_reg_v(ot, reg, cpu_T[0]);358 }359 360 static inline void gen_op_mov_reg_T1(int ot, int reg)361 {362 gen_op_mov_reg_v(ot, reg, cpu_T[1]);363 }364 365 static inline void gen_op_mov_reg_A0(int size, int reg)366 {367 switch(size) {368 case 0:369 tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);370 break;371 #ifdef TARGET_X86_64372 case 1:373 tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);374 /* high part of register set to zero */375 tcg_gen_movi_tl(cpu_tmp0, 0);376 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);377 break;378 default:379 case 2:380 tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));381 break;382 #else383 default:384 case 1:385 tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);386 break;387 #endif388 }389 }390 391 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)392 {393 switch(ot) {394 case OT_BYTE:395 if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {396 #ifndef VBOX397 goto std_case;398 #else399 tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);400 #endif401 } else {402 tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);403 }404 break;405 default:406 #ifndef VBOX407 std_case:408 #endif409 tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));410 break;411 }412 }413 414 416 static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg) 415 417 { … … 419 421 static inline void gen_op_movl_A0_reg(int reg) 420 422 { 421 tcg_gen_ ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);423 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]); 422 424 } 423 425 … … 461 463 switch(size) { 462 464 case 0: 463 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 464 tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val); 465 tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET); 465 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val); 466 tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0); 467 
tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff); 468 tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0); 466 469 break; 467 470 case 1: 468 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 469 tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val); 470 #ifdef TARGET_X86_64 471 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff); 472 #endif 473 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 471 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val); 472 /* For x86_64, this sets the higher half of register to zero. 473 For i386, this is equivalent to a nop. */ 474 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0); 475 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0); 474 476 break; 475 477 #ifdef TARGET_X86_64 476 478 case 2: 477 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 478 tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val); 479 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 479 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val); 480 480 break; 481 481 #endif … … 487 487 switch(size) { 488 488 case 0: 489 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 490 tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]); 491 tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET); 489 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]); 490 tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0); 491 tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff); 492 tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0); 492 493 break; 493 494 case 1: 494 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 495 tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]); 496 #ifdef TARGET_X86_64 497 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff); 498 #endif 499 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 495 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]); 496 /* For x86_64, this sets the higher half of register to zero. 497 For i386, this is equivalent to a nop. */ 498 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0); 499 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0); 500 500 break; 501 501 #ifdef TARGET_X86_64 502 502 case 2: 503 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 504 tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]); 505 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 503 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]); 506 504 break; 507 505 #endif … … 516 514 static inline void gen_op_addl_A0_reg_sN(int shift, int reg) 517 515 { 518 tcg_gen_ ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));516 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]); 519 517 if (shift != 0) 520 518 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift); 521 519 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); 522 #ifdef TARGET_X86_64 523 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);524 #endif 520 /* For x86_64, this sets the higher half of register to zero. 521 For i386, this is equivalent to a nop. 
*/ 522 tcg_gen_ext32u_tl(cpu_A0, cpu_A0); 525 523 } 526 524 … … 620 618 static inline void gen_op_movq_A0_reg(int reg) 621 619 { 622 tcg_gen_ ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));620 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]); 623 621 } 624 622 625 623 static inline void gen_op_addq_A0_reg_sN(int shift, int reg) 626 624 { 627 tcg_gen_ ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));625 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]); 628 626 if (shift != 0) 629 627 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift); … … 877 875 static inline void gen_op_jnz_ecx(int size, int label1) 878 876 { 879 tcg_gen_ ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));877 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]); 880 878 gen_extu(size + 1, cpu_tmp0); 881 879 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1); … … 884 882 static inline void gen_op_jz_ecx(int size, int label1) 885 883 { 886 tcg_gen_ ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));884 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]); 887 885 gen_extu(size + 1, cpu_tmp0); 888 886 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); … … 1773 1771 if (is_right) { 1774 1772 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0); 1775 tcg_gen_sub _tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);1773 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0); 1776 1774 tcg_gen_shl_tl(t0, t0, cpu_tmp0); 1777 1775 } else { 1778 1776 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0); 1779 tcg_gen_sub _tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);1777 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0); 1780 1778 tcg_gen_shr_tl(t0, t0, cpu_tmp0); 1781 1779 } … … 1998 1996 1999 1997 /* only needed if count > 16, but a test would complicate */ 2000 tcg_gen_sub _tl(cpu_tmp5, tcg_const_tl(32), t2);1998 tcg_gen_subfi_tl(cpu_tmp5, 32, t2); 2001 1999 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5); 2002 2000 … … 2012 2010 2013 2011 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5); 2014 tcg_gen_sub _tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5);2015 tcg_gen_shr_tl(cpu_tmp 6, t1, cpu_tmp0);2016 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp 6);2012 tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5); 2013 tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0); 2014 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5); 2017 2015 2018 2016 tcg_gen_shl_tl(t0, t0, t2); 2019 tcg_gen_sub _tl(cpu_tmp5, tcg_const_tl(32), t2);2017 tcg_gen_subfi_tl(cpu_tmp5, 32, t2); 2020 2018 tcg_gen_shr_tl(t1, t1, cpu_tmp5); 2021 2019 tcg_gen_or_tl(t0, t0, t1); … … 2030 2028 2031 2029 tcg_gen_shr_tl(t0, t0, t2); 2032 tcg_gen_sub _tl(cpu_tmp5, tcg_const_tl(data_bits), t2);2030 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2); 2033 2031 tcg_gen_shl_tl(t1, t1, cpu_tmp5); 2034 2032 tcg_gen_or_tl(t0, t0, t1); … … 2041 2039 2042 2040 tcg_gen_shl_tl(t0, t0, t2); 2043 tcg_gen_sub _tl(cpu_tmp5, tcg_const_tl(data_bits), t2);2041 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2); 2044 2042 tcg_gen_shr_tl(t1, t1, cpu_tmp5); 2045 2043 tcg_gen_or_tl(t0, t0, t1); … … 2223 2221 } 2224 2222 } 2225 /* XXX: index == 4 is always invalid*/2226 if (havesib && (index != 4 || scale != 0)) {2223 /* index == 4 means no index */ 2224 if (havesib && (index != 4)) { 2227 2225 #ifdef TARGET_X86_64 2228 2226 if (s->aflag == 2) { … … 3021 3019 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ 3022 3020 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */ 3023 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd */3021 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */ 3024 3022 [0x2c] = 
{ SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */ 3025 3023 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */ … … 3078 3076 [0x76] = MMX_OP2(pcmpeql), 3079 3077 [0x77] = { SSE_DUMMY }, /* emms */ 3078 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */ 3079 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r }, 3080 3080 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps }, 3081 3081 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps }, … … 3358 3358 case 0x02b: /* movntps */ 3359 3359 case 0x12b: /* movntps */ 3360 if (mod == 3) 3361 goto illegal_op; 3362 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3363 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); 3364 break; 3360 3365 case 0x3f0: /* lddqu */ 3361 3366 if (mod == 3) 3362 3367 goto illegal_op; 3363 3368 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3364 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); 3369 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); 3370 break; 3371 case 0x22b: /* movntss */ 3372 case 0x32b: /* movntsd */ 3373 if (mod == 3) 3374 goto illegal_op; 3375 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3376 if (b1 & 1) { 3377 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State, 3378 xmm_regs[reg])); 3379 } else { 3380 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, 3381 xmm_regs[reg].XMM_L(0))); 3382 gen_op_st_T0_A0(OT_LONG + s->mem_index); 3383 } 3365 3384 break; 3366 3385 case 0x6e: /* movd mm, ea */ … … 3518 3537 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)), 3519 3538 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); 3539 break; 3540 case 0x178: 3541 case 0x378: 3542 { 3543 int bit_index, field_length; 3544 3545 if (b1 == 1 && reg != 0) 3546 goto illegal_op; 3547 field_length = ldub_code(s->pc++) & 0x3F; 3548 bit_index = ldub_code(s->pc++) & 0x3F; 3549 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 3550 offsetof(CPUX86State,xmm_regs[reg])); 3551 if (b1 == 1) 3552 gen_helper_extrq_i(cpu_ptr0, tcg_const_i32(bit_index), 3553 tcg_const_i32(field_length)); 3554 else 3555 gen_helper_insertq_i(cpu_ptr0, tcg_const_i32(bit_index), 3556 tcg_const_i32(field_length)); 3557 } 3520 3558 break; 3521 3559 case 0x7e: /* movd ea, mm */ … … 4912 4950 ot = OT_QUAD; 4913 4951 } else if (op == 3 || op == 5) { 4914 /* for call calls, the operand is 16 or 32 bit, even 4915 in long mode */ 4916 ot = dflag ? OT_LONG : OT_WORD; 4952 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD; 4917 4953 } else if (op == 6) { 4918 4954 /* default push size is 64 bit */ … … 5198 5234 } 5199 5235 label1 = gen_new_label(); 5200 tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUState, regs[R_EAX])); 5201 tcg_gen_sub_tl(t2, t2, t0); 5236 tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0); 5202 5237 gen_extu(ot, t2); 5203 5238 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1); … … 5623 5658 5624 5659 case 0x91 ... 
0x97: /* xchg R, EAX */ 5660 do_xchg_reg_eax: 5625 5661 ot = dflag + OT_WORD; 5626 5662 reg = (b & 7) | REX_B(s); … … 5777 5813 tcg_gen_movi_tl(cpu_T3, val); 5778 5814 } else { 5779 tcg_gen_ ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_ECX]));5815 tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]); 5780 5816 } 5781 5817 gen_shiftd_rm_T1_T3(s, ot, opreg, op); … … 6593 6629 if (s->dflag == 0) 6594 6630 tval &= 0xffff; 6595 #ifdef VBOX /* upstream fix */ 6596 else if (!CODE64(s)) 6631 else if(!CODE64(s)) 6597 6632 tval &= 0xffffffff; 6598 #endif6599 6633 gen_movtl_T0_im(next_eip); 6600 6634 gen_push_T0(s); … … 6693 6727 l1 = gen_new_label(); 6694 6728 gen_jcc1(s, s->cc_op, b ^ 1, l1); 6695 tcg_gen_ st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);6729 tcg_gen_mov_tl(cpu_regs[reg], t0); 6696 6730 gen_set_label(l1); 6697 tcg_gen_movi_tl(cpu_tmp0, 0); 6698 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET); 6731 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]); 6699 6732 } else 6700 6733 #endif … … 6930 6963 modrm = ldub_code(s->pc++); 6931 6964 reg = ((modrm >> 3) & 7) | rex_r; 6932 gen_ldst_modrm(s, 6965 gen_ldst_modrm(s,modrm, ot, OR_TMP0, 0); 6933 6966 gen_extu(ot, cpu_T[0]); 6934 label1 = gen_new_label();6935 tcg_gen_movi_tl(cpu_cc_dst, 0);6936 6967 t0 = tcg_temp_local_new(); 6937 6968 tcg_gen_mov_tl(t0, cpu_T[0]); 6938 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1); 6939 if (b & 1) { 6940 gen_helper_bsr(cpu_T[0], t0); 6969 if ((b & 1) && (prefixes & PREFIX_REPZ) && 6970 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) { 6971 switch(ot) { 6972 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0, 6973 tcg_const_i32(16)); break; 6974 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0, 6975 tcg_const_i32(32)); break; 6976 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0, 6977 tcg_const_i32(64)); break; 6978 } 6979 gen_op_mov_reg_T0(ot, reg); 6941 6980 } else { 6942 gen_helper_bsf(cpu_T[0], t0); 6943 } 6944 gen_op_mov_reg_T0(ot, reg); 6945 tcg_gen_movi_tl(cpu_cc_dst, 1); 6946 gen_set_label(label1); 6947 tcg_gen_discard_tl(cpu_cc_src); 6948 s->cc_op = CC_OP_LOGICB + ot; 6981 label1 = gen_new_label(); 6982 tcg_gen_movi_tl(cpu_cc_dst, 0); 6983 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1); 6984 if (b & 1) { 6985 gen_helper_bsr(cpu_T[0], t0); 6986 } else { 6987 gen_helper_bsf(cpu_T[0], t0); 6988 } 6989 gen_op_mov_reg_T0(ot, reg); 6990 tcg_gen_movi_tl(cpu_cc_dst, 1); 6991 gen_set_label(label1); 6992 tcg_gen_discard_tl(cpu_cc_src); 6993 s->cc_op = CC_OP_LOGICB + ot; 6994 } 6949 6995 tcg_temp_free(t0); 6950 6996 } … … 7005 7051 /* misc */ 7006 7052 case 0x90: /* nop */ 7007 /* XXX: xchg + rex handling */7008 7053 /* XXX: correct lock test for all insn */ 7009 if (prefixes & PREFIX_LOCK) 7054 if (prefixes & PREFIX_LOCK) { 7010 7055 goto illegal_op; 7056 } 7057 /* If REX_B is set, then this is xchg eax, r8d, not a nop. 
*/ 7058 if (REX_B(s)) { 7059 goto do_xchg_reg_eax; 7060 } 7011 7061 if (prefixes & PREFIX_REPZ) { 7012 7062 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE); … … 7586 7636 case 4: /* smsw */ 7587 7637 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); 7588 #if defined TARGET_X86_64 && defined WORDS_BIGENDIAN7638 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN 7589 7639 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4); 7590 7640 #else … … 7604 7654 } 7605 7655 break; 7606 case 7: /* invlpg */ 7607 if (s->cpl != 0) { 7608 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7609 } else { 7610 if (mod == 3) { 7611 #ifdef TARGET_X86_64 7612 if (CODE64(s) && rm == 0) { 7613 /* swapgs */ 7614 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[R_GS].base)); 7615 tcg_gen_ld_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,kernelgsbase)); 7616 tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,segs[R_GS].base)); 7617 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,kernelgsbase)); 7618 } else 7619 #endif 7620 { 7621 goto illegal_op; 7622 } 7656 case 7: 7657 if (mod != 3) { /* invlpg */ 7658 if (s->cpl != 0) { 7659 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7623 7660 } else { 7624 7661 if (s->cc_op != CC_OP_DYNAMIC) … … 7629 7666 gen_jmp_im(s->pc - s->cs_base); 7630 7667 gen_eob(s); 7668 } 7669 } else { 7670 switch (rm) { 7671 case 0: /* swapgs */ 7672 #ifdef TARGET_X86_64 7673 if (CODE64(s)) { 7674 if (s->cpl != 0) { 7675 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7676 } else { 7677 tcg_gen_ld_tl(cpu_T[0], cpu_env, 7678 offsetof(CPUX86State,segs[R_GS].base)); 7679 tcg_gen_ld_tl(cpu_T[1], cpu_env, 7680 offsetof(CPUX86State,kernelgsbase)); 7681 tcg_gen_st_tl(cpu_T[1], cpu_env, 7682 offsetof(CPUX86State,segs[R_GS].base)); 7683 tcg_gen_st_tl(cpu_T[0], cpu_env, 7684 offsetof(CPUX86State,kernelgsbase)); 7685 } 7686 } else 7687 #endif 7688 { 7689 goto illegal_op; 7690 } 7691 break; 7692 case 1: /* rdtscp */ 7693 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) 7694 goto illegal_op; 7695 if (s->cc_op != CC_OP_DYNAMIC) 7696 gen_op_set_cc_op(s->cc_op); 7697 gen_jmp_im(pc_start - s->cs_base); 7698 if (use_icount) 7699 gen_io_start(); 7700 gen_helper_rdtscp(); 7701 if (use_icount) { 7702 gen_io_end(); 7703 gen_jmp(s, s->pc - s->cs_base); 7704 } 7705 break; 7706 default: 7707 goto illegal_op; 7631 7708 } 7632 7709 } … … 7676 7753 { 7677 7754 int label1; 7678 TCGv t0, t1, t2; 7679 #ifdef VBOX 7680 TCGv a0; 7681 #endif 7755 TCGv t0, t1, t2, a0; 7682 7756 7683 7757 if (!s->pe || s->vm86) … … 7686 7760 t1 = tcg_temp_local_new(); 7687 7761 t2 = tcg_temp_local_new(); 7688 #ifdef VBOX7689 a0 = tcg_temp_local_new();7690 #endif7691 7762 ot = OT_WORD; 7692 7763 modrm = ldub_code(s->pc++); … … 7696 7767 if (mod != 3) { 7697 7768 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 7698 #ifdef VBOX 7769 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0); 7770 a0 = tcg_temp_local_new(); 7699 7771 tcg_gen_mov_tl(a0, cpu_A0); 7700 #endif7701 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);7702 7772 } else { 7703 7773 gen_op_mov_v_reg(ot, t0, rm); 7774 TCGV_UNUSED(a0); 7704 7775 } 7705 7776 gen_op_mov_v_reg(ot, t1, reg); … … 7714 7785 gen_set_label(label1); 7715 7786 if (mod != 3) { 7716 #ifdef VBOX7717 /* cpu_A0 doesn't survive branch */7718 7787 gen_op_st_v(ot + s->mem_index, t0, a0); 7719 #else 7720 gen_op_st_v(ot + s->mem_index, t0, cpu_A0); 7721 #endif 7722 } else { 7788 tcg_temp_free(a0); 7789 } else { 7723 7790 gen_op_mov_reg_v(ot, rm, t0); 7724 7791 } … … 7732 7799 
tcg_temp_free(t1); 7733 7800 tcg_temp_free(t2); 7734 #ifdef VBOX7735 tcg_temp_free(a0);7736 #endif7737 7801 } 7738 7802 break; … … 7803 7867 else 7804 7868 ot = OT_LONG; 7869 if ((prefixes & PREFIX_LOCK) && (reg == 0) && 7870 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) { 7871 reg = 8; 7872 } 7805 7873 switch(reg) { 7806 7874 case 0: … … 7890 7958 case 0: /* fxsave */ 7891 7959 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) || 7892 (s-> flags & HF_EM_MASK))7960 (s->prefix & PREFIX_LOCK)) 7893 7961 goto illegal_op; 7894 if ( s->flags & HF_TS_MASK) {7962 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { 7895 7963 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); 7896 7964 break; … … 7904 7972 case 1: /* fxrstor */ 7905 7973 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) || 7906 (s-> flags & HF_EM_MASK))7974 (s->prefix & PREFIX_LOCK)) 7907 7975 goto illegal_op; 7908 if ( s->flags & HF_TS_MASK) {7976 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { 7909 7977 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); 7910 7978 break; … … 8005 8073 case 0x128 ... 0x12f: 8006 8074 case 0x138 ... 0x13a: 8007 case 0x150 ... 0x17 7:8075 case 0x150 ... 0x179: 8008 8076 case 0x17c ... 0x17f: 8009 8077 case 0x1c2: … … 8043 8111 cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp), 8044 8112 "cc_tmp"); 8113 8114 #ifdef TARGET_X86_64 8115 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0, 8116 offsetof(CPUState, regs[R_EAX]), "rax"); 8117 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0, 8118 offsetof(CPUState, regs[R_ECX]), "rcx"); 8119 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0, 8120 offsetof(CPUState, regs[R_EDX]), "rdx"); 8121 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0, 8122 offsetof(CPUState, regs[R_EBX]), "rbx"); 8123 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0, 8124 offsetof(CPUState, regs[R_ESP]), "rsp"); 8125 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0, 8126 offsetof(CPUState, regs[R_EBP]), "rbp"); 8127 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0, 8128 offsetof(CPUState, regs[R_ESI]), "rsi"); 8129 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0, 8130 offsetof(CPUState, regs[R_EDI]), "rdi"); 8131 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0, 8132 offsetof(CPUState, regs[8]), "r8"); 8133 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0, 8134 offsetof(CPUState, regs[9]), "r9"); 8135 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0, 8136 offsetof(CPUState, regs[10]), "r10"); 8137 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0, 8138 offsetof(CPUState, regs[11]), "r11"); 8139 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0, 8140 offsetof(CPUState, regs[12]), "r12"); 8141 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0, 8142 offsetof(CPUState, regs[13]), "r13"); 8143 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0, 8144 offsetof(CPUState, regs[14]), "r14"); 8145 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0, 8146 offsetof(CPUState, regs[15]), "r15"); 8147 #else 8148 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0, 8149 offsetof(CPUState, regs[R_EAX]), "eax"); 8150 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0, 8151 offsetof(CPUState, regs[R_ECX]), "ecx"); 8152 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0, 8153 offsetof(CPUState, regs[R_EDX]), "edx"); 8154 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0, 8155 offsetof(CPUState, regs[R_EBX]), "ebx"); 8156 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0, 8157 offsetof(CPUState, regs[R_ESP]), "esp"); 8158 cpu_regs[R_EBP] = 
tcg_global_mem_new_i32(TCG_AREG0, 8159 offsetof(CPUState, regs[R_EBP]), "ebp"); 8160 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0, 8161 offsetof(CPUState, regs[R_ESI]), "esi"); 8162 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0, 8163 offsetof(CPUState, regs[R_EDI]), "edi"); 8164 #endif 8045 8165 8046 8166 /* register helpers */ … … 8143 8263 cpu_tmp4 = tcg_temp_new(); 8144 8264 cpu_tmp5 = tcg_temp_new(); 8145 cpu_tmp6 = tcg_temp_new();8146 8265 cpu_ptr0 = tcg_temp_new_ptr(); 8147 8266 cpu_ptr1 = tcg_temp_new_ptr(); … … 8159 8278 gen_icount_start(); 8160 8279 for(;;) { 8161 if (unlikely(! TAILQ_EMPTY(&env->breakpoints))) {8162 TAILQ_FOREACH(bp, &env->breakpoints, entry) {8280 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { 8281 QTAILQ_FOREACH(bp, &env->breakpoints, entry) { 8163 8282 if (bp->pc == pc_ptr && 8164 8283 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) { -
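The bulk of the translate.c changes above come from QEMU 0.12.x mapping the guest general-purpose registers onto TCG globals (the cpu_regs[] array registered at the end of the hunk) instead of loading and storing them through offsetof(CPUState, regs[reg]) on every access. This lets the register allocator keep guest registers in host registers across operations, but it also means sub-register writes (AL/AH/AX) can no longer be done with a narrow store into env; they become read-modify-write sequences on the global. A minimal sketch of the byte-write pattern used in gen_op_mov_reg_v above (condensed, not the verbatim upstream code):

    /* Write the low byte of guest register 'reg', which now lives in a
       TCG global rather than in env memory. */
    TCGv tmp = tcg_temp_new();
    tcg_gen_ext8u_tl(tmp, t0);                             /* keep bits 0..7 */
    tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xff);  /* clear old byte */
    tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);      /* merge new byte */
    tcg_temp_free(tmp);

The OT_LONG case leans on the companion trick visible throughout the hunk: tcg_gen_ext32u_tl is a plain mov on 32-bit targets and zero-extends the upper half on x86_64, matching the architectural behaviour of 32-bit register writes.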
trunk/src/recompiler/tcg/i386/tcg-target.c
r36175 r37675 293 293 } 294 294 295 static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val) 296 { 297 if (val == (int8_t)val) { 295 static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val, int cf) 296 { 297 if (!cf && ((c == ARITH_ADD && val == 1) || (c == ARITH_SUB && val == -1))) { 298 /* inc */ 299 tcg_out_opc(s, 0x40 + r0); 300 } else if (!cf && ((c == ARITH_ADD && val == -1) || (c == ARITH_SUB && val == 1))) { 301 /* dec */ 302 tcg_out_opc(s, 0x48 + r0); 303 } else if (val == (int8_t)val) { 298 304 tcg_out_modrm(s, 0x83, c, r0); 299 305 tcg_out8(s, val); 306 } else if (c == ARITH_AND && val == 0xffu && r0 < 4) { 307 /* movzbl */ 308 tcg_out_modrm(s, 0xb6 | P_EXT, r0, r0); 309 } else if (c == ARITH_AND && val == 0xffffu) { 310 /* movzwl */ 311 tcg_out_modrm(s, 0xb7 | P_EXT, r0, r0); 300 312 } else { 301 313 tcg_out_modrm(s, 0x81, c, r0); … … 307 319 { 308 320 if (val != 0) 309 tgen_arithi(s, ARITH_ADD, reg, val );321 tgen_arithi(s, ARITH_ADD, reg, val, 0); 310 322 } 311 323 … … 314 326 { 315 327 if (val != 0) 316 tgen_arithi(s, ARITH_SUB, reg, val );328 tgen_arithi(s, ARITH_SUB, reg, val, 0); 317 329 } 318 330 #endif … … 363 375 tcg_out_modrm(s, 0x85, arg1, arg1); 364 376 } else { 365 tgen_arithi(s, ARITH_CMP, arg1, arg2 );377 tgen_arithi(s, ARITH_CMP, arg1, arg2, 0); 366 378 } 367 379 } else { … … 370 382 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index); 371 383 } 372 373 #ifdef VBOX374 375 DECLINLINE(void) tcg_out_long_call(TCGContext *s, void* dst)376 {377 intptr_t disp;378 # ifdef VBOX379 tcg_gen_stack_alignment_check(s);380 # endif381 disp = (uintptr_t)dst - (uintptr_t)s->code_ptr - 5;382 tcg_out8(s, 0xe8); /* call disp32 */383 tcg_out32(s, disp); /* disp32 */384 }385 386 DECLINLINE(void) tcg_out_long_jmp(TCGContext *s, void* dst)387 {388 intptr_t disp = (uintptr_t)dst - (uintptr_t)s->code_ptr - 5;389 tcg_out8(s, 0xe9); /* jmp disp32 */390 tcg_out32(s, disp); /* disp32 */391 }392 393 #endif /* VBOX */394 384 395 385 /* XXX: we implement it at the target level to avoid having to … … 474 464 #endif 475 465 466 #ifndef CONFIG_USER_ONLY 467 #define GUEST_BASE 0 468 #endif 469 476 470 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 477 static void *vbox_ld_helpers[] = { 471 472 static void * const vbox_ld_helpers[] = { 478 473 __ldub_vbox_phys, 479 474 __lduw_vbox_phys, … … 486 481 }; 487 482 488 static void * vbox_st_helpers[] = {483 static void * const vbox_st_helpers[] = { 489 484 __stb_vbox_phys, 490 485 __stw_vbox_phys, … … 493 488 }; 494 489 490 DECLINLINE(void) tcg_out_long_call(TCGContext *s, void* dst) 491 { 492 intptr_t disp; 493 # ifdef VBOX 494 tcg_gen_stack_alignment_check(s); 495 # endif 496 disp = (uintptr_t)dst - (uintptr_t)s->code_ptr - 5; 497 tcg_out8(s, 0xe8); /* call disp32 */ 498 tcg_out32(s, disp); /* disp32 */ 499 } 500 495 501 static void tcg_out_vbox_phys_read(TCGContext *s, int index, 496 502 int addr_reg, … … 503 509 AssertMsg(sizeof(RTGCPHYS) == 8, ("Physical address must be 64-bits, update caller\n")); 504 510 505 # if 0511 # if 0 506 512 tcg_out8(s, 0x6a); tcg_out8(s, 0x00); /* push $0 */ 507 513 tcg_out_push(s, addr_reg); 508 # else514 # else 509 515 /* mov addr_reg, %eax */ 510 516 tcg_out_mov(s, TCG_REG_EAX, addr_reg); 511 # endif517 # endif 512 518 513 519 tcg_out_long_call(s, vbox_ld_helpers[index]); … … 526 532 int useReg2 = ((index & 3) == 3); 527 533 528 # if 0534 # if 0 529 535 /* out parameter (value2) */ 530 536 if (useReg2) … … 536 542 tcg_out8(s, 0x6a); tcg_out8(s, 0x00); /* push $0 */ 537 543 
tcg_out_push(s, addr_reg); 538 # else544 # else 539 545 Assert(val_reg != TCG_REG_EAX && (!useReg2 || (val_reg2 != TCG_REG_EAX))); 540 546 /* mov addr_reg, %eax */ … … 546 552 tcg_out_mov(s, TCG_REG_ECX, val_reg2); 547 553 548 # endif554 # endif 549 555 /* call it */ 550 556 tcg_out_long_call(s, vbox_st_helpers[index]); 551 557 552 558 /* clean stack after us */ 553 # if 0559 # if 0 554 560 tcg_out_addi(s, TCG_REG_ESP, 8 + (useReg2 ? 8 : 4)); 555 561 # endif … … 604 610 tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); 605 611 606 #ifndef VBOX607 612 tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */ 608 613 tcg_out8(s, 0x80 | (r1 << 3) | 0x04); 614 #ifndef VBOX 609 615 tcg_out8(s, (5 << 3) | r1); 616 #else 617 tcg_out8(s, (TCG_AREG0 << 3) | r1); /* env, not %ebp */ 618 Assert(mem_index >= 0 && mem_index < NB_MMU_MODES); 619 #endif 610 620 tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read)); 611 #else612 tcg_out_opc(s, 0x8d); /* lea offset(r1, env), r1 */613 tcg_out8(s, 0x80 | (r1 << 3) | 0x04);614 tcg_out8(s, (TCG_AREG0 << 3) | r1);615 tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));616 #endif617 621 618 622 /* cmp 0(r1), r0 */ … … 714 718 case 0: 715 719 /* movzbl */ 716 tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);720 tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, GUEST_BASE); 717 721 break; 718 722 case 0 | 4: 719 723 /* movsbl */ 720 tcg_out_modrm_offset(s, 0xbe | P_EXT, data_reg, r0, 0);724 tcg_out_modrm_offset(s, 0xbe | P_EXT, data_reg, r0, GUEST_BASE); 721 725 break; 722 726 case 1: 723 727 /* movzwl */ 724 tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);728 tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, GUEST_BASE); 725 729 if (bswap) { 726 730 /* rolw $8, data_reg */ … … 732 736 case 1 | 4: 733 737 /* movswl */ 734 tcg_out_modrm_offset(s, 0xbf | P_EXT, data_reg, r0, 0);738 tcg_out_modrm_offset(s, 0xbf | P_EXT, data_reg, r0, GUEST_BASE); 735 739 if (bswap) { 736 740 /* rolw $8, data_reg */ … … 745 749 case 2: 746 750 /* movl (r0), data_reg */ 747 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);751 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE); 748 752 if (bswap) { 749 753 /* bswap */ … … 761 765 } 762 766 if (!bswap) { 763 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);764 tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 4);767 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE); 768 tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, GUEST_BASE + 4); 765 769 } else { 766 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 4);770 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE + 4); 767 771 tcg_out_opc(s, (0xc8 + data_reg) | P_EXT); 768 772 769 tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 0);773 tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, GUEST_BASE); 770 774 /* bswap */ 771 775 tcg_out_opc(s, (0xc8 + data_reg2) | P_EXT); … … 842 846 tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); 843 847 844 #ifndef VBOX845 848 tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */ 846 849 tcg_out8(s, 0x80 | (r1 << 3) | 0x04); 850 #ifndef VBOX 847 851 tcg_out8(s, (5 << 3) | r1); 852 #else 853 tcg_out8(s, (TCG_AREG0 << 3) | r1); /* env is not %ebp */ 854 Assert(mem_index >= 0 && mem_index < NB_MMU_MODES); 855 #endif 848 856 tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write)); 849 #else850 tcg_out_opc(s, 0x8d); /* lea offset(r1, env), r1 */851 tcg_out8(s, 0x80 | (r1 << 3) | 0x04);852 tcg_out8(s, (TCG_AREG0 << 3) | r1);853 tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));854 #endif855 
857 856 858 /* cmp 0(r1), r0 */ … … 1004 1006 case 0: 1005 1007 /* movb */ 1006 tcg_out_modrm_offset(s, 0x88, data_reg, r0, 0);1008 tcg_out_modrm_offset(s, 0x88, data_reg, r0, GUEST_BASE); 1007 1009 break; 1008 1010 case 1: … … 1016 1018 /* movw */ 1017 1019 tcg_out8(s, 0x66); 1018 tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);1020 tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE); 1019 1021 break; 1020 1022 case 2: … … 1026 1028 } 1027 1029 /* movl */ 1028 tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);1030 tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE); 1029 1031 break; 1030 1032 case 3: … … 1033 1035 /* bswap data_reg */ 1034 1036 tcg_out_opc(s, (0xc8 + r1) | P_EXT); 1035 tcg_out_modrm_offset(s, 0x89, r1, r0, 0);1037 tcg_out_modrm_offset(s, 0x89, r1, r0, GUEST_BASE); 1036 1038 tcg_out_mov(s, r1, data_reg); 1037 1039 /* bswap data_reg */ 1038 1040 tcg_out_opc(s, (0xc8 + r1) | P_EXT); 1039 tcg_out_modrm_offset(s, 0x89, r1, r0, 4);1041 tcg_out_modrm_offset(s, 0x89, r1, r0, GUEST_BASE + 4); 1040 1042 } else { 1041 tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);1042 tcg_out_modrm_offset(s, 0x89, data_reg2, r0, 4);1043 tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE); 1044 tcg_out_modrm_offset(s, 0x89, data_reg2, r0, GUEST_BASE + 4); 1043 1045 } 1044 1046 break; … … 1158 1160 gen_arith: 1159 1161 if (const_args[2]) { 1160 tgen_arithi(s, c, args[0], args[2] );1162 tgen_arithi(s, c, args[0], args[2], 0); 1161 1163 } else { 1162 1164 tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]); … … 1216 1218 case INDEX_op_add2_i32: 1217 1219 if (const_args[4]) 1218 tgen_arithi(s, ARITH_ADD, args[0], args[4] );1220 tgen_arithi(s, ARITH_ADD, args[0], args[4], 1); 1219 1221 else 1220 1222 tcg_out_modrm(s, 0x01 | (ARITH_ADD << 3), args[4], args[0]); 1221 1223 if (const_args[5]) 1222 tgen_arithi(s, ARITH_ADC, args[1], args[5] );1224 tgen_arithi(s, ARITH_ADC, args[1], args[5], 1); 1223 1225 else 1224 1226 tcg_out_modrm(s, 0x01 | (ARITH_ADC << 3), args[5], args[1]); … … 1226 1228 case INDEX_op_sub2_i32: 1227 1229 if (const_args[4]) 1228 tgen_arithi(s, ARITH_SUB, args[0], args[4] );1230 tgen_arithi(s, ARITH_SUB, args[0], args[4], 1); 1229 1231 else 1230 1232 tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), args[4], args[0]); 1231 1233 if (const_args[5]) 1232 tgen_arithi(s, ARITH_SBB, args[1], args[5] );1234 tgen_arithi(s, ARITH_SBB, args[1], args[5], 1); 1233 1235 else 1234 1236 tcg_out_modrm(s, 0x01 | (ARITH_SBB << 3), args[5], args[1]); … … 1263 1265 case INDEX_op_ext16s_i32: 1264 1266 tcg_out_modrm(s, 0xbf | P_EXT, args[0], args[1]); 1267 break; 1268 case INDEX_op_ext8u_i32: 1269 tcg_out_modrm(s, 0xb6 | P_EXT, args[0], args[1]); 1270 break; 1271 case INDEX_op_ext16u_i32: 1272 tcg_out_modrm(s, 0xb7 | P_EXT, args[0], args[1]); 1265 1273 break; 1266 1274 … … 1332 1340 { INDEX_op_shr_i32, { "r", "0", "ci" } }, 1333 1341 { INDEX_op_sar_i32, { "r", "0", "ci" } }, 1334 { INDEX_op_sar_i32, { "r", "0", "ci" } },1335 1342 { INDEX_op_rotl_i32, { "r", "0", "ci" } }, 1336 1343 { INDEX_op_rotr_i32, { "r", "0", "ci" } }, … … 1351 1358 { INDEX_op_ext8s_i32, { "r", "q" } }, 1352 1359 { INDEX_op_ext16s_i32, { "r", "r" } }, 1360 { INDEX_op_ext8u_i32, { "r", "q"} }, 1361 { INDEX_op_ext16u_i32, { "r", "r"} }, 1353 1362 1354 1363 #if TARGET_LONG_BITS == 32 -
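The extra cf parameter grown by tgen_arithi() above exists because the shorter x86 inc/dec encodings leave the carry flag untouched, so they may only stand in for add $1 / sub $1 when no later instruction consumes CF. That is why tcg_out_addi/tcg_out_subi pass 0 while the add2_i32/sub2_i32 cases pass 1: their adc/sbb half still needs the carry out of the low word. A condensed sketch of the selection logic (illustrative, following the hunk above):

    if (!cf && c == ARITH_ADD && val == 1) {
        tcg_out_opc(s, 0x40 + r0);       /* inc r0: leaves CF stale, so only safe when cf == 0 */
    } else if (val == (int8_t)val) {
        tcg_out_modrm(s, 0x83, c, r0);   /* op $imm8, r0 (sign-extended) */
        tcg_out8(s, val);
    } else {
        tcg_out_modrm(s, 0x81, c, r0);   /* op $imm32, r0 */
        tcg_out32(s, val);
    }

The movzbl/movzwl special cases for ARITH_AND with 0xff/0xffff masks follow the same theme: pick the shortest host encoding that still has the semantics the IR demands.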
trunk/src/recompiler/tcg/i386/tcg-target.h
r36175 r37675 53 53 #define TCG_TARGET_HAS_ext16s_i32 54 54 #define TCG_TARGET_HAS_rot_i32 55 #define TCG_TARGET_HAS_ext8u_i32 56 #define TCG_TARGET_HAS_ext16u_i32 57 58 #define TCG_TARGET_HAS_GUEST_BASE 55 59 56 60 /* Note: must be synced with dyngen-exec.h */ -
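TCG_TARGET_HAS_GUEST_BASE declares that this backend's qemu_ld/qemu_st paths can fold a nonzero user-mode guest base into their address arithmetic, which is what the GUEST_BASE displacements sprinkled through tcg-target.c above implement. In the softmmu configuration VirtualBox builds, the companion hunk pins the constant to zero (#ifndef CONFIG_USER_ONLY ... #define GUEST_BASE 0), so the generated addressing is byte-for-byte what it was before. Sketch of the fold, taken from the i386 load path in the diff:

    /* movl GUEST_BASE(r0), data_reg - with softmmu's GUEST_BASE == 0 this
       degenerates to the previous zero-displacement encoding. */
    tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE);
    tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, GUEST_BASE + 4); /* 64-bit high half */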
trunk/src/recompiler/tcg/tcg-op.h
r36175 r37675 1190 1190 } 1191 1191 1192 /* These are currently just for convenience.1193 We assume a target will recognise these automatically . */1194 1192 static inline void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg) 1195 1193 { 1194 #ifdef TCG_TARGET_HAS_ext8u_i32 1195 tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg); 1196 #else 1196 1197 tcg_gen_andi_i32(ret, arg, 0xffu); 1198 #endif 1197 1199 } 1198 1200 1199 1201 static inline void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg) 1200 1202 { 1203 #ifdef TCG_TARGET_HAS_ext16u_i32 1204 tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg); 1205 #else 1201 1206 tcg_gen_andi_i32(ret, arg, 0xffffu); 1207 #endif 1202 1208 } 1203 1209 … … 1359 1365 static inline void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg) 1360 1366 { 1367 #ifdef TCG_TARGET_HAS_ext8u_i64 1368 tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg); 1369 #else 1361 1370 tcg_gen_andi_i64(ret, arg, 0xffu); 1371 #endif 1362 1372 } 1363 1373 1364 1374 static inline void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg) 1365 1375 { 1376 #ifdef TCG_TARGET_HAS_ext16u_i64 1377 tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg); 1378 #else 1366 1379 tcg_gen_andi_i64(ret, arg, 0xffffu); 1380 #endif 1367 1381 } 1368 1382 1369 1383 static inline void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg) 1370 1384 { 1385 #ifdef TCG_TARGET_HAS_ext32u_i64 1386 tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg); 1387 #else 1371 1388 tcg_gen_andi_i64(ret, arg, 0xffffffffu); 1389 #endif 1372 1390 } 1373 1391 … … 1383 1401 static inline void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg) 1384 1402 { 1385 tcg_gen_ andi_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)), 0xffffffffu);1403 tcg_gen_ext32u_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg))); 1386 1404 } 1387 1405 -
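With the new capability macros, the unsigned-extension generators emit a dedicated opcode wherever the backend advertises one and keep the andi masking as the portable fallback, so on x86 hosts tcg_gen_ext16u_i32 now lowers to a single movzwl instead of an andl $0xffff. The extu_i32_i64 change at the bottom routes through tcg_gen_ext32u_i64 for the same reason: that path now picks up INDEX_op_ext32u_i64 where TCG_TARGET_HAS_ext32u_i64 is defined rather than always materializing the 0xffffffff constant. Schematic of the two lowerings (illustrative, not generated code):

    /* tcg_gen_ext16u_i32(ret, arg) becomes:
         with TCG_TARGET_HAS_ext16u_i32:  movzwl arg, ret   (one host insn)
         without it, the portable path:                                   */
    tcg_gen_andi_i32(ret, arg, 0xffffu);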
trunk/src/recompiler/tcg/tcg-opc.h
r36175 r37675 89 89 #ifdef TCG_TARGET_HAS_ext16s_i32 90 90 DEF2(ext16s_i32, 1, 1, 0, 0) 91 #endif 92 #ifdef TCG_TARGET_HAS_ext8u_i32 93 DEF2(ext8u_i32, 1, 1, 0, 0) 94 #endif 95 #ifdef TCG_TARGET_HAS_ext16u_i32 96 DEF2(ext16u_i32, 1, 1, 0, 0) 91 97 #endif 92 98 #ifdef TCG_TARGET_HAS_bswap16_i32 … … 153 159 DEF2(ext32s_i64, 1, 1, 0, 0) 154 160 #endif 161 #ifdef TCG_TARGET_HAS_ext8u_i64 162 DEF2(ext8u_i64, 1, 1, 0, 0) 163 #endif 164 #ifdef TCG_TARGET_HAS_ext16u_i64 165 DEF2(ext16u_i64, 1, 1, 0, 0) 166 #endif 167 #ifdef TCG_TARGET_HAS_ext32u_i64 168 DEF2(ext32u_i64, 1, 1, 0, 0) 169 #endif 155 170 #ifdef TCG_TARGET_HAS_bswap16_i64 156 171 DEF2(bswap16_i64, 1, 1, 0, 0) -
trunk/src/recompiler/tcg/tcg.c
r36175 r37675 28 28 #include "config.h" 29 29 30 #ifndef DEBUG_TCG30 #ifndef CONFIG_DEBUG_TCG 31 31 /* define it to suppress various consistency checks (faster) */ 32 32 #define NDEBUG … … 52 52 #include "qemu-common.h" 53 53 #include "cache-utils.h" 54 #include "host-utils.h" 54 55 55 56 /* Note: the long term plan is to reduce the dependancies on the QEMU … … 63 64 #include "elf.h" 64 65 66 #if defined(CONFIG_USE_GUEST_BASE) && !defined(TCG_TARGET_HAS_GUEST_BASE) 67 #error GUEST_BASE not supported on this host. 68 #endif 65 69 66 70 #ifdef VBOX … … 74 78 # undef USE_LIVENESS_ANALYSIS 75 79 # endif 80 ///* With 0.12.5 the liveness analysis does not work well even when targeting 81 // 32-bit guest cpus. Just disable it wholesale to be on the safe side. */ 82 //#undef USE_LIVENESS_ANALYSIS 76 83 #endif /* VBOX */ 77 84 … … 82 89 #define DEF(s, n, copy_size) { #s, 0, 0, n, n, 0, copy_size }, 83 90 #ifndef VBOX 84 #define DEF2(s, iargs, oargs, cargs, flags) { #s, iargs, oargs, cargs, iargs + oargs + cargs, flags, 0 },91 #define DEF2(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags, 0 }, 85 92 #else /* VBOX */ 86 # define DEF2(s, iargs, oargs, cargs, flags) { #s, iargs, oargs, cargs, iargs + oargs + cargs, flags, 0, 0, 0 },93 # define DEF2(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags, 0, 0, 0 }, 87 94 #endif /* VBOX */ 88 95 #include "tcg-opc.h" … … 1117 1124 nb_ops = gen_opc_ptr - gen_opc_buf; 1118 1125 1119 /* XXX: make it really dynamic */ 1120 s->op_dead_iargs = tcg_malloc(OPC_BUF_SIZE * sizeof(uint16_t)); 1126 s->op_dead_iargs = tcg_malloc(nb_ops * sizeof(uint16_t)); 1121 1127 1122 1128 dead_temps = tcg_malloc(s->nb_temps); … … 1901 1907 static int64_t tcg_table_op_count[NB_OPS]; 1902 1908 1903 void dump_op_count(void)1909 static void dump_op_count(void) 1904 1910 { 1905 1911 int i; … … 1939 1945 1940 1946 #ifdef DEBUG_DISAS 1947 # ifdef USE_LIVENESS_ANALYSIS /* vbox */ 1941 1948 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) { 1942 qemu_log("OP after l a:\n");1949 qemu_log("OP after liveness analysis:\n"); 1943 1950 tcg_dump_ops(s, logfile); 1944 1951 qemu_log("\n"); 1945 1952 } 1953 # endif /* USE_LIVENESS_ANALYSIS - vbox */ 1946 1954 #endif 1947 1955 … … 1981 1989 case INDEX_op_debug_insn_start: 1982 1990 /* debug instruction */ 1991 //#ifdef VBOX /* HACK ALERT: GROSS HACK to work around registister allocation bugs in v0.12.5 */ 1992 // save_globals(s, s->reserved_regs); 1993 //#endif 1983 1994 break; 1984 1995 case INDEX_op_nop: … … 2109 2120 cpu_fprintf(f, " avg cycles %0.1f\n", 2110 2121 s->restore_count ? (double)s->restore_time / s->restore_count : 0); 2111 { 2112 extern void dump_op_count(void); 2113 dump_op_count(); 2114 } 2122 2123 dump_op_count(); 2115 2124 } 2116 2125 #else -
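Two quiet but important fixes sit in the tcg.c hunk: the DEF2 macro's parameter list is corrected to (oargs, iargs, ...) so it matches both the call sites in tcg-opc.h and the field order of the TCGOpDef initializer, and the liveness scratch buffer is sized to the actual op count instead of a fixed OPC_BUF_SIZE allocation. Sketch of why the DEF2 order matters, assuming the TCGOpDef layout of this QEMU generation (name, nb_oargs, nb_iargs, nb_cargs, ...):

    /* tcg-opc.h writes outputs first:  DEF2(ext8u_i32, 1, 1, 0, 0)
       i.e. 1 output, 1 input - so the initializer must consume the
       arguments in that same order: */
    #define DEF2(s, oargs, iargs, cargs, flags) \
        { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags, 0 },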
trunk/src/recompiler/tcg/tcg.h
r36175 r37675 24 24 #include "qemu-common.h" 25 25 #include "tcg-target.h" 26 #include "tcg-runtime.h" 26 27 27 28 #if TCG_TARGET_REG_BITS == 32 … … 57 58 #define tcg_regset_set(d, s) (d) = (s) 58 59 #define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg) 59 #define tcg_regset_set_reg(d, r) (d) |= 1 << (r)60 #define tcg_regset_reset_reg(d, r) (d) &= ~(1 << (r))60 #define tcg_regset_set_reg(d, r) (d) |= 1L << (r) 61 #define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r)) 61 62 #define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1) 62 63 #define tcg_regset_or(d, a, b) (d) = (a) | (b) … … 122 123 */ 123 124 124 #ifdef DEBUG_TCG125 #ifdef CONFIG_DEBUG_TCG 125 126 #define DEBUG_TCGV 1 126 127 #endif … … 466 467 void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type, 467 468 int label_index, long addend); 468 const TCGArg *tcg_gen_code_op(TCGContext *s, int opc, const TCGArg *args1,469 unsigned int dead_iargs);470 471 /* tcg-runtime.c */472 int64_t tcg_helper_shl_i64(int64_t arg1, int64_t arg2);473 int64_t tcg_helper_shr_i64(int64_t arg1, int64_t arg2);474 int64_t tcg_helper_sar_i64(int64_t arg1, int64_t arg2);475 int64_t tcg_helper_div_i64(int64_t arg1, int64_t arg2);476 int64_t tcg_helper_rem_i64(int64_t arg1, int64_t arg2);477 uint64_t tcg_helper_divu_i64(uint64_t arg1, uint64_t arg2);478 uint64_t tcg_helper_remu_i64(uint64_t arg1, uint64_t arg2);479 469 480 470 #ifndef VBOX 481 471 extern uint8_t code_gen_prologue[]; 482 472 #else 483 extern uint8_t *code_gen_prologue;473 extern uint8_t *code_gen_prologue; 484 474 #endif 485 475 #if defined(_ARCH_PPC) && !defined(_ARCH_PPC64) -
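The regset macros switching from 1 to 1L is a correctness fix for 64-bit hosts: with an int literal, 1 << r is undefined behaviour once r reaches the width of int and can never set bits 32..63, so any backend with more than 32 allocatable registers would corrupt its TCGRegSet. A tiny illustration (hypothetical register index, LP64 host where long is 64 bits wide):

    unsigned long set = 0;
    int r = 40;              /* a register index above 31 */
    /* set |= 1 << r;           int shifted past its width: undefined   */
    set |= 1L << r;          /* long literal: bit 40 is set as intended */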
trunk/src/recompiler/tcg/x86_64/tcg-target.c
r36175 r37675 364 364 } 365 365 366 static void tcg_out_goto(TCGContext *s, int call, uint8_t *target) 367 { 368 int32_t disp; 369 370 disp = target - s->code_ptr - 5; 371 if (disp == (target - s->code_ptr - 5)) { 372 tcg_out8(s, call ? 0xe8 : 0xe9); 373 tcg_out32(s, disp); 374 } else { 375 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, (tcg_target_long) target); 376 tcg_out_modrm(s, 0xff, call ? 2 : 4, TCG_REG_R10); 377 } 378 } 379 366 380 static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret, 367 381 int arg1, tcg_target_long arg2) … … 384 398 static inline void tgen_arithi32(TCGContext *s, int c, int r0, int32_t val) 385 399 { 386 if (val == (int8_t)val) { 400 if ((c == ARITH_ADD && val == 1) || (c == ARITH_SUB && val == -1)) { 401 /* inc */ 402 tcg_out_modrm(s, 0xff, 0, r0); 403 } else if ((c == ARITH_ADD && val == -1) || (c == ARITH_SUB && val == 1)) { 404 /* dec */ 405 tcg_out_modrm(s, 0xff, 1, r0); 406 } else if (val == (int8_t)val) { 387 407 tcg_out_modrm(s, 0x83, c, r0); 388 408 tcg_out8(s, val); … … 401 421 static inline void tgen_arithi64(TCGContext *s, int c, int r0, int64_t val) 402 422 { 403 if (val == (int8_t)val) { 423 if ((c == ARITH_ADD && val == 1) || (c == ARITH_SUB && val == -1)) { 424 /* inc */ 425 tcg_out_modrm(s, 0xff | P_REXW, 0, r0); 426 } else if ((c == ARITH_ADD && val == -1) || (c == ARITH_SUB && val == 1)) { 427 /* dec */ 428 tcg_out_modrm(s, 0xff | P_REXW, 1, r0); 429 } else if (val == (int8_t)val) { 404 430 tcg_out_modrm(s, 0x83 | P_REXW, c, r0); 405 431 tcg_out8(s, val); … … 486 512 } 487 513 488 #ifdef VBOX489 490 DECLINLINE(void) tcg_out_pushq(TCGContext *s, tcg_target_long val)491 {492 tcg_out8(s, 0x68); /* push imm32, subs 8 from rsp */493 tcg_out32(s, val); /* imm32 */494 if ((val >> 32) != 0)495 {496 tcg_out8(s, 0xc7); /* mov imm32, 4(%rsp) */497 tcg_out8(s, 0x44);498 tcg_out8(s, 0x24);499 tcg_out8(s, 0x04);500 tcg_out32(s, ((uint64_t)val) >> 32); /* imm32 */501 }502 }503 504 DECLINLINE(void) tcg_out_long_call(TCGContext *s, tcg_target_long dst)505 {506 intptr_t disp = dst - (tcg_target_long)s->code_ptr - 5;507 /* can do normal call */508 if (disp < 2LL * _1G && disp > -2LL * _1G)509 {510 tcg_out8(s, 0xe8); /* call disp32 */511 tcg_out32(s, disp); /* disp32 */512 }513 else514 {515 # if 0516 /* Somewhat tricky, but allows long jump not touching registers */517 int off = 5 /* push imm32 */ + 5 /* push imm32 */ + 1 /* ret */;518 if ((((uint64_t)s->code_ptr) + 32) >> 32)519 off += 8;520 if (dst >> 32)521 off += 8;522 /* return address */523 tcg_out_pushq(s, (tcg_target_long)s->code_ptr+off);524 /* destination */525 tcg_out_pushq(s, dst);526 tcg_out8(s, 0xc3); /* ret, used as call */527 # else528 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RAX, dst);529 tcg_out8(s, 0xff); /* call *%eax */530 tcg_out8(s, 0xd0);531 # endif532 }533 }534 535 DECLINLINE(void) tcg_out_long_jmp(TCGContext *s, tcg_target_long dst)536 {537 intptr_t disp;538 539 disp = dst - (tcg_target_long)s->code_ptr - 2;540 /* can do short relative jump */541 if (disp < 0x7f && disp > -0x7f)542 {543 tcg_out8(s, 0xeb); /* short jmp */544 tcg_out8(s, (int8_t)disp);545 return;546 }547 548 disp = dst - (tcg_target_long)s->code_ptr - 5;549 if (disp < 2LL * _1G && disp > -2LL * _1G)550 {551 tcg_out8(s, 0xe9); /* jmp */552 tcg_out32(s, (int32_t)disp);553 return;554 }555 # if 0556 tcg_out_pushq(s, dst);557 tcg_out8(s, 0xc3); /* ret */558 # else559 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RAX, dst);560 tcg_out8(s, 0xff); /* jmp *%eax */561 tcg_out8(s, 0xe0);562 # endif563 }564 565 #endif /* VBOX */566 
567 514 #if defined(CONFIG_SOFTMMU) 568 515 … … 585 532 586 533 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 587 static void * vbox_ld_helpers[] = {534 static void * const vbox_ld_helpers[] = { 588 535 __ldub_vbox_phys, 589 536 __lduw_vbox_phys, … … 596 543 }; 597 544 598 static void * vbox_st_helpers[] = {545 static void * const vbox_st_helpers[] = { 599 546 __stb_vbox_phys, 600 547 __stw_vbox_phys, … … 608 555 tcg_out_modrm(s, 0x8b | P_REXW, TCG_REG_RDI, addr_reg); 609 556 610 tcg_out_ long_call(s, (tcg_target_long)vbox_ld_helpers[index]);557 tcg_out_goto(s, 1, vbox_ld_helpers[index]); 611 558 /* mov %rax, data_reg*/ 612 559 tcg_out_modrm(s, 0x8b | P_REXW, data_reg, TCG_REG_RAX); … … 620 567 /* mov addr_reg, %rsi */ 621 568 tcg_out_modrm(s, 0x8b | P_REXW, TCG_REG_RSI, val_reg); 622 tcg_out_ long_call(s, (tcg_target_long)vbox_st_helpers[index]);569 tcg_out_goto(s, 1, vbox_st_helpers[index]); 623 570 } 624 571 … … 629 576 { 630 577 int addr_reg, data_reg, r0, r1, mem_index, s_bits, bswap, rexw; 578 int32_t offset; 631 579 #if defined(CONFIG_SOFTMMU) 632 580 uint8_t *label1_ptr, *label2_ptr; … … 679 627 /* XXX: move that code at the end of the TB */ 680 628 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RSI, mem_index); 681 #ifndef VBOX 682 tcg_out8(s, 0xe8); 683 tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] - 684 (tcg_target_long)s->code_ptr - 4); 685 #else 686 tcg_out_long_call(s, (tcg_target_long)qemu_ld_helpers[s_bits]); 687 #endif 629 tcg_out_goto(s, 1, qemu_ld_helpers[s_bits]); 688 630 689 631 switch(opc) { … … 729 671 tcg_out_modrm_offset(s, 0x03 | P_REXW, r0, r1, offsetof(CPUTLBEntry, addend) - 730 672 offsetof(CPUTLBEntry, addr_read)); 673 offset = 0; 731 674 #else 732 r0 = addr_reg; 675 if (GUEST_BASE == (int32_t)GUEST_BASE) { 676 r0 = addr_reg; 677 offset = GUEST_BASE; 678 } else { 679 offset = 0; 680 /* movq $GUEST_BASE, r0 */ 681 tcg_out_opc(s, (0xb8 + (r0 & 7)) | P_REXW, 0, r0, 0); 682 tcg_out32(s, GUEST_BASE); 683 tcg_out32(s, GUEST_BASE >> 32); 684 /* addq addr_reg, r0 */ 685 tcg_out_modrm(s, 0x01 | P_REXW, addr_reg, r0); 686 } 733 687 #endif 734 688 … … 743 697 case 0: 744 698 /* movzbl */ 745 tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);699 tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, offset); 746 700 break; 747 701 case 0 | 4: 748 702 /* movsbX */ 749 tcg_out_modrm_offset(s, 0xbe | P_EXT | rexw, data_reg, r0, 0);703 tcg_out_modrm_offset(s, 0xbe | P_EXT | rexw, data_reg, r0, offset); 750 704 break; 751 705 case 1: 752 706 /* movzwl */ 753 tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);707 tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, offset); 754 708 if (bswap) { 755 709 /* rolw $8, data_reg */ … … 762 716 if (bswap) { 763 717 /* movzwl */ 764 tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);718 tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, offset); 765 719 /* rolw $8, data_reg */ 766 720 tcg_out8(s, 0x66); … … 772 726 } else { 773 727 /* movswX */ 774 tcg_out_modrm_offset(s, 0xbf | P_EXT | rexw, data_reg, r0, 0);728 tcg_out_modrm_offset(s, 0xbf | P_EXT | rexw, data_reg, r0, offset); 775 729 } 776 730 break; 777 731 case 2: 778 732 /* movl (r0), data_reg */ 779 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);733 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, offset); 780 734 if (bswap) { 781 735 /* bswap */ … … 786 740 if (bswap) { 787 741 /* movl (r0), data_reg */ 788 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);742 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, offset); 789 743 /* bswap */ 790 744 tcg_out_opc(s, (0xc8 + 
(data_reg & 7)) | P_EXT, 0, data_reg, 0); … … 793 747 } else { 794 748 /* movslq */ 795 tcg_out_modrm_offset(s, 0x63 | P_REXW, data_reg, r0, 0);749 tcg_out_modrm_offset(s, 0x63 | P_REXW, data_reg, r0, offset); 796 750 } 797 751 break; 798 752 case 3: 799 753 /* movq (r0), data_reg */ 800 tcg_out_modrm_offset(s, 0x8b | P_REXW, data_reg, r0, 0);754 tcg_out_modrm_offset(s, 0x8b | P_REXW, data_reg, r0, offset); 801 755 if (bswap) { 802 756 /* bswap */ … … 821 775 { 822 776 int addr_reg, data_reg, r0, r1, mem_index, s_bits, bswap, rexw; 777 int32_t offset; 823 778 #if defined(CONFIG_SOFTMMU) 824 779 uint8_t *label1_ptr, *label2_ptr; … … 890 845 } 891 846 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index); 892 #ifndef VBOX 893 tcg_out8(s, 0xe8); 894 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] - 895 (tcg_target_long)s->code_ptr - 4); 896 #else 897 tcg_out_long_call(s, (tcg_target_long)qemu_st_helpers[s_bits]); 898 #endif 847 tcg_out_goto(s, 1, qemu_st_helpers[s_bits]); 899 848 900 849 /* jmp label2 */ … … 909 858 tcg_out_modrm_offset(s, 0x03 | P_REXW, r0, r1, offsetof(CPUTLBEntry, addend) - 910 859 offsetof(CPUTLBEntry, addr_write)); 860 offset = 0; 911 861 #else 912 r0 = addr_reg; 862 if (GUEST_BASE == (int32_t)GUEST_BASE) { 863 r0 = addr_reg; 864 offset = GUEST_BASE; 865 } else { 866 offset = 0; 867 /* movq $GUEST_BASE, r0 */ 868 tcg_out_opc(s, (0xb8 + (r0 & 7)) | P_REXW, 0, r0, 0); 869 tcg_out32(s, GUEST_BASE); 870 tcg_out32(s, GUEST_BASE >> 32); 871 /* addq addr_reg, r0 */ 872 tcg_out_modrm(s, 0x01 | P_REXW, addr_reg, r0); 873 } 913 874 #endif 914 875 … … 922 883 case 0: 923 884 /* movb */ 924 tcg_out_modrm_offset(s, 0x88 | P_REXB, data_reg, r0, 0);885 tcg_out_modrm_offset(s, 0x88 | P_REXB, data_reg, r0, offset); 925 886 break; 926 887 case 1: … … 934 895 /* movw */ 935 896 tcg_out8(s, 0x66); 936 tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);897 tcg_out_modrm_offset(s, 0x89, data_reg, r0, offset); 937 898 break; 938 899 case 2: … … 944 905 } 945 906 /* movl */ 946 tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);907 tcg_out_modrm_offset(s, 0x89, data_reg, r0, offset); 947 908 break; 948 909 case 3: … … 954 915 } 955 916 /* movq */ 956 tcg_out_modrm_offset(s, 0x89 | P_REXW, data_reg, r0, 0);917 tcg_out_modrm_offset(s, 0x89 | P_REXW, data_reg, r0, offset); 957 918 break; 958 919 default: … … 977 938 case INDEX_op_exit_tb: 978 939 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, args[0]); 979 #ifndef VBOX 980 tcg_out8(s, 0xe9); /* jmp tb_ret_addr */ 981 tcg_out32(s, tb_ret_addr - s->code_ptr - 4); 982 #else 983 tcg_out_long_jmp(s, (tcg_target_long)tb_ret_addr); 984 #endif 940 tcg_out_goto(s, 0, tb_ret_addr); 985 941 break; 986 942 case INDEX_op_goto_tb: … … 998 954 args[0])); 999 955 #else 1000 /** @todo: can we clobber RAX here? */ 956 /** @todo: can we clobber RAX here? */ /** @todo r=bird: I bet we cannot now... 
XXXX */ 1001 957 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RAX, 1002 958 (tcg_target_long)&(s->tb_next[args[0]])); … … 1008 964 case INDEX_op_call: 1009 965 if (const_args[0]) { 1010 #ifndef VBOX 1011 tcg_out8(s, 0xe8); 1012 tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4); 1013 #else 1014 tcg_out_long_call(s, args[0]); 1015 #endif 966 tcg_out_goto(s, 1, (void *) args[0]); 1016 967 } else { 1017 968 tcg_out_modrm(s, 0xff, 2, args[0]); … … 1020 971 case INDEX_op_jmp: 1021 972 if (const_args[0]) { 1022 tcg_out8(s, 0xe9); 1023 tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4); 973 tcg_out_goto(s, 0, (void *) args[0]); 1024 974 } else { 1025 975 tcg_out_modrm(s, 0xff, 4, args[0]); … … 1289 1239 tcg_out_modrm(s, 0x63 | P_REXW, args[0], args[1]); 1290 1240 break; 1241 case INDEX_op_ext8u_i32: 1242 tcg_out_modrm(s, 0xb6 | P_EXT | P_REXB, args[0], args[1]); 1243 break; 1244 case INDEX_op_ext16u_i32: 1245 tcg_out_modrm(s, 0xb7 | P_EXT, args[0], args[1]); 1246 break; 1247 case INDEX_op_ext8u_i64: 1248 tcg_out_modrm(s, 0xb6 | P_EXT | P_REXW, args[0], args[1]); 1249 break; 1250 case INDEX_op_ext16u_i64: 1251 tcg_out_modrm(s, 0xb7 | P_EXT | P_REXW, args[0], args[1]); 1252 break; 1253 case INDEX_op_ext32u_i64: 1254 tcg_out_modrm(s, 0x8b, args[0], args[1]); 1255 break; 1291 1256 1292 1257 case INDEX_op_qemu_ld8u: … … 1463 1428 { INDEX_op_ext16s_i64, { "r", "r"} }, 1464 1429 { INDEX_op_ext32s_i64, { "r", "r"} }, 1430 { INDEX_op_ext8u_i32, { "r", "r"} }, 1431 { INDEX_op_ext16u_i32, { "r", "r"} }, 1432 { INDEX_op_ext8u_i64, { "r", "r"} }, 1433 { INDEX_op_ext16u_i64, { "r", "r"} }, 1434 { INDEX_op_ext32u_i64, { "r", "r"} }, 1465 1435 1466 1436 { INDEX_op_qemu_ld8u, { "r", "L" } }, … … 1475 1445 { INDEX_op_qemu_st16, { "L", "L" } }, 1476 1446 { INDEX_op_qemu_st32, { "L", "L" } }, 1477 { INDEX_op_qemu_st64, { "L", "L" , "L"} },1447 { INDEX_op_qemu_st64, { "L", "L" } }, 1478 1448 1479 1449 { -1 }, -
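The assorted VBox-only long-call/long-jump helpers in the x86_64 backend are superseded by a single tcg_out_goto(s, call, target), which picks the encoding by displacement fit: if target - code_ptr - 5 survives the round-trip through int32_t it emits the 5-byte rel32 call/jmp, otherwise it materializes the full 64-bit address in R10 (a call-clobbered scratch register that carries no function argument in the SysV ABI) and branches indirectly through it. Usage, as in the exit_tb and softmmu slow-path hunks above:

    tcg_out_goto(s, 1, qemu_ld_helpers[s_bits]);   /* call the slow-path helper */
    tcg_out_goto(s, 0, tb_ret_addr);               /* jmp to the TB return point */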
trunk/src/recompiler/tcg/x86_64/tcg-target.h
r36175 r37675 71 71 #define TCG_TARGET_HAS_ext16s_i64 72 72 #define TCG_TARGET_HAS_ext32s_i64 73 #define TCG_TARGET_HAS_ext8u_i32 74 #define TCG_TARGET_HAS_ext16u_i32 75 #define TCG_TARGET_HAS_ext8u_i64 76 #define TCG_TARGET_HAS_ext16u_i64 77 #define TCG_TARGET_HAS_ext32u_i64 78 73 79 #define TCG_TARGET_HAS_rot_i32 74 80 #define TCG_TARGET_HAS_rot_i64 81 82 #define TCG_TARGET_HAS_GUEST_BASE 75 83 76 84 /* Note: must be synced with dyngen-exec.h */ -
trunk/src/recompiler/tests/sha1.c
r36170 r37675 191 191 192 192 for (j = 0; j < 4; t >>= 8, j++) 193 *--fcp = (unsigned char) t 193 *--fcp = (unsigned char) t; 194 194 } 195 195 #else