Changeset 36140 in vbox for trunk/src/recompiler
- Timestamp: Mar 3, 2011 1:48:16 PM (14 years ago)
- svn:sync-xref-src-repo-rev: 70322
- Location: trunk/src/recompiler
- Files: 2 added, 51 edited
trunk/src/recompiler/Makefile.kmk
r35572 → r36140:

      VBoxRemPrimary_DEFS.solaris += __C99FEATURES__
      endif # win
 -    VBoxRemPrimary_DEFS += IN_REM_R3 REM_INCLUDE_CPU_H
 +    VBoxRemPrimary_DEFS += IN_REM_R3 REM_INCLUDE_CPU_H NEED_CPU_H
      #VBoxRemPrimary_DEFS += REM_PHYS_ADDR_IN_TLB
      #VBoxRemPrimary_DEFS += DEBUG_ALL_LOGGING DEBUG_DISAS DEBUG_PCALL DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging.
trunk/src/recompiler/VBoxRecompiler.c
r36066 → r36140:

      pVM->rem.s.Env.dr[i] = pCtx->dr[i];

 +    #ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
      /*
       * Clear the halted hidden flag (the interrupt waking up the CPU can
 …
       */
      pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
 +    #endif

      /*
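The guard above is the usual trick for keeping VirtualBox-specific code compiling across upstream syncs: test for the macro itself rather than a version number, so if the incoming QEMU headers no longer define HF_HALTED_MASK the block simply drops out instead of breaking the build. A minimal, self-contained sketch of the pattern (the mask value is an assumption, normally supplied by cpu.h):

    /* Feature-guard sketch: the code compiles both with and without the
       upstream macro; here it is left undefined, so the clear is skipped. */
    #include <stdio.h>

    /* #define HF_HALTED_MASK (1 << 10) */   /* provided (or not) by cpu.h */

    int main(void)
    {
        unsigned hflags = ~0u;
    #ifdef HF_HALTED_MASK
        hflags &= ~HF_HALTED_MASK;           /* clear the halted hidden flag */
    #endif
        printf("hflags=%#x\n", hflags);
        return 0;
    }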
trunk/src/recompiler/a.out.h
r33540 → r36140: every "unsigned long" field in the COFF/PE structures is retyped as host_ulong. The affected declarations are f_timdat/f_symptr/f_nsyms in struct external_filehdr (both the a.out and PE variants), tsize/dsize/bsize/entry/text_start/data_start in AOUTHDR (both variants), s_paddr/s_vaddr/s_size/s_scnptr/s_relptr/s_lnnoptr/s_flags in struct external_scnhdr, l_symndx/l_paddr in struct external_lineno, e_zeroes/e_offset/e_value in struct external_syment, x_tagndx/x_fsize/x_lnnoptr/x_endndx/x_zeroes/x_offset/x_scnlen/x_checksum/x_tvfill in union external_auxent, e_lfanew in the DOS header, and ImageBase/SectionAlignment/FileAlignment/SizeOfImage/SizeOfHeaders/CheckSum/SizeOfStackReserve/SizeOfStackCommit/SizeOfHeapReserve/SizeOfHeapCommit/LoaderFlags/NumberOfRvaAndSizes in the NT optional header. A representative hunk:

      short f_magic;           /* magic number */
      short f_nscns;           /* number of sections */
 -    unsigned long f_timdat;  /* time & date stamp */
 -    unsigned long f_symptr;  /* file pointer to symtab */
 -    unsigned long f_nsyms;   /* number of symtab entries */
 +    host_ulong f_timdat;     /* time & date stamp */
 +    host_ulong f_symptr;     /* file pointer to symtab */
 +    host_ulong f_nsyms;      /* number of symtab entries */
      short f_opthdr;          /* sizeof(optional hdr) */
      short f_flags;           /* flags */

One comment also changes spelling (to upstream's):

 -    #define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* Section is not cacheable. */
 +    #define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* Section is not cachable. */
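The point of the retyping (my reading, not stated in the changeset): on an LP64 host, unsigned long is 8 bytes, so these structures would no longer match the fixed 32-bit on-disk COFF layout. The sketch below assumes host_ulong is a 32-bit alias, which is how such a type would have to be defined for the layout to stay honest:

    /* Sketch, assuming host_ulong is a fixed 32-bit type (its real
       definition lives elsewhere in the tree). */
    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t host_ulong;          /* assumption for this sketch */

    struct external_filehdr {
        short      f_magic;               /* magic number */
        short      f_nscns;               /* number of sections */
        host_ulong f_timdat;              /* time & date stamp */
        host_ulong f_symptr;              /* file pointer to symtab */
        host_ulong f_nsyms;               /* number of symtab entries */
        short      f_opthdr;              /* sizeof(optional hdr) */
        short      f_flags;               /* flags */
    };

    int main(void)
    {
        /* 20 bytes is the on-disk COFF file header size; with 8-byte
           "unsigned long" fields this assertion fails on LP64 hosts. */
        assert(sizeof(struct external_filehdr) == 20);
        return 0;
    }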
trunk/src/recompiler/bswap.h
r36125 → r36140:

      #define le16_to_cpupu(p) le16_to_cpup(p)
      #define le32_to_cpupu(p) le32_to_cpup(p)
 +    #define be32_to_cpupu(p) be32_to_cpup(p)

      #define cpu_to_be16wu(p, v) cpu_to_be16w(p, v)
 …
      }

 +    static inline uint32_t be32_to_cpupu(const uint32_t *p)
 +    {
 +        const uint8_t *p1 = (const uint8_t *)p;
 +        return p1[3] | (p1[2] << 8) | (p1[1] << 16) | (p1[0] << 24);
 +    }

      static inline void cpu_to_be16wu(uint16_t *p, uint16_t v)
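The new helper is the unaligned-safe ("pu") big-endian load: on big-endian hosts it aliases the aligned version, while on little-endian hosts it assembles the value byte by byte, so no host alignment trap can fire. A self-contained usage sketch (the cast on the top byte is mine, to sidestep signed-shift pedantry; otherwise the helper matches the hunk):

    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t be32_to_cpupu(const uint32_t *p)
    {
        const uint8_t *p1 = (const uint8_t *)p;
        return p1[3] | (p1[2] << 8) | (p1[1] << 16) | ((uint32_t)p1[0] << 24);
    }

    int main(void)
    {
        uint8_t buf[8] = { 0, 0, 0, 0x12, 0x34, 0x56, 0x78, 0 };
        /* +3 makes the pointer deliberately misaligned, mirroring how the
           helper is used on packed guest data. */
        printf("%08x\n", be32_to_cpupu((const uint32_t *)(buf + 3)));  /* 12345678 */
        return 0;
    }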
trunk/src/recompiler/cpu-all.h
r36125 → r36140 (the main hunks; the rest is whitespace re-alignment of the VBOX CPU_INTERRUPT_* and dirty-flag defines, spacing fixes in the 64-bit load/store helpers, and the CPU_INTERRUPT_DEBUG comment reverting to upstream's spelling "occured"):

 -    #if defined(__arm__) || defined(__sparc__)
 +    #if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
      #define WORDS_ALIGNED
 …
      #include "bswap.h"
 +    #include "softfloat.h"
 …
      /* All direct uses of g2h and h2g need to go away for usermode softmmu. */
      #define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
 -    #define h2g(x) ((target_ulong)(x - GUEST_BASE))
 +    #define h2g(x) ((target_ulong)((unsigned long)(x) - GUEST_BASE))
 …
      #define ldl_code(p) ldl_raw(p)
 +    #define ldq_code(p) ldq_raw(p)
 …
      #define ldl_kernel(p) ldl_raw(p)
 +    #define ldq_kernel(p) ldq_raw(p)
      #define ldfl_kernel(p) ldfl_raw(p)
 …
 +    #if 0 /* bird: Not there in the code I'm looking at. */
      #define SINGLE_CPU_DEFINES
      #ifdef SINGLE_CPU_DEFINES
 …
      #endif /* SINGLE_CPU_DEFINES */
 +    #endif /* bird: removed? */
 …
      void cpu_dump_state(CPUState *env, FILE *f,
                          int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                          int flags);
 +    void cpu_dump_statistics (CPUState *env, FILE *f,
 +                              int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
 +                              int flags);

 -    DECLNORETURN(void) cpu_abort(CPUState *env, const char *fmt, ...);
 +    void cpu_abort(CPUState *env, const char *fmt, ...)
 +        __attribute__ ((__format__ (__printf__, 2, 3)))
 +        __attribute__ ((__noreturn__));
 …
 -    extern CPULogItem cpu_log_items[];
 +    extern const CPULogItem cpu_log_items[];
 …
      #ifndef VBOX
 -    extern int phys_ram_size;
 +    extern ram_addr_t phys_ram_size;
      extern int phys_ram_fd;
 -    extern int phys_ram_size;
 +    extern uint8_t *phys_ram_base;
 +    extern uint8_t *phys_ram_dirty;
 +    extern ram_addr_t ram_size;
      #else /* VBOX */
      extern RTGCPHYS phys_ram_size;
      /** This is required for bounds checking the phys_ram_dirty accesses. */
      extern RTGCPHYS phys_ram_dirty_size;
 +    extern uint8_t *phys_ram_dirty;
      #endif /* VBOX */
 -    #if !defined(VBOX)
 -    extern uint8_t *phys_ram_base;
 -    #endif
 -    extern uint8_t *phys_ram_dirty;
 …
 -    uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr);
 +    ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
 …
      /* host CPU ticks (if available) */

 -    #ifdef VBOX
 -    # include <iprt/asm-amd64-x86.h>
 -
 -    DECLINLINE(int64_t) cpu_get_real_ticks(void)
 -    {
 -        return ASMReadTSC();
 -    }
 -
 -    #elif defined(__powerpc__)
 +    #if defined(__powerpc__)
 …
 +    #elif defined(__hppa__)
 +
 +    static inline int64_t cpu_get_real_ticks(void)
 +    {
 +        int val;
 +        asm volatile ("mfctl %%cr16, %0" : "=r"(val));
 +        return val;
 +    }
 +
      #elif defined(__ia64)
 …
 -    #elif defined(__sparc_v9__)
 +    #elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)
 …
 +    #elif defined(__mips__)
 +
 +    static inline int64_t cpu_get_real_ticks(void)
 +    {
 +    #if __mips_isa_rev >= 2
 +        uint32_t count;
 +        static uint32_t cyc_per_count = 0;
 +
 +        if (!cyc_per_count)
 +            __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));
 +
 +        __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
 +        return (int64_t)(count * cyc_per_count);
 +    #else
 +        /* FIXME */
 +        static int64_t ticks = 0;
 +        return ticks++;
 +    #endif
 +    }
 +
      #else
 -    /* The host CPU doesn't have an easily accessible cycle counter.
 -       Just return a monotonically increasing value. This will be totally wrong,
 -       but hopefully better than nothing. */
 +    /* The host CPU doesn't have an easily accessible cycle counter.
 +       Just return a monotonically increasing value. This will be
 +       totally wrong, but hopefully better than nothing. */
      static inline int64_t cpu_get_real_ticks (void)
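For comparison, the x86 branches of cpu_get_real_ticks() are unchanged here and so not shown in the hunks; they read the time-stamp counter directly, which is also what the removed VBOX branch did via ASMReadTSC(). A sketch of that pattern:

    /* rdtsc-based cycle counter read; serialization (cpuid/lfence) is
       deliberately omitted since these ticks only feed rough profiling. */
    #include <stdint.h>

    static inline int64_t cpu_get_real_ticks(void)
    {
    #if defined(__x86_64__)
        uint32_t low, high;
        asm volatile("rdtsc" : "=a" (low), "=d" (high));
        return ((int64_t)high << 32) | low;
    #elif defined(__i386__)
        int64_t val;
        asm volatile("rdtsc" : "=A" (val));   /* "=A" = the edx:eax pair */
        return val;
    #else
        return 0;   /* other hosts are handled by the branches in the header */
    #endif
    }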
trunk/src/recompiler/cpu-defs.h
r33656 → r36140:

      #define CPU_DEFS_H

 +    #ifndef NEED_CPU_H
 +    #error cpu.h included from common code
 +    #endif

      #include "config.h"
      #include <setjmp.h>
 -    #ifndef VBOX
      #include <inttypes.h>
 -    #endif
      #include "osdep.h"
 …
      #define HOST_LONG_SIZE (HOST_LONG_BITS / 8)

 -    #define EXCP_INTERRUPT 0x10000 /* async interruption */
 +    #define EXCP_INTERRUPT  0x10000 /* async interruption */
      #define EXCP_HLT        0x10001 /* hlt instruction reached */
 …
      target_ulong addr_write;
      target_ulong addr_code;
 -    /* Addend to virtual address to get physical address. IO accesses
 -       use the corresponding iotlb value. */
 +    /* Addend to virtual address to get physical address. IO accesses
 +       use the correcponding iotlb value. */
      #if TARGET_PHYS_ADDR_BITS == 64
      /* on i386 Linux make sure it is aligned */
 …
      #define CPU_TEMP_BUF_NLONGS 128
      #define CPU_COMMON \
 …
      int watchpoint_hit; \
      \
 +    struct GDBRegisterState *gdb_regs; \
 +    \
      /* Core interrupt code */ \
      jmp_buf jmp_env; \
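The new NEED_CPU_H guard enforces a layering rule: cpu-defs.h describes one particular guest CPU, so translation units that are supposed to stay target-independent must never include it, even transitively. Code that legitimately needs it defines NEED_CPU_H first, which is exactly what the Makefile.kmk hunk at the top of this changeset adds to the recompiler's defines. A self-contained demo of the idea:

    /* Compile with -DNEED_CPU_H and it builds; without it, the #error
       fires at compile time, which is the whole point of the guard. */
    #include <stdio.h>

    #ifndef NEED_CPU_H
    #error cpu.h included from common code
    #endif

    int main(void)
    {
        puts("built as target-aware code");
        return 0;
    }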
trunk/src/recompiler/cpu-exec.c
r36125 → r36140 (selected hunks; several comments also revert to upstream spellings such as "occured" and "interupt"):

      } else {
 -        load_seg(seg_reg, selector);
 +        helper_load_seg(seg_reg, selector);
      }
 …
 -    void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
 +    void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
 …
 -        helper_fsave((target_ulong)ptr, data32);
 +        helper_fsave(ptr, data32);
 …
 -    void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
 +    void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
 …
 -        helper_frstor((target_ulong)ptr, data32);
 +        helper_frstor(ptr, data32);
 …
      /* see if it is an MMU fault */
 -    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
 -                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
 +    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
      if (ret < 0)
          return 0; /* not an MMU fault */

The same MMU_USER_IDX substitution is applied to the ARM, SPARC, PPC, m68k, MIPS and SH4 handle_cpu_signal() variants, several of which also gain an unreachable "/* never comes here */ return 1;" after cpu_loop_exit(). New handle_cpu_signal() implementations are added for TARGET_ALPHA and TARGET_CRIS, and new host cpu_signal_handler() implementations for __mips__ and __hppa__, e.g.:

 +    #elif defined(__hppa__)
 +
 +    int cpu_signal_handler(int host_signum, void *pinfo,
 +                           void *puc)
 +    {
 +        struct siginfo *info = pinfo;
 +        struct ucontext *uc = puc;
 +        unsigned long pc;
 +        int is_write;
 +
 +        pc = uc->uc_mcontext.sc_iaoq[0];
 +        /* FIXME: compute is_write */
 +        is_write = 0;
 +        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
 +                                 is_write,
 +                                 &uc->uc_sigmask, puc);
 +    }

The x86 host handler drops the USE_CODE_COPY special case and reads the fault state through the EIP_sig()/TRAP_sig()/ERROR_sig() accessors:

 -    pc = uc->uc_mcontext.gregs[REG_EIP];
 -    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
 -    #if defined(TARGET_I386) && defined(USE_CODE_COPY)
 -    if (trapno == 0x00 || trapno == 0x05) {
 -        /* send division by zero or bound exception */
 -        cpu_send_trap(pc, trapno, uc);
 -        return 1;
 -    } else
 -    #endif
 -    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
 -                             trapno == 0xe ?
 -                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
 -                             &uc->uc_sigmask, puc);
 +    pc = EIP_sig(uc);
 +    trapno = TRAP_sig(uc);
 +    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
 +                             trapno == 0xe ?
 +                             (ERROR_sig(uc) >> 1) & 1 : 0,
 +                             &uc->uc_sigmask, puc);

The SPARC and ARM host handlers gain 64-bit (__arch64__) and newer-glibc variants for locating the fault PC, and the MIPS PF-exception debug printf switches to TARGET_FMT_lx/env->PC.
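All of these per-host variants exist to feed one portable function: handle_cpu_signal() needs the faulting data address and the faulting host PC, and while POSIX delivers the former uniformly in siginfo_t, the PC must be dug out of the ucontext differently on every host. A minimal, self-contained sketch of that plumbing (handle_cpu_signal itself is stubbed by a print here):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void segv_handler(int signum, siginfo_t *info, void *puc)
    {
        /* info->si_addr is portable; the faulting *PC* would come from puc
           (uc_mcontext.gregs[REG_EIP], sc_iaoq[0], ... depending on host). */
        fprintf(stderr, "fault at address %p\n", info->si_addr);
        _exit(1);
    }

    int main(void)
    {
        struct sigaction act;
        memset(&act, 0, sizeof(act));
        act.sa_sigaction = segv_handler;
        act.sa_flags = SA_SIGINFO;          /* request the siginfo_t form */
        sigaction(SIGSEGV, &act, NULL);

        *(volatile int *)0x10 = 42;         /* deliberately fault */
        return 0;
    }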
trunk/src/recompiler/cutils.c
r26499 → r36140: two bare #endif directives gain comments naming their matching condition:

      }

 -    #endif
 +    #endif /* VBOX */

      void pstrcpy(char *buf, int buf_size, const char *str)
 …
          return t;
      }
 -    #endif
 +    #endif /* VBOX */
trunk/src/recompiler/dyngen-exec.h
r33656 → r36140 (selected hunks):

      #ifndef VBOX

 +    #ifdef __OpenBSD__
 +    #include <sys/types.h>
 +    #else
      typedef unsigned char uint8_t;
      typedef unsigned short uint16_t;
      typedef unsigned int uint32_t;
 -    /* Linux/Sparc64 defines uint64_t */
 -    #if !(defined (__sparc_v9__) && defined(__linux__))
 +    // Linux/Sparc64 defines uint64_t
 +    #if !(defined (__sparc_v9__) && defined(__linux__)) && !(defined(__APPLE__) && defined(__x86_64__))
      /* XXX may be done for all 64 bits targets ? */
 -    #if defined (__x86_64__) || defined(__ia64)
 +    #if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__) || defined(__powerpc64__)
      typedef unsigned long uint64_t;
 …

The same widening of the 64-bit conditions is applied to the int64_t typedef, and a comment typo goes the upstream way ("compilation" becomes "compliation"). The AREG0..AREGn register assignments are reorganised from independent #ifdef blocks into one #if/#elif chain ending in "#error unsupported CPU", with new entries for __hppa__ (r17/r14/r15/r16) and extra MIPS registers (AREG4..AREG8), AREG0 moving from "s3" to "fp" on MIPS and from "g1"/"g4"/"g5"/"g7" to "g5"/"g6"/"g7" on sparc_v9, commented-out AREG4/AREG5 ("rbp"/"rbx") hints on x86-64, and the PowerPC-only USE_INT_TO_FLOAT_HELPERS/BUGGY_GCC_DIV64 and SPARC USE_FP_CONVERT defines dropped. The VBox i386 special case stays, now flagged with a question:

 -    #else
 -    #define AREG0 "esi"
 -    #define AREG1 "edi"
 -    #endif
 +    #else /* VBOX - why are we different? */
 +    # define AREG0 "esi"
 +    # define AREG1 "edi"
 +    #endif /* VBOX */

s390 gains PARAM1..PARAM3 accessors built on "bras"-loaded literals, per-host EXIT_TB()/GOTO_LABEL_PARAM() macros are introduced for i386/x86-64/ppc/s390/alpha/ia64/sparc/arm/m68k/mips/hppa (again ending in "#error unsupported CPU"), and GETPC() is restructured per host instead of using the VBox ASMReturnAddress() shortcut:

 -    #ifdef VBOX
 -    #define GETPC() ASMReturnAddress()
 -    #elif defined(__s390__)
 +    /* The return address may point to the start of the next instruction.
 +       Subtracting one gets us the call instruction itself. */
 +    #if defined(__s390__)
      # define GETPC() ((void*)(((unsigned long)__builtin_return_address(0) & 0x7fffffffUL) - 1))
      #elif defined(__arm__)
 …
      # define GETPC() ((void *)((unsigned long)__builtin_return_address(0) - 1))
      #endif

The #ifndef VBOX around FORCE_RET() also picks up the editorial note "WHY DO WE UNSUBSCRIBE TO THIS MACRO?".
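What the AREG0..AREG3 strings feed, for context: dyngen-era QEMU pins the emulated CPU state pointer into a fixed host register using GCC's global register variable extension, so every generated op reaches `env` without a memory load. A sketch of the mechanism ("r14" is the x86-64 AREG0 from the hunk; real builds would also reserve the register, e.g. with -ffixed-r14, which is omitted here):

    #include <stdio.h>

    typedef struct CPUState { int eax; } CPUState;

    #if defined(__x86_64__) && defined(__GNUC__)
    register CPUState *env asm("r14");    /* pinned: never spilled or reused */
    #else
    static CPUState *env;                 /* plain-pointer fallback */
    #endif

    int main(void)
    {
        CPUState cpu = { 42 };
        env = &cpu;
        printf("eax = %d\n", env->eax);   /* compiles to a %r14-relative load */
        return 0;
    }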
trunk/src/recompiler/elf.h
r33540 → r36140:

      #ifndef _QEMU_ELF_H
      #define _QEMU_ELF_H
 +
 +    #include <inttypes.h>

      /* 32-bit ELF base types. */
 …
      #define R_SPARC_64      32
      #define R_SPARC_OLO10   33
 +    #define R_SPARC_HH22    34
 +    #define R_SPARC_HM10    35
 +    #define R_SPARC_LM22    36
      #define R_SPARC_WDISP16 40
      #define R_SPARC_WDISP19 41
 …
      #define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */

 -    /* Additional section indices. */
 -
 -    #define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared
 +    /* Additional section indeces. */
 +
 +    #define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tenatively declared
         symbols in ANSI C. */
 …
      } Elf64_Nhdr;

 +    #ifdef ELF_CLASS
      #if ELF_CLASS == ELFCLASS32
 …
      #define elf_shdr elf32_shdr
      #define elf_sym elf32_sym
 +    #define elf_addr_t Elf32_Off
 …
      #define elf_shdr elf64_shdr
      #define elf_sym elf64_sym
 +    #define elf_addr_t Elf64_Off
 …
      #endif

 +    #endif /* ELF_CLASS */
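A note on the new ELF_CLASS guard: a loader names its structures generically (elf_shdr, elf_sym, elf_addr_t, ...) and one macro picks the 32- or 64-bit definitions; wrapping the whole dispatch in #ifdef ELF_CLASS lets the header also be included by code that only wants the raw Elf32_*/Elf64_* types. Illustrative sketch only; the real header uses #defines rather than typedefs, and the real types come from the ELF definitions above:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t Elf32_Off;
    typedef uint64_t Elf64_Off;

    #define ELFCLASS32 1
    #define ELFCLASS64 2

    #define ELF_CLASS ELFCLASS64          /* what a 64-bit target would set */

    #ifdef ELF_CLASS
    #if ELF_CLASS == ELFCLASS32
    typedef Elf32_Off elf_addr_t;
    #else
    typedef Elf64_Off elf_addr_t;
    #endif
    #endif /* ELF_CLASS */

    int main(void)
    {
        printf("elf_addr_t is %zu bytes\n", sizeof(elf_addr_t));
        return 0;
    }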
trunk/src/recompiler/exec-all.h
r36125 → r36140 (selected hunks):

      /* Maximum size a TCG op can expand to. This is complicated because a
 -       single op may require several host instructions and register reloads.
 +       single op may require several host instructions and regirster reloads.
         For now take a wild guess at 128 bytes, which should allow at least
         a couple of fixup instructions per argument. */
 …
      target_ulong tmp;
      tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
 -    return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
 +    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
 …
      tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
 -    return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
 -            (tmp & TB_JMP_ADDR_MASK));
 +    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
 +            | (tmp & TB_JMP_ADDR_MASK));
 …
      #if defined(__powerpc__)
 -    static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
 -    {
 -        uint32_t val, *ptr;
 -
 -        /* patch the branch destination */
 -        ptr = (uint32_t *)jmp_addr;
 -        val = *ptr;
 -        val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
 -        *ptr = val;
 -        /* flush icache */
 -        asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
 -        asm volatile ("sync" : : : "memory");
 -        asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
 -        asm volatile ("sync" : : : "memory");
 -        asm volatile ("isync" : : : "memory");
 -    }
 -    #elif defined(__i386__)
 +    extern void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
 +    #define tb_set_jmp_target1 ppc_tb_set_jmp_target
 +    #elif defined(__i386__) || defined(__x86_64__)
      static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
      {
          /* patch the branch destination */
          *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
 -        /* no need to flush icache explicitely */
 +        /* no need to flush icache explicitly */
      }
 +    #elif defined(__arm__)
 +    static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
 +    {
 +        register unsigned long _beg __asm ("a1");
 +        register unsigned long _end __asm ("a2");
 +        register unsigned long _flg __asm ("a3");
 +
 +        /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
 +        *(uint32_t *)jmp_addr |= ((addr - (jmp_addr + 8)) >> 2) & 0xffffff;
 +
 +        /* flush icache */
 +        _beg = jmp_addr;
 +        _end = jmp_addr + 4;
 +        _flg = 0;
 +        __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
 +    }
      #endif
 …
      TranslationBlock *tb_find_pc(unsigned long pc_ptr);
 -
 -    #ifndef offsetof
 -    #define offsetof(type, field) ((size_t) &((type *)0)->field)
 -    #endif

      #if defined(_WIN32)
 …
      #if defined(CONFIG_USER_ONLY)
 -    static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
 +    static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
      {
          return addr;
 …
      #ifdef USE_KQEMU
      #define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
 +
 +    #define MSR_QPI_COMMBASE 0xfabe0010
 …
      void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
 +    void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
 +                            ram_addr_t phys_offset);
      void kqemu_cpu_interrupt(CPUState *env);
      void kqemu_record_dump(void);
 +
 +    extern uint32_t kqemu_comm_base;
trunk/src/recompiler/exec.c
r36125 r36140 31 31 #ifndef VBOX 32 32 #ifdef _WIN32 33 #define WIN32_LEAN_AND_MEAN 33 34 #include <windows.h> 34 35 #else … … 54 55 #include "cpu.h" 55 56 #include "exec-all.h" 57 #include "qemu-common.h" 58 #include "tcg.h" 59 #ifndef VBOX 60 #include "hw/hw.h" 61 #endif 62 #include "osdep.h" 56 63 #if defined(CONFIG_USER_ONLY) 57 64 #include <qemu.h> … … 66 73 //#define DEBUG_TB_CHECK 67 74 //#define DEBUG_TLB_CHECK 75 76 //#define DEBUG_IOPORT 77 //#define DEBUG_SUBPAGE 68 78 69 79 #if !defined(CONFIG_USER_ONLY) … … 114 124 __attribute__((aligned (32))) 115 125 #endif 126 116 127 uint8_t code_gen_prologue[1024] code_gen_section; 117 118 128 #else /* VBOX */ 119 129 extern uint8_t* code_gen_prologue; 120 130 #endif /* VBOX */ 121 122 131 static uint8_t *code_gen_buffer; 123 132 static unsigned long code_gen_buffer_size; … … 171 180 172 181 typedef struct PhysPageDesc { 173 /* offset in host memory of the page + io_index in the low 12bits */182 /* offset in host memory of the page + io_index in the low bits */ 174 183 ram_addr_t phys_offset; 175 184 } PhysPageDesc; … … 194 203 #define L1_SIZE (1 << L1_BITS) 195 204 #define L2_SIZE (1 << L2_BITS) 196 197 static void io_mem_init(void);198 205 199 206 unsigned long qemu_real_host_page_size; … … 252 259 } subpage_t; 253 260 254 255 261 #ifndef VBOX 256 262 #ifdef _WIN32 … … 279 285 } 280 286 #endif 281 #else / / VBOX287 #else /* VBOX */ 282 288 static void map_exec(void *addr, long size) 283 289 { … … 285 291 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE); 286 292 } 287 #endif 293 #endif /* VBOX */ 288 294 289 295 static void page_init(void) … … 325 331 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *)); 326 332 #endif 333 327 334 #ifdef VBOX 328 335 /* We use other means to set reserved bit on our pages */ 329 #else 336 #else /* !VBOX */ 330 337 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY) 331 338 { … … 355 362 } 356 363 #endif 357 #endif 364 #endif /* !VBOX */ 358 365 } 359 366 … … 498 505 #endif 499 506 500 #ifdef VBOX 501 /* 502 * We don't need such huge codegen buffer size, as execute most of the code 503 * in raw or hwacc mode 504 */ 507 #ifdef VBOX /* We don't need such huge codegen buffer size, as execute 508 most of the code in raw or hwacc mode. */ 505 509 #define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024) 506 #else 510 #else /* !VBOX */ 507 511 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024) 508 #endif 512 #endif /* !VBOX */ 509 513 510 514 #if defined(CONFIG_USER_ONLY) 511 /* Currently it is not recomm ended to allocate big chunks of data in515 /* Currently it is not recommanded to allocate big chunks of data in 512 516 user mode. 
It will change when a dedicated libc will be used */ 513 517 #define USE_STATIC_CODE_GEN_BUFFER … … 829 833 } 830 834 } 831 #endif // DEBUG_TB_CHECK 835 836 #endif 832 837 833 838 /* invalidate one TB */ … … 956 961 957 962 #ifdef VBOX 963 958 964 void tb_invalidate_virt(CPUState *env, uint32_t eip) 959 965 { … … 998 1004 } 999 1005 # endif /* VBOX_STRICT */ 1006 1000 1007 #endif /* VBOX */ 1001 1008 … … 1032 1039 TranslationBlock *tb; 1033 1040 1034 p->code_bitmap = qemu_malloc (TARGET_PAGE_SIZE / 8);1041 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8); 1035 1042 if (!p->code_bitmap) 1036 1043 return; 1037 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);1038 1044 1039 1045 tb = p->first_tb; … … 1516 1522 static void breakpoint_invalidate(CPUState *env, target_ulong pc) 1517 1523 { 1518 target_ulong addr, pd; 1524 target_phys_addr_t addr; 1525 target_ulong pd; 1519 1526 ram_addr_t ram_addr; 1520 1527 PhysPageDesc *p; … … 1644 1651 if (env->singlestep_enabled != enabled) { 1645 1652 env->singlestep_enabled = enabled; 1646 /* must flush all the translated code to avoid inconsist encies */1653 /* must flush all the translated code to avoid inconsistancies */ 1647 1654 /* XXX: only flush what is necessary */ 1648 1655 tb_flush(env); … … 1657 1664 loglevel = log_flags; 1658 1665 if (loglevel && !logfile) { 1659 logfile = fopen(logfilename, "w");1666 logfile = fopen(logfilename, log_append ? "a" : "w"); 1660 1667 if (!logfile) { 1661 1668 perror(logfilename); … … 1665 1672 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */ 1666 1673 { 1667 static uint8_tlogfile_buf[4096];1674 static char logfile_buf[4096]; 1668 1675 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf)); 1669 1676 } … … 1671 1678 setvbuf(logfile, NULL, _IOLBF, 0); 1672 1679 #endif 1680 log_append = 1; 1681 } 1682 if (!loglevel && logfile) { 1683 fclose(logfile); 1684 logfile = NULL; 1673 1685 } 1674 1686 } … … 1677 1689 { 1678 1690 logfilename = strdup(filename); 1691 if (logfile) { 1692 fclose(logfile); 1693 logfile = NULL; 1694 } 1695 cpu_set_log(loglevel); 1679 1696 } 1680 1697 #endif /* !VBOX */ … … 1747 1764 "show target assembly code for each compiled TB" }, 1748 1765 { CPU_LOG_TB_OP, "op", 1749 "show micro ops for each compiled TB (only usable if 'in_asm' used)" }, 1766 "show micro ops for each compiled TB" }, 1767 { CPU_LOG_TB_OP_OPT, "op_opt", 1768 "show micro ops " 1750 1769 #ifdef TARGET_I386 1751 { CPU_LOG_TB_OP_OPT, "op_opt",1752 "show micro ops after optimization for each compiled TB" }, 1753 #endif 1770 "before eflags optimization and " 1771 #endif 1772 "after liveness analysis" }, 1754 1773 { CPU_LOG_INT, "int", 1755 1774 "show interrupts/exceptions in short format" }, … … 1757 1776 "show trace before each executed TB (lots of logs)" }, 1758 1777 { CPU_LOG_TB_CPU, "cpu", 1759 "show CPU state before bloc translation" },1778 "show CPU state before block translation" }, 1760 1779 #ifdef TARGET_I386 1761 1780 { CPU_LOG_PCALL, "pcall", … … 1779 1798 int cpu_str_to_log_mask(const char *str) 1780 1799 { 1781 CPULogItem *item;1800 const CPULogItem *item; 1782 1801 int mask; 1783 1802 const char *p, *p1; … … 1814 1833 { 1815 1834 va_list ap; 1835 va_list ap2; 1816 1836 1817 1837 va_start(ap, fmt); 1838 va_copy(ap2, ap); 1818 1839 fprintf(stderr, "qemu: fatal: "); 1819 1840 vfprintf(stderr, fmt, ap); … … 1824 1845 cpu_dump_state(env, stderr, fprintf, 0); 1825 1846 #endif 1847 if (logfile) { 1848 fprintf(logfile, "qemu: fatal: "); 1849 vfprintf(logfile, fmt, ap2); 1850 fprintf(logfile, "\n"); 1851 
#ifdef TARGET_I386 1852 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP); 1853 #else 1854 cpu_dump_state(env, logfile, fprintf, 0); 1855 #endif 1856 fflush(logfile); 1857 fclose(logfile); 1858 } 1859 va_end(ap2); 1826 1860 va_end(ap); 1827 1861 abort(); … … 1841 1875 return new_env; 1842 1876 } 1843 #endif 1877 #endif /* !VBOX */ 1844 1878 1845 1879 #if !defined(CONFIG_USER_ONLY) … … 1865 1899 } 1866 1900 1901 #ifdef VBOX 1867 1902 static CPUTLBEntry s_cputlb_empty_entry = { 1868 1903 .addr_read = -1, … … 1871 1906 .addend = -1, 1872 1907 }; 1908 #endif /* VBOX */ 1873 1909 1874 1910 /* NOTE: if flush_global is true, also flush global entries (not … … 1886 1922 1887 1923 for(i = 0; i < CPU_TLB_SIZE; i++) { 1924 #ifdef VBOX 1888 1925 int mmu_idx; 1889 1926 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1890 1927 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry; 1891 1928 } 1929 #else /* !VBOX */ 1930 env->tlb_table[0][i].addr_read = -1; 1931 env->tlb_table[0][i].addr_write = -1; 1932 env->tlb_table[0][i].addr_code = -1; 1933 env->tlb_table[1][i].addr_read = -1; 1934 env->tlb_table[1][i].addr_write = -1; 1935 env->tlb_table[1][i].addr_code = -1; 1936 #if (NB_MMU_MODES >= 3) 1937 env->tlb_table[2][i].addr_read = -1; 1938 env->tlb_table[2][i].addr_write = -1; 1939 env->tlb_table[2][i].addr_code = -1; 1940 #if (NB_MMU_MODES == 4) 1941 env->tlb_table[3][i].addr_read = -1; 1942 env->tlb_table[3][i].addr_write = -1; 1943 env->tlb_table[3][i].addr_code = -1; 1944 #endif 1945 #endif 1946 #endif /* !VBOX */ 1892 1947 } 1893 1948 … … 1987 2042 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; 1988 2043 if ((addr - start) < length) { 1989 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;2044 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY; 1990 2045 } 1991 2046 } … … 2129 2184 } 2130 2185 2131 2132 /* update the TLB corresponding to virtual page vaddr and phys addr 2133 addr so that it is no longer dirty */ 2134 static inline void tlb_set_dirty(CPUState *env, 2135 unsigned long addr, target_ulong vaddr) 2186 /* update the TLB corresponding to virtual page vaddr 2187 so that it is no longer dirty */ 2188 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr) 2136 2189 { 2137 2190 int i; 2138 2191 2139 addr &= TARGET_PAGE_MASK;2192 vaddr &= TARGET_PAGE_MASK; 2140 2193 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 2141 tlb_set_dirty1(&env->tlb_table[0][i], addr);2142 tlb_set_dirty1(&env->tlb_table[1][i], addr);2194 tlb_set_dirty1(&env->tlb_table[0][i], vaddr); 2195 tlb_set_dirty1(&env->tlb_table[1][i], vaddr); 2143 2196 #if (NB_MMU_MODES >= 3) 2144 2197 tlb_set_dirty1(&env->tlb_table[2][i], vaddr); … … 2302 2355 return ret; 2303 2356 } 2304 #if 02305 /* called from signal handler: invalidate the code and unprotect the2306 page. Return TRUE if the fault was successfully handled. 
*/
2307 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2308 {
2309 #if !defined(CONFIG_SOFTMMU)
2310 VirtPageDesc *vp;
2311 
2312 #if defined(DEBUG_TLB)
2313 printf("page_unprotect: addr=0x%08x\n", addr);
2314 #endif
2315 addr &= TARGET_PAGE_MASK;
2316 
2317 /* if it is not mapped, no need to worry here */
2318 if (addr >= MMAP_AREA_END)
2319 return 0;
2320 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2321 if (!vp)
2322 return 0;
2323 /* NOTE: in this case, validate_tag is _not_ tested as it
2324 validates only the code TLB */
2325 if (vp->valid_tag != virt_valid_tag)
2326 return 0;
2327 if (!(vp->prot & PAGE_WRITE))
2328 return 0;
2329 #if defined(DEBUG_TLB)
2330 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2331 addr, vp->phys_addr, vp->prot);
2332 #endif
2333 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2334 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2335 (unsigned long)addr, vp->prot);
2336 /* set the dirty bit */
2337 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2338 /* flush the code inside */
2339 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2340 return 1;
2341 #elif defined(VBOX)
2342 addr &= TARGET_PAGE_MASK;
2343 
2344 /* if it is not mapped, no need to worry here */
2345 if (addr >= MMAP_AREA_END)
2346 return 0;
2347 return 1;
2348 #else
2349 return 0;
2350 #endif
2351 }
2352 #endif /* 0 */
2353 2357 
2354 2358 #else
…
2425 2429 
2426 2430 /* modify the flags of a page and invalidate the code if
2427 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2431 necessary. The flag PAGE_WRITE_ORG is positionned automatically
2428 2432 depending on PAGE_WRITE */
2429 2433 void page_set_flags(target_ulong start, target_ulong end, int flags)
…
2432 2436 target_ulong addr;
2433 2437 
2438 /* mmap_lock should already be held. */
2434 2439 start = start & TARGET_PAGE_MASK;
2435 2440 end = TARGET_PAGE_ALIGN(end);
…
2439 2444 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2440 2445 #endif
2441 spin_lock(&tb_lock);
2442 2446 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2443 2447 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2448 /* We may be called for host regions that are outside guest
2449 address space. */
2450 if (!p)
2451 return;
2444 2452 /* if the write protection is set, then we invalidate the code
2445 2453 inside */
…
2451 2459 p->flags = flags;
2452 2460 }
2453 spin_unlock(&tb_lock);
2454 2461 }
2455 2462 
…
2491 2498 
2492 2499 /* called from signal handler: invalidate the code and unprotect the
2493 page. Return TRUE if the fault was successfully handled. */
2500 page. Return TRUE if the fault was succesfully handled. */
2494 2501 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2495 2502 {
…
2570 2577 } while (0)
2571 2578 
2572 
2573 2579 /* register physical memory. 'size' must be a multiple of the target
2574 2580 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2575 2581 io memory page */
2576 2582 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2577 unsigned long size,
2578 unsigned long phys_offset)
2583 ram_addr_t size,
2584 ram_addr_t phys_offset)
2579 2585 {
2580 2586 target_phys_addr_t addr, end_addr;
…
2613 2619 } else {
2614 2620 p->phys_offset = phys_offset;
2615 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2616 (phys_offset & IO_MEM_ROMD))
2621 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2622 (phys_offset & IO_MEM_ROMD))
2617 2623 phys_offset += TARGET_PAGE_SIZE;
2618 2624 }
…
2620 2626 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2621 2627 p->phys_offset = phys_offset;
2622 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2623 (phys_offset & IO_MEM_ROMD))
2628 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2629 (phys_offset & IO_MEM_ROMD))
2624 2630 phys_offset += TARGET_PAGE_SIZE;
2625 2631 else {
…
2639 2645 }
2640 2646 }
2647 
2641 2648 /* since each CPU stores ram addresses in its TLB cache, we must
2642 2649 reset the modified entries */
…
2648 2655 
2649 2656 /* XXX: temporary until new memory mapping API */
2650 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2657 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2651 2658 {
2652 2659 PhysPageDesc *p;
…
2676 2683 {
2677 2684 }
2678 #endif
2679 
2685 #endif /* !VBOX */
2680 2686 
2681 2687 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2682 2688 {
2683 2689 #ifdef DEBUG_UNASSIGNED
2684 printf("Unassigned mem read 0x%08x\n", (int)addr);
2690 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2685 2691 #endif
2686 2692 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
…
2715 2721 {
2716 2722 #ifdef DEBUG_UNASSIGNED
2717 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2723 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2724 #endif
2725 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2726 do_unassigned_access(addr, 1, 0, 0, 1);
2718 2727 #endif
2719 2728 }
…
2738 2747 #endif
2739 2748 }
2749 
2740 2750 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2741 2751 unassigned_mem_readb,
…
2750 2760 };
2751 2761 
2752 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2753 {
2754 unsigned long ram_addr;
2762 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2763 uint32_t val)
2764 {
2755 2765 int dirty_flags;
2756 #if defined(VBOX)
2757 ram_addr = addr;
2758 #else
2759 ram_addr = addr - (unsigned long)phys_ram_base;
2760 #endif
2761 2766 #ifdef VBOX
2762 2767 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
…
2777 2782 }
2778 2783 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2779 remR3PhysWriteU8( addr, val);
2780 #else
2781 stb_p( (uint8_t *)(long)addr, val);
2784 remR3PhysWriteU8(ram_addr, val);
2785 #else
2786 stb_p(phys_ram_base + ram_addr, val);
2782 2787 #endif
2783 2788 #ifdef USE_KQEMU
…
2794 2799 flushed */
2795 2800 if (dirty_flags == 0xff)
2796 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2797 }
2798 
2799 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2800 {
2801 unsigned long ram_addr;
2801 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2802 }
2803 
2804 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2805 uint32_t val)
2806 {
2802 2807 int dirty_flags;
2803 #if defined(VBOX)
2804 ram_addr = addr;
2805 #else
2806 ram_addr = addr - (unsigned long)phys_ram_base;
2807 #endif
2808 2808 #ifdef VBOX
2809 2809 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
…
2824 2824 }
2825 2825 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2826 remR3PhysWriteU16( addr, val);
2827 #else
2828 stw_p( (uint8_t *)(long)addr, val);
2826 remR3PhysWriteU16(ram_addr, val);
2827 #else
2828 stw_p(phys_ram_base + ram_addr, val);
2829 2829 #endif
2830 2830 
…
2842 2842 flushed */
2843 2843 if (dirty_flags == 0xff)
2844 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2845 }
2846 
2847 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2848 {
2849 unsigned long ram_addr;
2844 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2845 }
2846 
2847 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2848 uint32_t val)
2849 {
2850 2850 int dirty_flags;
2851 #if defined(VBOX)
2852 ram_addr = addr;
2853 #else
2854 ram_addr = addr - (unsigned long)phys_ram_base;
2855 #endif
2856 2851 #ifdef VBOX
2857 2852 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
…
2872 2867 }
2873 2868 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2874 remR3PhysWriteU32( addr, val);
2875 #else
2876 stl_p( (uint8_t *)(long)addr, val);
2869 remR3PhysWriteU32(ram_addr, val);
2870 #else
2871 stl_p(phys_ram_base + ram_addr, val);
2877 2872 #endif
2878 2873 #ifdef USE_KQEMU
…
2889 2884 flushed */
2890 2885 if (dirty_flags == 0xff)
2891 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2886 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2892 2887 }
2893 2888 
…
2903 2898 notdirty_mem_writel,
2904 2899 };
2905 
2906 2900 
2907 2901 /* Generate a debug exception if a watchpoint has been hit. */
…
3184 3178 return io_mem_read[io_index >> IO_MEM_SHIFT];
3185 3179 }
3180 
3186 3181 #endif /* !defined(CONFIG_USER_ONLY) */
3187 3182 
…
3210 3205 /* FIXME - should this return an error rather than just fail? */
3211 3206 return;
3212 memcpy(p, buf, len);
3213 unlock_user(p, addr, len);
3207 memcpy(p, buf, l);
3208 unlock_user(p, addr, l);
3214 3209 } else {
3215 3210 if (!(flags & PAGE_READ))
3216 3211 return;
3212 /* XXX: this code should not depend on lock_user */
3217 3213 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3218 3214 /* FIXME - should this return an error rather than just fail? */
3219 3215 return;
3220 memcpy(buf, p, len);
3216 memcpy(buf, p, l);
3221 3217 unlock_user(p, addr, 0);
3222 3218 }
…
3508 3504 } else {
3509 3505 #ifndef VBOX
3510 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3511 (addr & ~TARGET_PAGE_MASK);
3506 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3507 ptr = phys_ram_base + addr1;
3512 3508 stl_p(ptr, val);
3513 3509 #else
…
3521 3517 /* set dirty bit */
3522 3518 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3523 3519 (0xff & ~CODE_DIRTY_FLAG);
3524 3520 }
3525 3521 }
3526 #endif
3522 #endif /* !VBOX */
3527 3523 }
3528 3524 
…
3561 3557 }
3562 3558 }
3563 
3564 3559 
3565 3560 /* warning: addr must be aligned */
…
3632 3627 {
3633 3628 int l;
3634 target_ulong page, phys_addr;
3629 target_phys_addr_t phys_addr;
3630 target_ulong page;
3635 3631 
3636 3632 while (len > 0) {
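Background for the notdirty_mem_write* hunks above: the recompiler traps the first write to each RAM page so it can flag the page dirty and invalidate any translated code on it; once a page is fully dirty the slow handler is bypassed. A minimal sketch of that bookkeeping follows — phys_ram_dirty and CODE_DIRTY_FLAG exist in the real code, everything else here is a simplified illustration, not VirtualBox API:

#include <stdint.h>
#include <stddef.h>

#define PAGE_BITS 12
#define NPAGES 1024
#define CODE_DIRTY_FLAG 0x01 /* same idea as the real flag */

static uint8_t ram[NPAGES << PAGE_BITS]; /* guest RAM backing store */
static uint8_t dirty[NPAGES]; /* 0xff = fully dirty, stop trapping */

static void notdirty_write_u8(size_t ram_addr, uint8_t val)
{
    ram[ram_addr] = val; /* perform the guest write */
    /* mark everything except the code-dirty bit; once the page reads
       back 0xff the TLB entry can be switched to the fast path */
    dirty[ram_addr >> PAGE_BITS] |= 0xff & ~CODE_DIRTY_FLAG;
}

-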
trunk/src/recompiler/fpu/softfloat-macros.h
r21292 r36140 
718 718 
719 719 }
720 
-
trunk/src/recompiler/fpu/softfloat-native.c
r21292 r36140 7 7 { 8 8 STATUS(float_rounding_mode) = val; 9 #if defined(_BSD) && !defined(__APPLE__) || (defined(HOST_SOLARIS) && (HOST_SOLARIS < 10 || HOST_SOLARIS == 11)) 9 #if defined(_BSD) && !defined(__APPLE__) || (defined(HOST_SOLARIS) && (HOST_SOLARIS < 10 || HOST_SOLARIS == 11)) /* VBOX adds sol 11 */ 10 10 fpsetround(val); 11 11 #elif defined(__arm__) … … 61 61 #endif 62 62 63 #if defined(_ ARCH_PPC)63 #if defined(__powerpc__) 64 64 65 65 /* correct (but slow) PowerPC rint() (glibc version is incorrect) */ 66 staticdouble qemu_rint(double x)66 double qemu_rint(double x) 67 67 { 68 68 double y = 4503599627370496.0; … … 230 230 { 231 231 if (a < b) { 232 return float_relation_less;232 return -1; 233 233 } else if (a == b) { 234 return float_relation_equal;234 return 0; 235 235 } else if (a > b) { 236 return float_relation_greater;237 } else { 238 return float_relation_unordered;236 return 1; 237 } else { 238 return 2; 239 239 } 240 240 } … … 242 242 { 243 243 if (isless(a, b)) { 244 return float_relation_less;244 return -1; 245 245 } else if (a == b) { 246 return float_relation_equal;246 return 0; 247 247 } else if (isgreater(a, b)) { 248 return float_relation_greater;249 } else { 250 return float_relation_unordered;248 return 1; 249 } else { 250 return 2; 251 251 } 252 252 } … … 258 258 a = u.i; 259 259 return ( ( ( a>>22 ) & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF ); 260 }261 262 int float32_is_nan( float32 a1 )263 {264 float32u u;265 uint64_t a;266 u.f = a1;267 a = u.i;268 return ( 0xFF800000 < ( a<<1 ) );269 260 } 270 261 … … 401 392 { 402 393 if (a < b) { 403 return float_relation_less;394 return -1; 404 395 } else if (a == b) { 405 return float_relation_equal;396 return 0; 406 397 } else if (a > b) { 407 return float_relation_greater;408 } else { 409 return float_relation_unordered;398 return 1; 399 } else { 400 return 2; 410 401 } 411 402 } … … 413 404 { 414 405 if (isless(a, b)) { 415 return float_relation_less;406 return -1; 416 407 } else if (a == b) { 417 return float_relation_equal;408 return 0; 418 409 } else if (isgreater(a, b)) { 419 return float_relation_greater;420 } else { 421 return float_relation_unordered;410 return 1; 411 } else { 412 return 2; 422 413 } 423 414 } … … 441 432 a = u.i; 442 433 443 return ( LIT64( 0xFF F0000000000000 ) < (bits64) ( a<<1 ) );434 return ( LIT64( 0xFFE0000000000000 ) < (bits64) ( a<<1 ) ); 444 435 445 436 } … … 493 484 { 494 485 if (a < b) { 495 return float_relation_less;486 return -1; 496 487 } else if (a == b) { 497 return float_relation_equal;488 return 0; 498 489 } else if (a > b) { 499 return float_relation_greater;500 } else { 501 return float_relation_unordered;490 return 1; 491 } else { 492 return 2; 502 493 } 503 494 } … … 505 496 { 506 497 if (isless(a, b)) { 507 return float_relation_less;498 return -1; 508 499 } else if (a == b) { 509 return float_relation_equal;500 return 0; 510 501 } else if (isgreater(a, b)) { 511 return float_relation_greater;512 } else { 513 return float_relation_unordered;502 return 1; 503 } else { 504 return 2; 514 505 } 515 506 } 516 507 int floatx80_is_signaling_nan( floatx80 a1) 517 {518 floatx80u u;519 uint64_t aLow;520 u.f = a1;521 522 aLow = u.i.low & ~ LIT64( 0x4000000000000000 );523 return524 ( ( u.i.high & 0x7FFF ) == 0x7FFF )525 && (bits64) ( aLow<<1 )526 && ( u.i.low == aLow );527 }528 529 int floatx80_is_nan( floatx80 a1 )530 508 { 531 509 floatx80u u; -
trunk/src/recompiler/fpu/softfloat-native.h
r36125 r36140 4 4 #if (defined(_BSD) && !defined(__APPLE__)) || defined(HOST_SOLARIS) 5 5 #include <ieeefp.h> 6 #elif defined(_MSC_VER) 7 # include <fpieee.h> 8 # ifndef fabsf 9 # define fabsf(f) ((float)fabs(f)) 10 # endif 6 #define fabsf(f) ((float)fabs(f)) 11 7 #else 12 8 #include <fenv.h> 13 9 #endif 14 10 15 #if defined(__OpenBSD__) || defined(__NetBSD__) 11 #ifdef __OpenBSD__ 12 /* Get OpenBSD version number */ 16 13 #include <sys/param.h> 17 14 #endif … … 39 36 #endif 40 37 41 #ifdef __NetBSD__42 #ifndef isgreater43 #define isgreater(x, y) __builtin_isgreater(x, y)44 #endif45 #ifndef isgreaterequal46 #define isgreaterequal(x, y) __builtin_isgreaterequal(x, y)47 #endif48 #ifndef isless49 #define isless(x, y) __builtin_isless(x, y)50 #endif51 #ifndef islessequal52 #define islessequal(x, y) __builtin_islessequal(x, y)53 #endif54 #ifndef isunordered55 #define isunordered(x, y) __builtin_isunordered(x, y)56 #endif57 #endif58 59 60 38 #define isnormal(x) (fpclass(x) >= FP_NZERO) 61 39 #define isgreater(x, y) ((!unordered(x, y)) && ((x) > (y))) … … 145 123 146 124 typedef struct float_status { 147 intfloat_rounding_mode;148 #ifdef FLOATX80 149 intfloatx80_rounding_precision;125 signed char float_rounding_mode; 126 #ifdef FLOATX80 127 signed char floatx80_rounding_precision; 150 128 #endif 151 129 } float_status; … … 251 229 int float32_compare_quiet( float32, float32 STATUS_PARAM ); 252 230 int float32_is_signaling_nan( float32 ); 253 int float32_is_nan( float32 );254 231 255 232 INLINE float32 float32_abs(float32 a) … … 261 238 { 262 239 return -a; 263 }264 265 INLINE float32 float32_is_infinity(float32 a)266 {267 return fpclassify(a) == FP_INFINITE;268 }269 270 INLINE float32 float32_is_neg(float32 a)271 {272 float32u u;273 u.f = a;274 return u.i >> 31;275 }276 277 INLINE float32 float32_is_zero(float32 a)278 {279 return fpclassify(a) == FP_ZERO;280 240 } 281 241 … … 372 332 } 373 333 374 INLINE float64 float64_is_infinity(float64 a)375 {376 return fpclassify(a) == FP_INFINITE;377 }378 379 INLINE float64 float64_is_neg(float64 a)380 {381 float64u u;382 u.f = a;383 return u.i >> 63;384 }385 386 INLINE float64 float64_is_zero(float64 a)387 {388 return fpclassify(a) == FP_ZERO;389 }390 391 334 INLINE float64 float64_scalbn(float64 a, int n) 392 335 { … … 464 407 int floatx80_compare_quiet( floatx80, floatx80 STATUS_PARAM ); 465 408 int floatx80_is_signaling_nan( floatx80 ); 466 int floatx80_is_nan( floatx80 );467 409 468 410 INLINE floatx80 floatx80_abs(floatx80 a) … … 476 418 } 477 419 478 INLINE floatx80 floatx80_is_infinity(floatx80 a)479 {480 return fpclassify(a) == FP_INFINITE;481 }482 483 INLINE floatx80 floatx80_is_neg(floatx80 a)484 {485 floatx80u u;486 u.f = a;487 return u.i.high >> 15;488 }489 490 INLINE floatx80 floatx80_is_zero(floatx80 a)491 {492 return fpclassify(a) == FP_ZERO;493 }494 495 420 INLINE floatx80 floatx80_scalbn(floatx80 a, int n) 496 421 { -
trunk/src/recompiler/fpu/softfloat-specialize.h
r21292 r36140 38 38 39 39 /*---------------------------------------------------------------------------- 40 | Underflow tininess-detection mode, statically initialized to default value. 41 | (The declaration in `softfloat.h' must match the `int8' type here.) 42 *----------------------------------------------------------------------------*/ 43 int8 float_detect_tininess = float_tininess_after_rounding; 44 45 /*---------------------------------------------------------------------------- 40 46 | Raises the exceptions specified by `flags'. Floating-point traps can be 41 47 | defined here if desired. It is currently not possible for such a trap … … 62 68 #if defined(TARGET_SPARC) 63 69 #define float32_default_nan make_float32(0x7FFFFFFF) 64 #elif defined(TARGET_POWERPC) || defined(TARGET_ARM)70 #elif defined(TARGET_POWERPC) 65 71 #define float32_default_nan make_float32(0x7FC00000) 66 72 #elif defined(TARGET_HPPA) … … 144 150 flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN; 145 151 bits32 av, bv, res; 146 147 if ( STATUS(default_nan_mode) )148 return float32_default_nan;149 152 150 153 aIsNaN = float32_is_nan( a ); … … 190 193 #if defined(TARGET_SPARC) 191 194 #define float64_default_nan make_float64(LIT64( 0x7FFFFFFFFFFFFFFF )) 192 #elif defined(TARGET_POWERPC) || defined(TARGET_ARM)195 #elif defined(TARGET_POWERPC) 193 196 #define float64_default_nan make_float64(LIT64( 0x7FF8000000000000 )) 194 197 #elif defined(TARGET_HPPA) … … 279 282 flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN; 280 283 bits64 av, bv, res; 281 282 if ( STATUS(default_nan_mode) )283 return float64_default_nan;284 284 285 285 aIsNaN = float64_is_nan( a ); … … 419 419 flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN; 420 420 421 if ( STATUS(default_nan_mode) ) {422 a.low = floatx80_default_nan_low;423 a.high = floatx80_default_nan_high;424 return a;425 }426 427 421 aIsNaN = floatx80_is_nan( a ); 428 422 aIsSignalingNaN = floatx80_is_signaling_nan( a ); … … 545 539 flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN; 546 540 547 if ( STATUS(default_nan_mode) ) {548 a.low = float128_default_nan_low;549 a.high = float128_default_nan_high;550 return a;551 }552 553 541 aIsNaN = float128_is_nan( a ); 554 542 aIsSignalingNaN = float128_is_signaling_nan( a ); -
trunk/src/recompiler/fpu/softfloat.c
r21292 r36140 
31 31 =============================================================================*/
32 32 
33 /* FIXME: Flush-To-Zero only effects results. Denormal inputs should also
34 be flushed to zero. */
35 33 #include "softfloat.h"
36 34 
…
297 295 }
298 296 if ( zExp < 0 ) {
299 if ( STATUS(flush_to_zero) ) return packFloat32( zSign, 0, 0 );
300 297 isTiny =
301 298 ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
…
461 458 }
462 459 if ( zExp < 0 ) {
463 if ( STATUS(flush_to_zero) ) return packFloat64( zSign, 0, 0 );
464 460 isTiny =
465 461 ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
…
640 636 }
641 637 if ( zExp <= 0 ) {
642 if ( STATUS(flush_to_zero) ) return packFloatx80( zSign, 0, 0 );
643 638 isTiny =
644 639 ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
…
971 966 }
972 967 if ( zExp < 0 ) {
973 if ( STATUS(flush_to_zero) ) return packFloat128( zSign, 0, 0, 0 );
974 968 isTiny =
975 969 ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
…
1644 1638 return a;
1645 1639 }
1646 if ( aExp == 0 ) {
1647 if ( STATUS(flush_to_zero) ) return packFloat32( zSign, 0, 0 );
1648 return packFloat32( zSign, 0, ( aSig + bSig )>>6 );
1649 }
1640 if ( aExp == 0 ) return packFloat32( zSign, 0, ( aSig + bSig )>>6 );
1650 1641 zSig = 0x40000000 + aSig + bSig;
1651 1642 zExp = aExp;
…
2055 2046 return roundAndPackFloat32( 0, zExp, zSig STATUS_VAR );
2056 2047 
2057 }
2058 
2059 /*----------------------------------------------------------------------------
2060 | Returns the binary log of the single-precision floating-point value `a'.
2061 | The operation is performed according to the IEC/IEEE Standard for Binary
2062 | Floating-Point Arithmetic.
2063 *----------------------------------------------------------------------------*/
2064 float32 float32_log2( float32 a STATUS_PARAM )
2065 {
2066 flag aSign, zSign;
2067 int16 aExp;
2068 bits32 aSig, zSig, i;
2069 
2070 aSig = extractFloat32Frac( a );
2071 aExp = extractFloat32Exp( a );
2072 aSign = extractFloat32Sign( a );
2073 
2074 if ( aExp == 0 ) {
2075 if ( aSig == 0 ) return packFloat32( 1, 0xFF, 0 );
2076 normalizeFloat32Subnormal( aSig, &aExp, &aSig );
2077 }
2078 if ( aSign ) {
2079 float_raise( float_flag_invalid STATUS_VAR);
2080 return float32_default_nan;
2081 }
2082 if ( aExp == 0xFF ) {
2083 if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR );
2084 return a;
2085 }
2086 
2087 aExp -= 0x7F;
2088 aSig |= 0x00800000;
2089 zSign = aExp < 0;
2090 zSig = aExp << 23;
2091 
2092 for (i = 1 << 22; i > 0; i >>= 1) {
2093 aSig = ( (bits64)aSig * aSig ) >> 23;
2094 if ( aSig & 0x01000000 ) {
2095 aSig >>= 1;
2096 zSig |= i;
2097 }
2098 }
2099 
2100 if ( zSign )
2101 zSig = -zSig;
2102 
2103 return normalizeRoundAndPackFloat32( zSign, 0x85, zSig STATUS_VAR );
2104 2048 }
2105 2049 
…
2652 2596 return a;
2653 2597 }
2654 if ( aExp == 0 ) {
2655 if ( STATUS(flush_to_zero) ) return packFloat64( zSign, 0, 0 );
2656 return packFloat64( zSign, 0, ( aSig + bSig )>>9 );
2657 }
2598 if ( aExp == 0 ) return packFloat64( zSign, 0, ( aSig + bSig )>>9 );
2658 2599 zSig = LIT64( 0x4000000000000000 ) + aSig + bSig;
2659 2600 zExp = aExp;
…
3051 2992 return roundAndPackFloat64( 0, zExp, zSig STATUS_VAR );
3052 2993 
3053 }
3054 
3055 /*----------------------------------------------------------------------------
3056 | Returns the binary log of the double-precision floating-point value `a'.
3057 | The operation is performed according to the IEC/IEEE Standard for Binary
3058 | Floating-Point Arithmetic.
3059 *----------------------------------------------------------------------------*/
3060 float64 float64_log2( float64 a STATUS_PARAM )
3061 {
3062 flag aSign, zSign;
3063 int16 aExp;
3064 bits64 aSig, aSig0, aSig1, zSig, i;
3065 
3066 aSig = extractFloat64Frac( a );
3067 aExp = extractFloat64Exp( a );
3068 aSign = extractFloat64Sign( a );
3069 
3070 if ( aExp == 0 ) {
3071 if ( aSig == 0 ) return packFloat64( 1, 0x7FF, 0 );
3072 normalizeFloat64Subnormal( aSig, &aExp, &aSig );
3073 }
3074 if ( aSign ) {
3075 float_raise( float_flag_invalid STATUS_VAR);
3076 return float64_default_nan;
3077 }
3078 if ( aExp == 0x7FF ) {
3079 if ( aSig ) return propagateFloat64NaN( a, float64_zero STATUS_VAR );
3080 return a;
3081 }
3082 
3083 aExp -= 0x3FF;
3084 aSig |= LIT64( 0x0010000000000000 );
3085 zSign = aExp < 0;
3086 zSig = (bits64)aExp << 52;
3087 for (i = 1LL << 51; i > 0; i >>= 1) {
3088 mul64To128( aSig, aSig, &aSig0, &aSig1 );
3089 aSig = ( aSig0 << 12 ) | ( aSig1 >> 52 );
3090 if ( aSig & LIT64( 0x0020000000000000 ) ) {
3091 aSig >>= 1;
3092 zSig |= i;
3093 }
3094 }
3095 
3096 if ( zSign )
3097 zSig = -zSig;
3098 return normalizeRoundAndPackFloat64( zSign, 0x408, zSig STATUS_VAR );
3099 2994 }
3100 2995 
…
4703 4598 }
4704 4599 add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 );
4705 if ( aExp == 0 ) {
4706 if ( STATUS(flush_to_zero) ) return packFloat128( zSign, 0, 0, 0 );
4707 return packFloat128( zSign, 0, zSig0, zSig1 );
4708 }
4600 if ( aExp == 0 ) return packFloat128( zSign, 0, zSig0, zSig1 );
4709 4601 zSig2 = 0;
4710 4602 zSig0 |= LIT64( 0x0002000000000000 );
…
5588 5480 return a;
5589 5481 }
5590 if ( aExp != 0 )
5591 aSig |= 0x00800000;
5592 else if ( aSig == 0 )
5593 return a;
5594 
5595 aExp += n - 1;
5596 aSig <<= 7;
5597 return normalizeRoundAndPackFloat32( aSign, aExp, aSig STATUS_VAR );
5482 aExp += n;
5483 return roundAndPackFloat32( aSign, aExp, aSig STATUS_VAR );
5598 5484 }
5599 5485 
…
5611 5497 return a;
5612 5498 }
5613 if ( aExp != 0 )
5614 aSig |= LIT64( 0x0010000000000000 );
5615 else if ( aSig == 0 )
5616 return a;
5617 
5618 aExp += n - 1;
5619 aSig <<= 10;
5620 return normalizeRoundAndPackFloat64( aSign, aExp, aSig STATUS_VAR );
5499 aExp += n;
5500 return roundAndPackFloat64( aSign, aExp, aSig STATUS_VAR );
5621 5501 }
5622 5502 
…
5635 5515 return a;
5636 5516 }
5637 if (aExp == 0 && aSig == 0)
5638 return a;
5639 
5640 5517 aExp += n;
5641 return normalizeRoundAndPackFloatx80( STATUS(floatx80_rounding_precision),
5642 5518 return roundAndPackFloatx80( STATUS(floatx80_rounding_precision),
5519 aSign, aExp, aSig, 0 STATUS_VAR );
5643 5520 }
5644 5521 #endif
…
5658 5535 return a;
5659 5536 }
5660 if ( aExp != 0 )
5661 aSig0 |= LIT64( 0x0001000000000000 );
5662 else if ( aSig0 == 0 && aSig1 == 0 )
5663 return a;
5664 
5665 aExp += n - 1;
5666 return normalizeRoundAndPackFloat128( aSign, aExp, aSig0, aSig1
5667 STATUS_VAR );
5537 aExp += n;
5538 return roundAndPackFloat128( aSign, aExp, aSig0, aSig1, 0 STATUS_VAR );
5668 5539 
5669 5540 }
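The *_scalbn hunks above revert to the simple form that just adds n to the exponent and lets roundAndPack* clamp the result; the removed lines were the extra handling that renormalized subnormal inputs first. What scalbn is meant to compute is exact scaling by a power of two. A quick reference check against libm — a hypothetical test harness, not part of the changeset:

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* scalbn semantics: x * 2^n, exact unless the result
       overflows or underflows */
    printf("%g\n", ldexpf(1.5f, 4)); /* 24 */
    printf("%g\n", ldexp(1.0, -3));  /* 0.125 */
    return 0;
}

-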
trunk/src/recompiler/fpu/softfloat.h
r26499 r36140 55 55 typedef uint8_t uint8; 56 56 typedef int8_t int8; 57 #ifndef _AIX58 57 typedef int uint16; 59 58 typedef int int16; 60 #endif61 59 typedef unsigned int uint32; 62 60 typedef signed int int32; … … 95 93 #else 96 94 /* native float support */ 97 #if (defined(__i386__) || defined(__x86_64__)) && (!defined(_BSD) || defined(VBOX)) 95 #if (defined(__i386__) || defined(__x86_64__)) && (!defined(_BSD) || defined(VBOX)) /** @todo VBOX: not correct on windows */ 98 96 #define FLOATX80 99 97 #endif … … 199 197 signed char floatx80_rounding_precision; 200 198 #endif 201 flag flush_to_zero;202 flag default_nan_mode;203 199 } float_status; 204 200 205 201 void set_float_rounding_mode(int val STATUS_PARAM); 206 202 void set_float_exception_flags(int val STATUS_PARAM); 207 INLINE void set_flush_to_zero(flag val STATUS_PARAM)208 {209 STATUS(flush_to_zero) = val;210 }211 INLINE void set_default_nan_mode(flag val STATUS_PARAM)212 {213 STATUS(default_nan_mode) = val;214 }215 203 INLINE int get_float_exception_flags(float_status *status) 216 204 { … … 278 266 float32 float32_rem( float32, float32 STATUS_PARAM ); 279 267 float32 float32_sqrt( float32 STATUS_PARAM ); 280 float32 float32_log2( float32 STATUS_PARAM );281 268 int float32_eq( float32, float32 STATUS_PARAM ); 282 269 int float32_le( float32, float32 STATUS_PARAM ); … … 301 288 } 302 289 303 INLINE int float32_is_infinity(float32 a)304 {305 return (float32_val(a) & 0x7fffffff) == 0x7f800000;306 }307 308 INLINE int float32_is_neg(float32 a)309 {310 return float32_val(a) >> 31;311 }312 313 INLINE int float32_is_zero(float32 a)314 {315 return (float32_val(a) & 0x7fffffff) == 0;316 }317 318 290 #define float32_zero make_float32(0) 319 #define float32_one make_float32(0x3f800000)320 291 321 292 /*---------------------------------------------------------------------------- … … 349 320 float64 float64_rem( float64, float64 STATUS_PARAM ); 350 321 float64 float64_sqrt( float64 STATUS_PARAM ); 351 float64 float64_log2( float64 STATUS_PARAM );352 322 int float64_eq( float64, float64 STATUS_PARAM ); 353 323 int float64_le( float64, float64 STATUS_PARAM ); … … 372 342 } 373 343 374 INLINE int float64_is_infinity(float64 a)375 {376 return (float64_val(a) & 0x7fffffffffffffffLL ) == 0x7ff0000000000000LL;377 }378 379 INLINE int float64_is_neg(float64 a)380 {381 return float64_val(a) >> 63;382 }383 384 INLINE int float64_is_zero(float64 a)385 {386 return (float64_val(a) & 0x7fffffffffffffffLL) == 0;387 }388 389 344 #define float64_zero make_float64(0) 390 #define float64_one make_float64(0x3ff0000000000000LL)391 345 392 346 #ifdef FLOATX80 … … 435 389 a.high ^= 0x8000; 436 390 return a; 437 }438 439 INLINE int floatx80_is_infinity(floatx80 a)440 {441 return (a.high & 0x7fff) == 0x7fff && a.low == 0;442 }443 444 INLINE int floatx80_is_neg(floatx80 a)445 {446 return a.high >> 15;447 }448 449 INLINE int floatx80_is_zero(floatx80 a)450 {451 return (a.high & 0x7fff) == 0 && a.low == 0;452 391 } 453 392 … … 503 442 } 504 443 505 INLINE int float128_is_infinity(float128 a)506 {507 return (a.high & 0x7fffffffffffffffLL) == 0x7fff000000000000LL && a.low == 0;508 }509 510 INLINE int float128_is_neg(float128 a)511 {512 return a.high >> 63;513 }514 515 INLINE int float128_is_zero(float128 a)516 {517 return (a.high & 0x7fffffffffffffffLL) == 0 && a.low == 0;518 }519 520 444 #endif 521 445 -
trunk/src/recompiler/hostregs_helper.h
r36125 r36140 
28 28 */
29 29 
30 /* The GCC global register variable extension is used to reserve some
30 /* The GCC global register vairable extension is used to reserve some
31 31 host registers for use by dyngen. However only the core parts of the
32 32 translation engine are compiled with these settings. We must manually
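For reference, the GCC extension this comment refers to has the following shape; it is what pins the CPU state pointer in this tree (see exec.h). GCC-only, and the 32-bit x86 register name is chosen purely for illustration:

/* reserve EBX for the translation core; GCC then never allocates it */
register unsigned long env_ptr asm("ebx");

-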
trunk/src/recompiler/osdep.h
r36125 r36140 
2 2 #define QEMU_OSDEP_H
3 3 
4 #ifdef VBOX
4 #ifdef VBOX /** @todo clean up this, it's not fully synched. */
5 5 
6 6 #include <iprt/alloc.h>
…
81 81 
82 82 #ifndef likely
83 #ifndef VBOX
84 83 #if __GNUC__ < 3
85 84 #define __builtin_expect(x, n) (x)
…
88 87 #define likely(x) __builtin_expect(!!(x), 1)
89 88 #define unlikely(x) __builtin_expect(!!(x), 0)
90 #else /* VBOX */
91 #define likely(cond) RT_LIKELY(cond)
92 #define unlikely(cond) RT_UNLIKELY(cond)
93 #endif
94 89 #endif /* !likely */
95 90 
…
131 126 #endif
132 127 
128 #ifndef VBOX
129 #define qemu_printf printf
130 #endif
131 
132 
133 #if defined (__GNUC__) && defined (__GNUC_MINOR_)
134 133 # define QEMU_GNUC_PREREQ(maj, min) \
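The likely/unlikely hunk above drops the VBox RT_LIKELY wrappers in favour of plain __builtin_expect. These macros only steer block layout for branch prediction; program semantics are untouched. A small usage sketch (the function is illustrative, not from the tree):

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

int fetch_byte(const unsigned char *p)
{
    if (unlikely(p == 0))
        return -1; /* cold error path, moved out of line */
    return *p;     /* hot path falls straight through */
}

-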
trunk/src/recompiler/qemu-lock.h
r36125 r36140 
30 30 pthread mutexes, and non-NPTL userspace isn't threadsafe anyway.
31 31 In either case a spinlock is probably the wrong kind of lock.
32 Spinlocks are only good if you know another CPU has the lock and is
32 Spinlocks are only good if you know annother CPU has the lock and is
33 33 likely to release it soon. In environments where you have more threads
34 34 than physical CPUs (the extreme case being a single CPU host) a spinlock
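For the comment being retouched here: a spinlock in this style is a busy-waiting test-and-set loop, which is why it only pays off when the current holder is running on another physical CPU. A minimal sketch using GCC's atomic builtins — illustrative only; the real implementation in this header is per-architecture assembly:

typedef volatile int spinlock_t;

static inline void spin_lock(spinlock_t *lock)
{
    while (__sync_lock_test_and_set(lock, 1)) {
        /* spin: cheap only if the holder can make progress meanwhile */
    }
}

static inline void spin_unlock(spinlock_t *lock)
{
    __sync_lock_release(lock); /* store 0 with release semantics */
}

-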
trunk/src/recompiler/softmmu_exec.h
r17040 r36140 
1 1 /* Common softmmu definitions and inline routines. */
2 2 
3 #define ldul_user ldl_user
4 #define ldul_kernel ldl_kernel
3 #define ldul_user ldl_user
4 #define ldul_kernel ldl_kernel
5 #define ldul_hypv ldl_hypv
6 #define ldul_executive ldl_executive
7 #define ldul_supervisor ldl_supervisor
8 
9 #include "softmmu_defs.h"
5 10 
6 11 #define ACCESS_TYPE 0
7 #define MEMSUFFIX _kernel
12 #define MEMSUFFIX MMU_MODE0_SUFFIX
8 13 #define DATA_SIZE 1
9 14 #include "softmmu_header.h"
…
21 26 
22 27 #define ACCESS_TYPE 1
23 #define MEMSUFFIX _user
28 #define MEMSUFFIX MMU_MODE1_SUFFIX
24 29 #define DATA_SIZE 1
25 30 #include "softmmu_header.h"
…
36 41 #undef MEMSUFFIX
37 42 
43 #if (NB_MMU_MODES >= 3)
44 
45 #define ACCESS_TYPE 2
46 #define MEMSUFFIX MMU_MODE2_SUFFIX
47 #define DATA_SIZE 1
48 #include "softmmu_header.h"
49 
50 #define DATA_SIZE 2
51 #include "softmmu_header.h"
52 
53 #define DATA_SIZE 4
54 #include "softmmu_header.h"
55 
56 #define DATA_SIZE 8
57 #include "softmmu_header.h"
58 #undef ACCESS_TYPE
59 #undef MEMSUFFIX
60 
61 #if (NB_MMU_MODES >= 4)
62 
63 #define ACCESS_TYPE 3
64 #define MEMSUFFIX MMU_MODE3_SUFFIX
65 #define DATA_SIZE 1
66 #include "softmmu_header.h"
67 
68 #define DATA_SIZE 2
69 #include "softmmu_header.h"
70 
71 #define DATA_SIZE 4
72 #include "softmmu_header.h"
73 
74 #define DATA_SIZE 8
75 #include "softmmu_header.h"
76 #undef ACCESS_TYPE
77 #undef MEMSUFFIX
78 
79 #if (NB_MMU_MODES > 4)
80 #error "NB_MMU_MODES > 4 is not supported for now"
81 #endif /* (NB_MMU_MODES > 4) */
82 #endif /* (NB_MMU_MODES == 4) */
83 #endif /* (NB_MMU_MODES >= 3) */
84 
38 85 /* these access are slower, they must be as rare as possible */
39 #define ACCESS_TYPE 2
86 #define ACCESS_TYPE (NB_MMU_MODES)
40 87 #define MEMSUFFIX _data
41 88 #define DATA_SIZE 1
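The pattern being extended above stamps out one load/store accessor family per MMU mode by re-including softmmu_header.h with different ACCESS_TYPE/MEMSUFFIX/DATA_SIZE settings. A toy version of the same multiple-expansion trick, made self-contained here with a macro instead of a real #include (all names illustrative; glue/xglue mirror the helpers the tree itself uses):

#define xglue(a, b) a##b
#define glue(a, b) xglue(a, b)

/* one "inclusion" per mode generates a distinct accessor */
#define DEFINE_LDL(suffix)                      \
    static int glue(ldl_, suffix)(const int *p) \
    {                                           \
        return *p;                              \
    }

DEFINE_LDL(kernel) /* expands to ldl_kernel() */
DEFINE_LDL(user)   /* expands to ldl_user()   */

-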
trunk/src/recompiler/softmmu_header.h
r36125 r36140 222 222 : "%eax", "%ecx", "%edx", "memory", "cc"); 223 223 } 224 224 225 #else 225 226 … … 228 229 static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr) 229 230 { 230 231 231 int page_index; 232 232 RES_TYPE res; … … 273 273 274 274 /* generic store macro */ 275 275 276 static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v) 276 277 { -
trunk/src/recompiler/target-i386/cpu.h
r36125 r36140 
100 100 #define DESC_P_MASK (1 << 15)
101 101 #define DESC_DPL_SHIFT 13
102 #define DESC_DPL_MASK (1 << DESC_DPL_SHIFT)
102 103 #define DESC_S_MASK (1 << 12)
103 104 #define DESC_TYPE_SHIFT 8
…
114 116 
115 117 /* eflags masks */
116 #define CC_C 0x0001
117 #define CC_P 0x0004
117 #define CC_C 0x0001
118 #define CC_P 0x0004
118 119 #define CC_A 0x0010
119 120 #define CC_Z 0x0040
…
125 126 #define VM_SHIFT 17
126 127 
127 #define TF_MASK 0x00000100
128 #define IF_MASK 0x00000200
129 #define DF_MASK 0x00000400
128 #define TF_MASK 0x00000100
129 #define IF_MASK 0x00000200
130 #define DF_MASK 0x00000400
130 131 #define IOPL_MASK 0x00003000
131 #define NT_MASK 0x00004000
132 #define NT_MASK 0x00004000
132 133 #define RF_MASK 0x00010000
133 134 #define VM_MASK 0x00020000
…
138 139 
139 140 /* hidden flags - used internally by qemu to represent additional cpu
140 states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not redundant. We avoid
141 using the IOPL_MASK, TF_MASK and VM_MASK bit position to ease oring
142 with eflags. */
141 states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
142 redundant. We avoid using the IOPL_MASK, TF_MASK and VM_MASK bit
143 position to ease oring with eflags. */
143 144 /* current cpl */
144 145 #define HF_CPL_SHIFT 0
…
163 164 #define HF_OSFXSR_SHIFT 16 /* CR4.OSFXSR */
164 165 #define HF_VM_SHIFT 17 /* must be same as eflags */
165 #define HF_HALTED_SHIFT 18 /* CPU halted */
166 166 #define HF_SMM_SHIFT 19 /* CPU in SMM mode */
167 167 #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
…
182 182 #define HF_CS64_MASK (1 << HF_CS64_SHIFT)
183 183 #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
184 #define HF_HALTED_MASK (1 << HF_HALTED_SHIFT)
185 184 #define HF_SMM_MASK (1 << HF_SMM_SHIFT)
186 185 #define HF_SVME_MASK (1 << HF_SVME_SHIFT)
…
255 254 #define MSR_IA32_APICBASE_BASE (0xfffff<<12)
256 255 
257 #ifndef MSR_IA32_SYSENTER_CS /* VBox x86.h kludge */
258 #define MSR_IA32_SYSENTER_CS 0x174
259 #define MSR_IA32_SYSENTER_ESP 0x175
260 #define MSR_IA32_SYSENTER_EIP 0x176
261 #endif
262 
263 256 #define MSR_IA32_SYSENTER_CS 0x174
264 257 #define MSR_IA32_SYSENTER_ESP 0x175
…
283 276 
284 277 #ifdef VBOX
285 # define MSR_APIC_RANGE_START 0x800
286 # define MSR_APIC_RANGE_END 0x900
278 # define MSR_APIC_RANGE_START 0x800
279 # define MSR_APIC_RANGE_END 0x900
287 280 #endif
288 281 
…
315 308 #define CPUID_PAT (1 << 16)
316 309 #define CPUID_PSE36 (1 << 17)
310 #define CPUID_PN (1 << 18)
317 311 #define CPUID_CLFLUSH (1 << 19)
318 312 #define CPUID_DTS (1 << 21)
…
322 316 #define CPUID_SSE (1 << 25)
323 317 #define CPUID_SSE2 (1 << 26)
324 #define CPUID_SS (1 << 27)
325 #define CPUID_HT (1 << 28)
326 #define CPUID_TM (1 << 29)
318 #define CPUID_SS (1 << 27)
319 #define CPUID_HT (1 << 28)
320 #define CPUID_TM (1 << 29)
327 321 #define CPUID_IA64 (1 << 30)
328 #define CPUID_PBE (1 << 31)
322 #define CPUID_PBE (1 << 31)
329 323 
…
409 403 enum {
410 404 CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
411 CC_OP_EFLAGS, /* all cc are explicitely computed, CC_SRC = flags */
405 CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
412 406 
413 407 CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
…
461 455 CC_OP_SARQ,
462 456 
463 CC_OP_NB
457 CC_OP_NB,
464 458 };
465 459 
…
538 532 
539 533 typedef struct CPUX86State {
540 /* standard registers */
534 /* standard registers */
541 535 target_ulong regs[CPU_NB_REGS];
542 536 target_ulong eip;
…
550 544 uint32_t cc_op;
551 545 int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
552 uint32_t hflags;
553 
546 uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
547 are known at translation time. */
554 548 uint32_t hflags2; /* various other flags, see HF2_xxx constants. */
555 549 
…
601 595 uint32_t alignment0;
602 596 #endif
603 uint64_t sysenter_esp;
604 uint64_t sysenter_eip;
597 target_ulong sysenter_esp;
598 target_ulong sysenter_eip;
605 599 uint64_t efer;
606 600 uint64_t star;
…
675 669 user */
676 670 struct APICState *apic_state;
677 #else
671 #else /* VBOX */
678 672 uint32_t alignment2[3];
679 673 /** Profiling tb_flush. */
680 674 STAMPROFILE StatTbFlush;
681 #endif
675 #endif /* VBOX */
682 676 } CPUX86State;
683 677 
…
694 688 } SegmentCache_Ver16;
695 689 
696 # define CPU_NB_REGS_VER16 8
690 # define CPU_NB_REGS_VER16 8
697 691 
698 692 /* Version 1.6 structure; just for loading the old saved state */
699 693 typedef struct CPUX86State_Ver16 {
700 # if TARGET_LONG_BITS > HOST_LONG_BITS
694 # if TARGET_LONG_BITS > HOST_LONG_BITS
701 695 /* temporaries if we cannot store them in host registers */
702 696 uint32_t t0, t1, t2;
703 # endif
697 # endif
704 698 
705 699 /* standard registers */
…
733 727 uint8_t fptags[8]; /* 0 = valid, 1 = empty */
734 728 union {
735 # ifdef USE_X86LDOUBLE
729 # ifdef USE_X86LDOUBLE
736 730 CPU86_LDouble d __attribute__((aligned(16)));
737 # else
731 # else
738 732 CPU86_LDouble d;
739 # endif
733 # endif
740 734 MMXReg mmx;
741 735 } fpregs[8];
…
743 737 /* emulator internal variables */
744 738 float_status fp_status;
745 # ifdef VBOX
739 # ifdef VBOX
746 740 uint32_t alignment3[3]; /* force the long double to start a 16 byte line. */
747 # endif
741 # endif
748 742 CPU86_LDouble ft0;
749 # if defined(VBOX) && defined(RT_ARCH_X86) && !defined(RT_OS_DARWIN)
743 # if defined(VBOX) && defined(RT_ARCH_X86) && !defined(RT_OS_DARWIN)
750 744 uint32_t alignment4; /* long double is 12 byte, pad it to 16. */
751 # endif
745 # endif
752 746 union {
753 747 float f;
…
767 761 uint32_t sysenter_esp;
768 762 uint32_t sysenter_eip;
769 # ifdef VBOX
763 # ifdef VBOX
770 764 uint32_t alignment0;
771 # endif
765 # endif
772 766 uint64_t efer;
773 767 uint64_t star;
…
776 770 
777 771 /* temporary data for USE_CODE_COPY mode */
778 # ifdef USE_CODE_COPY
772 # ifdef USE_CODE_COPY
779 773 uint32_t tmp0;
780 774 uint32_t saved_esp;
781 775 int native_fp_regs; /* if true, the FPU state is in the native CPU regs */
782 # endif
776 # endif
783 777 
784 778 /* exception/interrupt handling */
…
788 782 /** CPUX86State state flags
789 783 * @{ */
790 # define CPU_RAW_RING0 0x0002 /* Set after first time RawR0 is executed, never cleared. */
791 # define CPU_EMULATE_SINGLE_INSTR 0x0040 /* Execute a single instruction in emulation mode */
792 # define CPU_EMULATE_SINGLE_STEP 0x0080 /* go into single step mode */
793 # define CPU_RAW_HWACC 0x0100 /* Set after first time HWACC is executed, never cleared. */
784 # define CPU_RAW_RING0 0x0002 /* Set after first time RawR0 is executed, never cleared. */
785 # define CPU_EMULATE_SINGLE_INSTR 0x0040 /* Execute a single instruction in emulation mode */
786 # define CPU_EMULATE_SINGLE_STEP 0x0080 /* go into single step mode */
787 # define CPU_RAW_HWACC 0x0100 /* Set after first time HWACC is executed, never cleared. */
794 788 /** @} */
795 789 #endif /* !VBOX */
…
887 881 they can trigger unexpected exceptions */
888 882 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
889 void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
890 void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);
883 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
884 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
891 885 
892 886 /* you can call this signal handler from your SIGBUS and SIGSEGV
…
943 937 void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr);
944 938 void save_raw_fp_state(CPUX86State *env, uint8_t *ptr);
945 
946 #endif
939 #endif /* VBOX */
947 940 
948 941 #define TARGET_PAGE_BITS 12
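Context for the CC_OP_* enum kept in this header: the translator does not materialize EFLAGS after every instruction. It records which operation last produced a result (cc_op) together with its operands (CC_SRC/CC_DST), and helpers derive individual flags only when something actually reads them. A compressed sketch of the idea — illustrative only; the real per-op dispatch tables live in op_helper.c:

#include <stdint.h>

enum { CC_OP_LOGICL, CC_OP_ADDL };

static int cc_op;               /* which operation produced the result */
static uint32_t cc_src, cc_dst; /* its operands / result               */

static uint32_t emulated_add(uint32_t a, uint32_t b)
{
    cc_src = a;        /* kept so CF/OF could be derived later */
    cc_dst = a + b;
    cc_op = CC_OP_ADDL;
    return cc_dst;
}

static int compute_zf(void) /* flags on demand, not per instruction */
{
    switch (cc_op) {
    case CC_OP_LOGICL:
    case CC_OP_ADDL:
        return cc_dst == 0; /* ZF depends only on the result */
    default:
        return 0;
    }
}

-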
trunk/src/recompiler/target-i386/exec.h
r36125 r36140 40 40 #include "cpu-defs.h" 41 41 42 /* at least 4 register variables are defined */43 42 register struct CPUX86State *env asm(AREG0); 44 43 45 44 #include "qemu-log.h" 46 45 47 #ifndef reg_EAX48 46 #define EAX (env->regs[R_EAX]) 49 #endif50 #ifndef reg_ECX51 47 #define ECX (env->regs[R_ECX]) 52 #endif53 #ifndef reg_EDX54 48 #define EDX (env->regs[R_EDX]) 55 #endif56 #ifndef reg_EBX57 49 #define EBX (env->regs[R_EBX]) 58 #endif59 #ifndef reg_ESP60 50 #define ESP (env->regs[R_ESP]) 61 #endif62 #ifndef reg_EBP63 51 #define EBP (env->regs[R_EBP]) 64 #endif65 #ifndef reg_ESI66 52 #define ESI (env->regs[R_ESI]) 67 #endif68 #ifndef reg_EDI69 53 #define EDI (env->regs[R_EDI]) 70 #endif 71 #define EIP (env->eip) 54 #define EIP (env->eip) 72 55 #define DF (env->df) 73 56 … … 127 110 } 128 111 129 void check_iob_T0(void);130 void check_iow_T0(void);131 void check_iol_T0(void);132 void check_iob_DX(void);133 void check_iow_DX(void);134 void check_iol_DX(void);135 136 112 #if !defined(CONFIG_USER_ONLY) 137 113 138 114 #include "softmmu_exec.h" 139 140 static inline double ldfq(target_ulong ptr)141 {142 union {143 double d;144 uint64_t i;145 } u;146 u.i = ldq(ptr);147 return u.d;148 }149 150 static inline void stfq(target_ulong ptr, double v)151 {152 union {153 double d;154 uint64_t i;155 } u;156 u.d = v;157 stq(ptr, u.i);158 }159 160 static inline float ldfl(target_ulong ptr)161 {162 union {163 float f;164 uint32_t i;165 } u;166 u.i = ldl(ptr);167 return u.f;168 }169 170 static inline void stfl(target_ulong ptr, float v)171 {172 union {173 float f;174 uint32_t i;175 } u;176 u.f = v;177 stl(ptr, u.i);178 }179 115 180 116 #endif /* !defined(CONFIG_USER_ONLY) */ … … 252 188 253 189 #define RC_MASK 0xc00 254 #ifndef RC_NEAR255 190 #define RC_NEAR 0x000 256 #endif257 #ifndef RC_DOWN258 191 #define RC_DOWN 0x400 259 #endif260 #ifndef RC_UP261 192 #define RC_UP 0x800 262 #endif263 #ifndef RC_CHOP264 193 #define RC_CHOP 0xc00 265 #endif266 194 267 195 #define MAXTAN 9223372036854775808.0 … … 328 256 static inline void fpop(void) 329 257 { 330 env->fptags[env->fpstt] = 1; /* inv alidate stack entry */258 env->fptags[env->fpstt] = 1; /* invvalidate stack entry */ 331 259 env->fpstt = (env->fpstt + 1) & 7; 332 260 } … … 369 297 #else 370 298 371 /* XXX: same endianness assumed */372 373 #ifdef CONFIG_USER_ONLY374 375 static inline CPU86_LDouble helper_fldt(target_ulong ptr)376 {377 return *(CPU86_LDouble *)ptr;378 }379 380 static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr)381 {382 *(CPU86_LDouble *)ptr = f;383 }384 385 #else386 387 299 /* we use memory access macros */ 388 300 … … 404 316 stw(ptr + 8, temp.l.upper); 405 317 } 406 407 #endif /* !CONFIG_USER_ONLY */408 318 409 319 #endif /* USE_X86LDOUBLE */ … … 442 352 DF = 1 - (2 * ((eflags >> 10) & 1)); 443 353 env->eflags = (env->eflags & ~update_mask) | 444 (eflags & update_mask) ;354 (eflags & update_mask) | 0x2; 445 355 } 446 356 -
trunk/src/recompiler/target-i386/helper.c
r33656 r36140 33 33 #include <string.h> 34 34 #ifndef VBOX 35 # include <inttypes.h>36 # include <signal.h>37 # include <assert.h>35 # include <inttypes.h> 36 # include <signal.h> 37 # include <assert.h> 38 38 #endif 39 39 … … 105 105 } 106 106 #endif /* !VBOX */ 107 107 108 #ifndef VBOX 108 109 CPUX86State *cpu_x86_init(const char *cpu_model) 109 { 110 #else 111 CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model) 112 #endif 113 { 114 #ifndef VBOX 110 115 CPUX86State *env; 116 #endif 111 117 static int inited; 112 118 119 #ifndef VBOX 113 120 env = qemu_mallocz(sizeof(CPUX86State)); 114 121 if (!env) 115 122 return NULL; 116 #else117 CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)118 {119 static int inited;120 123 #endif 121 124 cpu_exec_init(env); … … 302 305 /* Some CPUs got no CPUID_SEP */ 303 306 .ext_features = CPUID_EXT_MONITOR | 304 CPUID_EXT_SSE3 /* PNI */, 307 CPUID_EXT_SSE3 /* PNI */, CPUID_EXT_SSSE3, 305 308 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST | 306 309 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */ … … 457 460 } 458 461 } 459 #endif / / !VBOX462 #endif /* !VBOX */ 460 463 return 0; 461 464 } … … 854 857 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) | 855 858 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); 856 857 859 #ifdef VBOX 860 858 861 remR3ChangeCpuMode(env); 859 862 #endif -
trunk/src/recompiler/target-i386/helper.h
r17040 r36140 143 143 DEF_HELPER(int32_t, helper_fisttl_ST0, (void)) 144 144 DEF_HELPER(int64_t, helper_fisttll_ST0, (void)) 145 #else 145 #else /* VBOX */ 146 146 DEF_HELPER(RTCCUINTREG, helper_fsts_ST0, (void)) 147 147 DEF_HELPER(uint64_t, helper_fstl_ST0, (void)) … … 152 152 DEF_HELPER(RTCCINTREG, helper_fisttl_ST0, (void)) 153 153 DEF_HELPER(int64_t, helper_fisttll_ST0, (void)) 154 #endif 154 #endif /* VBOX */ 155 155 DEF_HELPER(void, helper_fldt_ST0, (target_ulong ptr)) 156 156 DEF_HELPER(void, helper_fstt_ST0, (target_ulong ptr)) … … 195 195 DEF_HELPER(uint32_t, helper_fnstsw, (void)) 196 196 DEF_HELPER(uint32_t, helper_fnstcw, (void)) 197 #else 197 #else /* VBOX */ 198 198 DEF_HELPER(RTCCUINTREG, helper_fnstsw, (void)) 199 199 DEF_HELPER(RTCCUINTREG, helper_fnstcw, (void)) 200 #endif 200 #endif /* VBOX */ 201 201 DEF_HELPER(void, helper_fldcw, (uint32_t val)) 202 202 DEF_HELPER(void, helper_fclex, (void)) … … 257 257 void sync_seg(CPUX86State *env1, int seg_reg, int selector); 258 258 void sync_ldtr(CPUX86State *env1, int selector); 259 260 #endif 259 #endif /* VBOX */ 261 260 262 261 #undef DEF_HELPER -
trunk/src/recompiler/target-i386/op_helper.c
r36125 r36140 33 33 34 34 #ifdef VBOX 35 # include "qemu-common.h"36 # include <math.h>37 # include "tcg.h"38 #endif 35 # include "qemu-common.h" 36 # include <math.h> 37 # include "tcg.h" 38 #endif /* VBOX */ 39 39 //#define DEBUG_PCALL 40 40 … … 139 139 140 140 #ifdef VBOX 141 141 142 void helper_write_eflags_vme(target_ulong t0) 142 143 { … … 190 191 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI])); 191 192 } 192 #endif 193 194 #endif /* VBOX */ 193 195 194 196 /* return non zero if error */ … … 207 209 selector = selector & 0xfffc; 208 210 } 209 #endif 211 #endif /* VBOX */ 210 212 211 213 if (selector & 0x4) … … 254 256 cpu_x86_load_seg_cache(env, seg, selector, 255 257 (selector << 4), 0xffff, flags); 256 #else 258 #else /* VBOX */ 257 259 cpu_x86_load_seg_cache(env, seg, selector, 258 260 (selector << 4), 0xffff, 0); 259 #endif 261 #endif /* VBOX */ 260 262 } 261 263 … … 314 316 selector = selector & 0xfffc; 315 317 } 316 #endif 318 #endif /* VBOX */ 317 319 318 320 if ((selector & 0xfffc) != 0) { … … 358 360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 359 361 #ifdef VBOX 360 # if 0362 # if 0 361 363 /** @todo: now we ignore loading 0 selectors, need to check what is correct once */ 362 364 cpu_x86_load_seg_cache(env, seg_reg, selector, 363 365 0, 0, 0); 364 # endif365 #endif 366 # endif 367 #endif /* VBOX */ 366 368 } 367 369 } … … 637 639 unsigned int io_offset; 638 640 #endif /* VBOX */ 641 639 642 /* TSS must be a valid 32 bit one */ 640 643 if (!(env->tr.flags & DESC_P_MASK) || … … 678 681 sync_seg(env, reg, env->segs[reg].newselector); 679 682 } 680 #endif 683 #endif /* VBOX */ 681 684 682 685 void helper_check_iob(uint32_t t0) … … 1271 1274 #ifndef VBOX 1272 1275 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 1273 #else 1276 #else /* VBOX */ 1274 1277 /* 1275 1278 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD … … 1278 1281 */ 1279 1282 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK); 1280 #endif 1283 #endif /* VBOX */ 1281 1284 } 1282 1285 #endif … … 1543 1546 target_ulong next_eip, int is_hw) 1544 1547 { 1548 #ifdef VBOX 1545 1549 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) { 1546 1550 if (is_int) { … … 1552 1556 } 1553 1557 } 1558 #endif 1554 1559 1555 1560 if (loglevel & CPU_LOG_INT) { … … 2648 2653 selector &= 0xffff; 2649 2654 cpl = env->hflags & HF_CPL_MASK; 2650 2651 2655 #ifdef VBOX 2656 2652 2657 /* Trying to load a selector with CPL=1? 
*/ 2653 2658 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0)) … … 2656 2661 selector = selector & 0xfffc; 2657 2662 } 2658 #endif 2663 #endif /* VBOX */ 2659 2664 if ((selector & 0xfffc) == 0) { 2660 2665 /* null selector case */ … … 3665 3670 ECX = 0; 3666 3671 } 3667 #endif 3672 #endif /* VBOX */ 3668 3673 3669 3674 void helper_rdpmc(void) … … 3677 3682 EAX = 0; 3678 3683 EDX = 0; 3679 #else 3684 #else /* !VBOX */ 3680 3685 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { 3681 3686 raise_exception(EXCP0D_GPF); … … 3685 3690 /* currently unimplemented */ 3686 3691 raise_exception_err(EXCP06_ILLOP, 0); 3687 #endif 3692 #endif /* !VBOX */ 3688 3693 } 3689 3694 … … 3818 3823 val = 1000ULL; 3819 3824 /* CPU multiplier */ 3820 val |= (( uint64_t)4ULL<< 40);3821 break; 3822 #endif 3825 val |= (((uint64_t)4ULL) << 40); 3826 break; 3827 #endif /* !VBOX */ 3823 3828 #ifdef TARGET_X86_64 3824 3829 case MSR_LSTAR: … … 3860 3865 val = 0; 3861 3866 } 3862 #endif 3867 #endif /* VBOX */ 3863 3868 break; 3864 3869 } … … 4148 4153 return u.i; 4149 4154 } 4155 4150 4156 #ifndef VBOX 4151 4157 int32_t helper_fist_ST0(void) … … 5318 5324 if ((uint32_t)ECX > 1) 5319 5325 raise_exception(EXCP0D_GPF); 5320 #else 5326 #else /* !VBOX */ 5321 5327 if ((uint32_t)ECX != 0) 5322 5328 raise_exception(EXCP0D_GPF); 5323 #endif 5329 #endif /* !VBOX */ 5324 5330 /* XXX: store address ? */ 5325 5331 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0); … … 5332 5338 #ifdef VBOX 5333 5339 helper_hlt(next_eip_addend); 5334 #else 5340 #else /* !VBOX */ 5335 5341 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0); 5336 5342 EIP += next_eip_addend; … … 5343 5349 do_hlt(); 5344 5350 } 5345 #endif 5351 #endif /* !VBOX */ 5346 5352 } 5347 5353 … … 5386 5392 env->eflags |= VIF_MASK; 5387 5393 } 5388 #endif 5394 #endif /* VBOX */ 5389 5395 5390 5396 #if 0 … … 5512 5518 remR3PhysWriteU64(addr, val); 5513 5519 } 5514 #endif 5520 #endif /* VBOX */ 5515 5521 5516 5522 /* try to fill the TLB and return an exception if error. If retaddr is … … 6428 6434 #ifndef VBOX 6429 6435 switch(type) { 6430 #ifndef VBOX6431 6436 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8: 6432 #else6433 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:6434 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:6435 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:6436 #endif6437 6437 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) { 6438 6438 helper_vmexit(type, param); 6439 6439 } 6440 6440 break; 6441 #ifndef VBOX6442 6441 case SVM_EXIT_WRITE_CR0 ... 
SVM_EXIT_WRITE_CR0 + 8: 6443 #else6444 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:6445 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:6446 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:6447 #endif6448 6442 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) { 6449 6443 helper_vmexit(type, param); … … 6501 6495 break; 6502 6496 } 6503 #else 6497 #else /* VBOX */ 6504 6498 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!")); 6505 #endif 6499 #endif /* VBOX */ 6506 6500 } 6507 6501 … … 6746 6740 } 6747 6741 6748 #ifndef VBOX6749 6742 CCTable cc_table[CC_OP_NB] = { 6750 6743 [CC_OP_DYNAMIC] = { /* should never happen */ }, … … 6814 6807 #endif 6815 6808 }; 6816 #else /* VBOX */ 6817 /* Sync carefully with cpu.h */ 6818 CCTable cc_table[CC_OP_NB] = { 6819 /* CC_OP_DYNAMIC */ { 0, 0 }, 6820 6821 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags }, 6822 6823 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull }, 6824 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull }, 6825 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull }, 6826 #ifdef TARGET_X86_64 6827 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull }, 6828 #else 6829 /* CC_OP_MULQ */ { 0, 0 }, 6830 #endif 6831 6832 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb }, 6833 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw }, 6834 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl }, 6835 #ifdef TARGET_X86_64 6836 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq }, 6837 #else 6838 /* CC_OP_ADDQ */ { 0, 0 }, 6839 #endif 6840 6841 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb }, 6842 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw }, 6843 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl }, 6844 #ifdef TARGET_X86_64 6845 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq }, 6846 #else 6847 /* CC_OP_ADCQ */ { 0, 0 }, 6848 #endif 6849 6850 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb }, 6851 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw }, 6852 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl }, 6853 #ifdef TARGET_X86_64 6854 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq }, 6855 #else 6856 /* CC_OP_SUBQ */ { 0, 0 }, 6857 #endif 6858 6859 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb }, 6860 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw }, 6861 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl }, 6862 #ifdef TARGET_X86_64 6863 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq }, 6864 #else 6865 /* CC_OP_SBBQ */ { 0, 0 }, 6866 #endif 6867 6868 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb }, 6869 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw }, 6870 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl }, 6871 #ifdef TARGET_X86_64 6872 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq }, 6873 #else 6874 /* CC_OP_LOGICQ */ { 0, 0 }, 6875 #endif 6876 6877 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl }, 6878 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl }, 6879 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl }, 6880 #ifdef TARGET_X86_64 6881 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl }, 6882 #else 6883 /* CC_OP_INCQ */ { 0, 0 }, 6884 #endif 6885 6886 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl }, 6887 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl }, 6888 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl }, 6889 #ifdef TARGET_X86_64 6890 /* CC_OP_DECQ 
*/ { compute_all_decq, compute_c_incl }, 6891 #else 6892 /* CC_OP_DECQ */ { 0, 0 }, 6893 #endif 6894 6895 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb }, 6896 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw }, 6897 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll }, 6898 #ifdef TARGET_X86_64 6899 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq }, 6900 #else 6901 /* CC_OP_SHLQ */ { 0, 0 }, 6902 #endif 6903 6904 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl }, 6905 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl }, 6906 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl }, 6907 #ifdef TARGET_X86_64 6908 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl}, 6909 #else 6910 /* CC_OP_SARQ */ { 0, 0 }, 6911 #endif 6912 }; 6913 #endif /* VBOX */ 6809 -
trunk/src/recompiler/target-i386/opreg_template.h
r33656 r36140 
29 29 */
30 30 
31 #error "VBOX: obsolete file?"
32 
31 33 void OPPROTO glue(op_movl_A0,REGNAME)(void)
32 34 {
-
trunk/src/recompiler/target-i386/ops_mem.h
r1 r36140 
1 #error "VBOX: obsolete file?"
1 2 void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T0_A0)(void)
2 3 {
-
trunk/src/recompiler/target-i386/ops_sse.h
r36125 r36140 31 31 #if SHIFT == 0 32 32 #define Reg MMXReg 33 #ifndef VBOX34 33 #define XMM_ONLY(x...) 35 #else36 #define XMM_ONLY(x)37 #endif38 34 #define B(n) MMX_B(n) 39 35 #define W(n) MMX_W(n) … … 43 39 #else 44 40 #define Reg XMMReg 45 #ifndef VBOX46 41 #define XMM_ONLY(x...) x 47 #else48 #define XMM_ONLY(x) x49 #endif50 42 #define B(n) XMM_B(n) 51 43 #define W(n) XMM_W(n) … … 77 69 #endif 78 70 } 71 FORCE_RET(); 79 72 } 80 73 … … 122 115 #endif 123 116 } 117 FORCE_RET(); 124 118 } 125 119 … … 142 136 #endif 143 137 } 138 FORCE_RET(); 144 139 } 145 140 … … 179 174 #endif 180 175 } 176 FORCE_RET(); 181 177 } 182 178 … … 197 193 #endif 198 194 } 195 FORCE_RET(); 199 196 } 200 197 … … 215 212 #endif 216 213 } 214 FORCE_RET(); 217 215 } 218 216 … … 229 227 for(i = 16 - shift; i < 16; i++) 230 228 d->B(i) = 0; 229 FORCE_RET(); 231 230 } 232 231 … … 242 241 for(i = 0; i < shift; i++) 243 242 d->B(i) = 0; 243 FORCE_RET(); 244 244 } 245 245 #endif … … 443 443 (int16_t)s->W(2*i+1) * (int16_t)d->W(2*i+1); 444 444 } 445 FORCE_RET(); 445 446 } 446 447 … … 489 490 stb(a0 + i, d->B(i)); 490 491 } 492 FORCE_RET(); 491 493 } 492 494 … … 926 928 ret = float32_compare_quiet(s0, s1, &env->sse_status); 927 929 CC_SRC = comis_eflags[ret + 1]; 930 FORCE_RET(); 928 931 } 929 932 … … 937 940 ret = float32_compare(s0, s1, &env->sse_status); 938 941 CC_SRC = comis_eflags[ret + 1]; 942 FORCE_RET(); 939 943 } 940 944 … … 948 952 ret = float64_compare_quiet(d0, d1, &env->sse_status); 949 953 CC_SRC = comis_eflags[ret + 1]; 954 FORCE_RET(); 950 955 } 951 956 … … 959 964 ret = float64_compare(d0, d1, &env->sse_status); 960 965 CC_SRC = comis_eflags[ret + 1]; 966 FORCE_RET(); 961 967 } 962 968 … … 1504 1510 d->elem(0) = F(0);\ 1505 1511 d->elem(1) = F(1);\ 1506 if (num > 2) {\1507 d->elem(2) = F(2);\1508 d->elem(3) = F(3);\1509 if (num > 4) {\1510 d->elem(4) = F(4);\1511 d->elem(5) = F(5);\1512 d->elem(2) = F(2);\ 1513 d->elem(3) = F(3);\ 1514 if (num > 3) {\ 1515 d->elem(4) = F(4);\ 1516 d->elem(5) = F(5);\ 1517 if (num > 5) {\ 1512 1518 d->elem(6) = F(6);\ 1513 1519 d->elem(7) = F(7);\ -
trunk/src/recompiler/target-i386/ops_template.h
r33656 r36140 
29 29 */
30 30 
31 #error "VBOX: obsolete file?"
31 32 #define DATA_BITS (1 << (3 + SHIFT))
32 33 #define SHIFT_MASK (DATA_BITS - 1)
-
trunk/src/recompiler/target-i386/ops_template_mem.h
r33656 r36140 
29 29 */
30 30 
31 #error "VBOX: Obsolete file?"
31 32 #ifdef MEM_WRITE
32 33 
-
trunk/src/recompiler/target-i386/translate.c
r36125 r36140 33 33 #include <string.h> 34 34 #ifndef VBOX 35 # include <inttypes.h>36 # include <signal.h>37 # include <assert.h>35 # include <inttypes.h> 36 # include <signal.h> 37 # include <assert.h> 38 38 #endif /* !VBOX */ 39 39 … … 52 52 #ifdef TARGET_X86_64 53 53 #define X86_64_ONLY(x) x 54 #ifndef VBOX55 54 #define X86_64_DEF(x...) x 56 #else57 #define X86_64_DEF(x...) x58 #endif59 55 #define CODE64(s) ((s)->code64) 60 56 #define REX_X(s) ((s)->rex_x) … … 66 62 #else 67 63 #define X86_64_ONLY(x) NULL 68 #ifndef VBOX69 64 #define X86_64_DEF(x...) 70 #else71 #define X86_64_DEF(x)72 #endif73 65 #define CODE64(s) 0 74 66 #define REX_X(s) 0 … … 120 112 121 113 #endif /* VBOX */ 122 123 114 124 115 typedef struct DisasContext { … … 170 161 171 162 #ifdef VBOX 172 static void gen_check_external_event( );163 static void gen_check_external_event(void); 173 164 #endif 174 165 … … 719 710 720 711 #ifdef VBOX 721 static void gen_check_external_event() 722 { 723 #if 1 712 713 static void gen_check_external_event(void) 714 { 715 # if 1 724 716 /** @todo: once TCG codegen improves, we may want to use version 725 717 from else version */ 726 718 tcg_gen_helper_0_0(helper_check_external_event); 727 # else719 # else 728 720 int skip_label; 729 721 TCGv t0; … … 747 739 748 740 gen_set_label(skip_label); 749 # endif750 } 751 752 # if 0 /* unused code? */741 # endif 742 } 743 744 # if 0 /* unused code? */ 753 745 static void gen_check_external_event2() 754 746 { 755 747 tcg_gen_helper_0_0(helper_check_external_event); 756 748 } 757 # endif758 759 #endif 749 # endif 750 751 #endif /* VBOX */ 760 752 761 753 static inline void gen_jmp_im(target_ulong pc) … … 770 762 gen_jmp_im(pc); 771 763 # ifdef VBOX_DUMP_STATE 772 764 tcg_gen_helper_0_0(helper_dump_state); 773 765 # endif 774 766 } … … 1135 1127 1136 1128 /* generate a conditional jump to label 'l1' according to jump opcode 1137 value 'b'. In the fast case, T0 is guarante ed not to be used. */1129 value 'b'. In the fast case, T0 is guaranted not to be used. 
*/ 1138 1130 static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1) 1139 1131 { … … 1432 1424 target_ulong cur_eip, target_ulong next_eip) \ 1433 1425 { \ 1434 int l2; 1426 int l2;\ 1435 1427 gen_update_cc_op(s); \ 1436 1428 l2 = gen_jz_ecx_string(s, next_eip); \ … … 1450 1442 int nz) \ 1451 1443 { \ 1452 int l2; 1444 int l2;\ 1453 1445 gen_update_cc_op(s); \ 1454 1446 l2 = gen_jz_ecx_string(s, next_eip); \ … … 2153 2145 } 2154 2146 } 2155 /* index == 4 means no index*/2156 if (havesib && (index != 4 )) {2147 /* XXX: index == 4 is always invalid */ 2148 if (havesib && (index != 4 || scale != 0)) { 2157 2149 #ifdef TARGET_X86_64 2158 2150 if (s->aflag == 2) { … … 2401 2393 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) { 2402 2394 #ifdef VBOX 2403 gen_check_external_event( s);2395 gen_check_external_event(); 2404 2396 #endif /* VBOX */ 2405 2397 /* jump to same page: we can use a direct jump */ … … 2467 2459 } else { 2468 2460 /* slow case: it is more efficient not to generate a jump, 2469 although it is question able whether this optimization is2461 although it is questionnable whether this optimization is 2470 2462 worth to */ 2471 2463 inv = b & 1; … … 2832 2824 2833 2825 #ifdef VBOX 2834 gen_check_external_event( s);2826 gen_check_external_event(); 2835 2827 #endif /* VBOX */ 2836 2828 … … 3293 3285 case 0x02b: /* movntps */ 3294 3286 case 0x12b: /* movntps */ 3287 case 0x3f0: /* lddqu */ 3295 3288 if (mod == 3) 3296 3289 goto illegal_op; 3297 3290 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3298 3291 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); 3299 break;3300 case 0x3f0: /* lddqu */3301 if (mod == 3)3302 goto illegal_op;3303 gen_lea_modrm(s, modrm, ®_addr, &offset_addr);3304 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));3305 3292 break; 3306 3293 case 0x6e: /* movd mm, ea */ … … 3817 3804 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */ 3818 3805 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */ 3819 tcg_gen_qemu_ld32u(cpu_tmp 0, cpu_A0,3806 tcg_gen_qemu_ld32u(cpu_tmp2_i32, cpu_A0, 3820 3807 (s->mem_index >> 2) - 1); 3821 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);3822 3808 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset + 3823 3809 offsetof(XMMReg, XMM_L(0))); … … 4302 4288 #endif /* VBOX */ 4303 4289 4304 4305 4290 /* convert one instruction. s->is_jmp is set if the translation must 4306 4291 be stopped. Return the next pc value */ … … 4315 4300 if (unlikely(loglevel & CPU_LOG_TB_OP)) 4316 4301 tcg_gen_debug_insn_start(pc_start); 4317 4318 4302 s->pc = pc_start; 4319 4303 prefixes = 0; … … 4336 4320 gen_update_eip(pc_start - s->cs_base); 4337 4321 # endif 4338 #endif 4322 #endif /* VBOX */ 4339 4323 4340 4324 next_byte: … … 6382 6366 gen_check_io(s, ot, pc_start - s->cs_base, 6383 6367 svm_is_rep(prefixes)); 6384 #ifdef VBOX /* bird: linux is writing to this port for delaying I/O. */ 6368 #ifdef VBOX /* bird: linux is writing to this port for delaying I/O. */ /** @todo this breaks AIX, remove. 
*/ 6385 6369 if (val == 0x80) 6386 6370 break; … … 7350 7334 op = (modrm >> 3) & 7; 7351 7335 rm = modrm & 7; 7352 7353 7336 #ifdef VBOX 7354 7337 /* 0f 01 f9 */ … … 7361 7344 break; 7362 7345 } 7363 #endif 7346 #endif /* VBOX */ 7364 7347 switch(op) { 7365 7348 case 0: /* sgdt */ … … 7625 7608 { 7626 7609 int label1; 7627 TCGv t0, t1, t2, a0; 7610 TCGv t0, t1, t2; 7611 #ifdef VBOX 7612 TCGv a0; 7613 #endif 7628 7614 7629 7615 if (!s->pe || s->vm86) 7630 7616 goto illegal_op; 7631 7632 7617 t0 = tcg_temp_local_new(TCG_TYPE_TL); 7633 7618 t1 = tcg_temp_local_new(TCG_TYPE_TL); … … 8035 8020 dc->vme = !!(env->cr[4] & CR4_VME_MASK); 8036 8021 dc->pvi = !!(env->cr[4] & CR4_PVI_MASK); 8037 # ifdef VBOX_WITH_CALL_RECORD8022 # ifdef VBOX_WITH_CALL_RECORD 8038 8023 if ( !(env->state & CPU_RAW_RING0) 8039 8024 && (env->cr[0] & CR0_PG_MASK) … … 8043 8028 else 8044 8029 dc->record_call = 0; 8045 # endif8030 # endif 8046 8031 #endif 8047 8032 dc->cpl = (flags >> HF_CPL_SHIFT) & 3; … … 8138 8123 break; 8139 8124 #ifdef VBOX 8140 # ifdef DEBUG8125 # ifdef DEBUG 8141 8126 /* 8142 8127 if(cpu_check_code_raw(env, pc_ptr, env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK))) == ERROR_SUCCESS) … … 8146 8131 } 8147 8132 */ 8148 # endif8133 # endif /* DEBUG */ 8149 8134 if (env->state & CPU_EMULATE_SINGLE_INSTR) 8150 8135 { -
trunk/src/recompiler/tcg/README
r33540 r36140 5 5 TCG (Tiny Code Generator) began as a generic backend for a C 6 6 compiler. It was simplified to be used in QEMU. It also has its roots 7 in the QOP code generator written by Paul Brook. 7 in the QOP code generator written by Paul Brook. 8 8 9 9 2) Definitions … … 31 31 32 32 A TCG "basic block" corresponds to a list of instructions terminated 33 by a branch instruction. 33 by a branch instruction. 34 34 35 35 3) Intermediate representation … … 98 98 99 99 and_i32 t0, t0, $0xffffffff 100 100 101 101 is suppressed. 102 102 … … 285 285 286 286 t0 = read(t1 + offset) 287 Load 8, 16, 32 or 64 bits with or without sign extension from host memory. 287 Load 8, 16, 32 or 64 bits with or without sign extension from host memory. 288 288 offset must be a constant. 289 289 … … 386 386 - The first N parameters are passed in registers. 387 387 - The next parameters are passed on the stack by storing them as words. 388 - Some registers are clobbered during the call. 388 - Some registers are clobbered during the call. 389 389 - The function can return 0 or 1 value in registers. On a 32 bit 390 390 target, functions must be able to return 2 values in registers for … … 425 425 426 426 - Don't hesitate to use helpers for complicated or seldom used target 427 in structions. There is little performance advantage in using TCG to427 intructions. There is little performance advantage in using TCG to 428 428 implement target instructions taking more than about twenty TCG 429 429 instructions. -
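The README passage above notes that a redundant "and_i32 t0, t0, $0xffffffff" is suppressed. A minimal sketch of that kind of no-op test, using a hypothetical IROp record rather than TCG's real data structures:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { const char *name; uint32_t imm; } IROp;  /* hypothetical */

    static bool and_imm_is_noop(const IROp *op)
    {
        /* ANDing a 32-bit value with all-ones changes nothing */
        return op->imm == 0xffffffffu;
    }

    int main(void)
    {
        IROp op = { "and_i32", 0xffffffffu };
        printf("%s $0x%x -> %s\n", op.name, (unsigned)op.imm,
               and_imm_is_noop(&op) ? "suppressed" : "kept");
        return 0;
    }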
trunk/src/recompiler/tcg/i386/tcg-target.c
r36125 r36140 199 199 #define P_EXT 0x100 /* 0x0f opcode prefix */ 200 200 201 #if !defined(VBOX) || !defined(_MSC_VER)202 201 static const uint8_t tcg_cond_to_jcc[10] = { 203 202 [TCG_COND_EQ] = JCC_JE, … … 212 211 [TCG_COND_GTU] = JCC_JA, 213 212 }; 214 #else215 /* Fortunately, ordering is right */216 static const uint8_t tcg_cond_to_jcc[10] = {217 JCC_JE,218 JCC_JNE,219 JCC_JL,220 JCC_JGE,221 JCC_JLE,222 JCC_JG,223 JCC_JB,224 JCC_JAE,225 JCC_JBE,226 JCC_JA,227 };228 #endif229 213 230 214 static inline void tcg_out_opc(TCGContext *s, int opc) … … 291 275 tcg_out32(s, arg); 292 276 } 293 }294 295 static inline void tcg_out_push(TCGContext *s, int reg)296 {297 tcg_out_opc(s, 0x50 + reg);298 }299 300 static inline void tcg_out_pop(TCGContext *s, int reg)301 {302 tcg_out_opc(s, 0x58 + reg);303 277 } 304 278 … … 1065 1039 tcg_abort(); 1066 1040 } 1067 #else 1041 #else /* VBOX && REM_PHYS_ADDR_IN_TLB */ 1068 1042 tcg_out_vbox_phys_write(s, opc, r0, data_reg, data_reg2); 1069 #endif 1043 #endif /* VBOX && REM_PHYS_ADDR_IN_TLB */ 1070 1044 1071 1045 #if defined(CONFIG_SOFTMMU) … … 1376 1350 }; 1377 1351 1352 static inline void tcg_out_push(TCGContext *s, int reg) 1353 { 1354 tcg_out_opc(s, 0x50 + reg); 1355 } 1356 1357 static inline void tcg_out_pop(TCGContext *s, int reg) 1358 { 1359 tcg_out_opc(s, 0x58 + reg); 1360 } 1361 1378 1362 /* Generate global QEMU prologue and epilogue code */ 1379 1363 void tcg_target_qemu_prologue(TCGContext *s) -
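The first hunk above drops the MSVC fallback table for tcg_cond_to_jcc and keeps only the C99 designated-initializer form (the fallback relied on the enumerators happening to be in order anyway). A standalone illustration of why the two spellings are equivalent for a dense, zero-based enum:

    #include <stdio.h>

    enum cond { COND_EQ, COND_NE, COND_LT };    /* dense, starts at 0 */

    static const unsigned char cond_to_jcc[] = {
        [COND_EQ] = 0x74,   /* je  */
        [COND_NE] = 0x75,   /* jne */
        [COND_LT] = 0x7c,   /* jl  */
    };

    int main(void)
    {
        printf("jcc for COND_NE: 0x%02x\n", cond_to_jcc[COND_NE]);
        return 0;
    }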
trunk/src/recompiler/tcg/i386/tcg-target.h
r36125 r36140 53 53 #define TCG_AREG3 TCG_REG_EDI 54 54 #else 55 # define TCG_AREG0 TCG_REG_ESI56 # define TCG_AREG1 TCG_REG_EDI55 # define TCG_AREG0 TCG_REG_ESI 56 # define TCG_AREG1 TCG_REG_EDI 57 57 #endif 58 58 -
trunk/src/recompiler/tcg/tcg-dyngen.c
r29520 r36140 31 31 #include <inttypes.h> 32 32 #else 33 # include <stdio.h>34 # include "osdep.h"33 # include <stdio.h> 34 # include "osdep.h" 35 35 #endif 36 36 -
trunk/src/recompiler/tcg/tcg-runtime.c
r29520 r36140 27 27 #include <stdio.h> 28 28 #include <string.h> 29 #ifndef VBOX30 29 #include <inttypes.h> 31 #endif32 30 33 31 #include "config.h" -
trunk/src/recompiler/tcg/tcg.c
r36125 r36140 36 36 #include <string.h> 37 37 #include <inttypes.h> 38 #else 39 # include <stdio.h>40 # include "osdep.h"41 #endif 38 #else /* VBOX */ 39 # include <stdio.h> 40 # include "osdep.h" 41 #endif /* VBOX */ 42 42 #ifdef _WIN32 43 43 #include <malloc.h> … … 47 47 #include "qemu-common.h" 48 48 49 /* Note: the long term plan is to reduce the depend encies on the QEMU49 /* Note: the long term plan is to reduce the dependancies on the QEMU 50 50 CPU definitions. Currently they are used for qemu_ld/st 51 51 instructions */ … … 65 65 * @todo: fix it in compiler 66 66 */ 67 # if defined(TARGET_X86_64) && (TCG_TARGET_REG_BITS == 32)68 # undef USE_LIVENESS_ANALYSIS69 # endif70 #endif 67 # if defined(TARGET_X86_64) && (TCG_TARGET_REG_BITS == 32) 68 # undef USE_LIVENESS_ANALYSIS 69 # endif 70 #endif /* VBOX */ 71 71 72 72 static void patch_reloc(uint8_t *code_ptr, int type, … … 77 77 #ifndef VBOX 78 78 #define DEF2(s, iargs, oargs, cargs, flags) { #s, iargs, oargs, cargs, iargs + oargs + cargs, flags, 0 }, 79 #else 80 # define DEF2(s, iargs, oargs, cargs, flags) { #s, iargs, oargs, cargs, iargs + oargs + cargs, flags, 0, 0, 0 },81 #endif 79 #else /* VBOX */ 80 # define DEF2(s, iargs, oargs, cargs, flags) { #s, iargs, oargs, cargs, iargs + oargs + cargs, flags, 0, 0, 0 }, 81 #endif /* VBOX */ 82 82 #include "tcg-opc.h" 83 83 #undef DEF … … 508 508 n *= 2; 509 509 } 510 511 510 #ifdef VBOX 512 511 s->helpers = qemu_realloc(s->helpers, n * sizeof(TCGHelperInfo)); … … 766 765 } 767 766 768 #ifndef VBOX769 767 static const char * const cond_name[] = 770 768 { … … 780 778 [TCG_COND_GTU] = "gtu" 781 779 }; 782 #else783 static const char * const cond_name[] =784 {785 "eq",786 "ne",787 "lt",788 "ge",789 "le",790 "gt",791 "ltu",792 "geu",793 "leu",794 "gtu"795 };796 #endif797 780 798 781 void tcg_dump_ops(TCGContext *s, FILE *outfile) … … 1433 1416 } 1434 1417 1435 /* save globals to their can onical location and assume they can be1418 /* save globals to their cannonical location and assume they can be 1436 1419 modified be the following code. 'allocated_regs' is used in case a 1437 1420 temporary registers needs to be allocated to store a constant. */ … … 1943 1926 tcg_dump_ops(s, logfile); 1944 1927 fprintf(logfile, "\n"); 1945 fflush(logfile);1946 1928 } 1947 1929 #endif -
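The helpers hunk above (n *= 2 followed by qemu_realloc) grows the helper table by doubling its capacity. The same growth pattern with plain realloc, as a standalone sketch:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        size_t cap = 4, len = 0;
        int *v = malloc(cap * sizeof *v);
        if (!v)
            return 1;
        for (int i = 0; i < 100; i++) {
            if (len == cap) {                       /* table full: double it */
                int *nv = realloc(v, cap * 2 * sizeof *v);
                if (!nv) { free(v); return 1; }
                v = nv;
                cap *= 2;
            }
            v[len++] = i;
        }
        printf("len=%zu cap=%zu\n", len, cap);
        free(v);
        return 0;
    }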
trunk/src/recompiler/tcg/tcg.h
r36125 r36140 22 22 * THE SOFTWARE. 23 23 */ 24 25 24 #include "tcg-target.h" 26 25 … … 110 109 typedef tcg_target_ulong TCGArg; 111 110 112 /* Define a type and accessor macros for var iables. Using a struct is111 /* Define a type and accessor macros for varables. Using a struct is 113 112 nice because it gives some level of type safely. Ideally the compiler 114 113 be able to see through all this. However in practice this is not true, … … 194 193 unsigned int mem_coherent:1; 195 194 unsigned int mem_allocated:1; 196 unsigned int temp_local:1; /* If true, the temp is saved ac ross195 unsigned int temp_local:1; /* If true, the temp is saved accross 197 196 basic blocks. Otherwise, it is not 198 preserved ac ross basic blocks. */197 preserved accross basic blocks. */ 199 198 unsigned int temp_allocated:1; /* never used for code gen */ 200 199 /* index of next free temp of same base type, -1 if end */ … … 370 369 abort();\ 371 370 } while (0) 372 #else 373 #define VBOX_STR(x) #x 374 #define VBOX_XSTR(x) VBOX_STR(x) 375 #define tcg_abort() \ 376 do {\ 377 remAbort(-1, "TCG fatal error: "__FILE__":"VBOX_XSTR(__LINE__)); \ 378 } while (0) 371 #else /* VBOX */ 372 # define tcg_abort() \ 373 do {\ 374 remAbort(-1, "TCG fatal error: "__FILE__":" RT_XSTR(__LINE__)); \ 375 } while (0) 379 376 extern void qemu_qsort(void* base, size_t nmemb, size_t size, 380 377 int(*compar)(const void*, const void*)); 381 378 #define tcg_exit(status) \ 382 do {\383 remAbort(-1, "TCG exit: "__FILE__":"VBOX_XSTR(__LINE__));\384 } while (0)385 #endif 379 do {\ 380 remAbort(-1, "TCG exit: "__FILE__":" RT_XSTR(__LINE__));\ 381 } while (0) 382 #endif /* VBOX */ 386 383 387 384 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs); … … 440 437 #else 441 438 442 # if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)443 # define tcg_qemu_tb_exec(tb_ptr, ret) \439 # if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM) 440 # define tcg_qemu_tb_exec(tb_ptr, ret) \ 444 441 __asm__ __volatile__("call *%%ecx" : "=a"(ret) : "a"(tb_ptr), "c" (&code_gen_prologue[0]) : "memory", "%edx", "cc") 445 # else442 # else /* !VBOX || !GCC_WITH_BUGGY_REG_PARAM */ 446 443 #define tcg_qemu_tb_exec(tb_ptr) ((long REGPARM (*)(void *))code_gen_prologue)(tb_ptr) 447 # endif448 449 #endif 444 # endif /* !VBOX || !GCC_WITH_BUGGY_REG_PARAM */ 445 446 #endif -
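tcg_abort() above builds its message by pasting __FILE__ and the current line together, now via IPRT's RT_XSTR instead of a local VBOX_XSTR. The double-macro trick it depends on — expand first, then stringify — in a standalone sketch:

    #include <stdio.h>

    #define STR(x)  #x      /* stringifies the token as written */
    #define XSTR(x) STR(x)  /* expands x first, then stringifies */

    int main(void)
    {
        printf("%s\n", STR(__LINE__));   /* prints: __LINE__ */
        printf("%s\n", XSTR(__LINE__));  /* prints the real line number */
        return 0;
    }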
trunk/src/recompiler/tcg/x86_64/tcg-target.c
r36125 r36140 253 253 } 254 254 255 static inline void tcg_out_push(TCGContext *s, int reg)256 {257 tcg_out_opc(s, (0x50 + (reg & 7)), 0, reg, 0);258 }259 260 static inline void tcg_out_pop(TCGContext *s, int reg)261 {262 tcg_out_opc(s, (0x58 + (reg & 7)), 0, reg, 0);263 }264 265 266 255 /* rm < 0 means no register index plus (-rm - 1 immediate bytes) */ 267 256 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm, … … 633 622 } 634 623 635 #endif 624 #endif /* VBOX && REM_PHYS_ADDR_IN_TLB */ 636 625 637 626 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, … … 812 801 tcg_abort(); 813 802 } 814 #else /* VBOX*/803 #else /* VBOX && REM_PHYS_ADDR_IN_TLB */ 815 804 tcg_out_vbox_phys_read(s, opc, r0, data_reg); 816 #endif /* VBOX */805 #endif /* VBOX && REM_PHYS_ADDR_IN_TLB */ 817 806 818 807 #if defined(CONFIG_SOFTMMU) … … 964 953 tcg_abort(); 965 954 } 966 #else /* VBOX*/955 #else /* VBOX && REM_PHYS_ADDR_IN_TLB */ 967 956 tcg_out_vbox_phys_write(s, opc, r0, data_reg); 968 #endif /* VBOX */957 #endif /* VBOX && REM_PHYS_ADDR_IN_TLB */ 969 958 970 959 #if defined(CONFIG_SOFTMMU) … … 1003 992 args[0])); 1004 993 #else 1005 /* @todo: can we clobber RAX here? */994 /** @todo: can we clobber RAX here? */ 1006 995 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RAX, 1007 996 (tcg_target_long)&(s->tb_next[args[0]])); … … 1319 1308 }; 1320 1309 1310 static inline void tcg_out_push(TCGContext *s, int reg) 1311 { 1312 tcg_out_opc(s, (0x50 + (reg & 7)), 0, reg, 0); 1313 } 1314 1315 static inline void tcg_out_pop(TCGContext *s, int reg) 1316 { 1317 tcg_out_opc(s, (0x58 + (reg & 7)), 0, reg, 0); 1318 } 1319 1321 1320 /* Generate global QEMU prologue and epilogue code */ 1322 1321 void tcg_target_qemu_prologue(TCGContext *s) -
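The relocated tcg_out_push/tcg_out_pop above emit "push r64" as opcode 0x50 plus the low three register bits, with tcg_out_opc supplying a REX.B prefix for r8-r15. A standalone sketch of that encoding (the buffer and helper names here are made up for the example):

    #include <stdio.h>
    #include <stdint.h>

    static int emit_push(uint8_t *buf, unsigned reg)    /* reg: 0..15 */
    {
        int n = 0;
        if (reg >= 8)
            buf[n++] = 0x41;            /* REX.B: select the r8-r15 bank */
        buf[n++] = 0x50 + (reg & 7);    /* push opcode + low 3 bits */
        return n;
    }

    int main(void)
    {
        uint8_t buf[2];
        int n = emit_push(buf, 13);     /* push %r13 -> 41 55 */
        for (int i = 0; i < n; i++)
            printf("%02x ", buf[i]);
        printf("\n");
        return 0;
    }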
trunk/src/recompiler/tcg/x86_64/tcg-target.h
r29520 r36140 68 68 #define TCG_TARGET_HAS_ext32s_i64 69 69 70 /* Must be in sync with dyngen register notion, seedyngen-exec.h */70 /* Note: must be synced with dyngen-exec.h */ 71 71 #define TCG_AREG0 TCG_REG_R14 72 72 #define TCG_AREG1 TCG_REG_R15 -
trunk/src/recompiler/tests/Makefile
r2426 r36140 1 1 -include ../config-host.mak 2 VPATH=$(SRC_PATH)/tests 2 3 3 CFLAGS=-Wall -O2 -g 4 CFLAGS=-Wall -O2 -g -fno-strict-aliasing 4 5 #CFLAGS+=-msse2 5 6 LDFLAGS= 6 7 7 8 ifeq ($(ARCH),i386) 8 TESTS=linux-test testthread sha1-i386 test-i386 runcom9 TESTS=linux-test testthread sha1-i386 test-i386 9 10 endif 10 11 ifeq ($(ARCH),x86_64) … … 13 14 TESTS+=sha1# test_path 14 15 #TESTS+=test_path 16 #TESTS+=runcom 15 17 16 QEMU=../i386- user/qemu-i38618 QEMU=../i386-linux-user/qemu-i386 17 19 18 20 all: $(TESTS) … … 32 34 test-i386: test-i386.c test-i386-code16.S test-i386-vm86.S \ 33 35 test-i386.h test-i386-shift.h test-i386-muldiv.h 34 $(CC) $(CFLAGS) $(LDFLAGS) -static -o $@ \35 test-i386.c test-i386-code16.Stest-i386-vm86.S -lm36 $(CC) -m32 $(CFLAGS) $(LDFLAGS) -static -o $@ \ 37 $(<D)/test-i386.c $(<D)/test-i386-code16.S $(<D)/test-i386-vm86.S -lm 36 38 37 39 test-x86_64: test-i386.c \ 38 40 test-i386.h test-i386-shift.h test-i386-muldiv.h 39 $(CC) $(CFLAGS) $(LDFLAGS) -static -o $@test-i386.c -lm41 $(CC) -m64 $(CFLAGS) $(LDFLAGS) -static -o $@ $(<D)/test-i386.c -lm 40 42 41 43 ifeq ($(ARCH),i386) … … 47 49 $(QEMU) test-i386 > test-i386.out 48 50 @if diff -u test-i386.ref test-i386.out ; then echo "Auto Test OK"; fi 49 ifeq ($(ARCH),i386) 50 $(QEMU) -no-code-copy test-i386 > test-i386.out 51 @if diff -u test-i386.ref test-i386.out ; then echo "Auto Test OK (no code copy)"; fi 52 endif 51 52 .PHONY: test-mmap 53 test-mmap: test-mmap.c 54 $(CC) $(CFLAGS) -Wall -static -O2 $(LDFLAGS) -o $@ $< 55 -./test-mmap 56 -$(QEMU) ./test-mmap 57 -$(QEMU) -p 8192 ./test-mmap 8192 58 -$(QEMU) -p 16384 ./test-mmap 16384 59 -$(QEMU) -p 32768 ./test-mmap 32768 53 60 54 61 # generic Linux and CPU test … … 83 90 arm-linux-gcc -Wall -g -O2 -c -o $@ $< 84 91 92 test-arm-iwmmxt: test-arm-iwmmxt.s 93 cpp < $< | arm-linux-gnu-gcc -Wall -static -march=iwmmxt -mabi=aapcs -x assembler - -o $@ 94 85 95 # MIPS test 86 96 hello-mips: hello-mips.c … … 90 100 mipsel-linux-gnu-gcc -nostdlib -static -mno-abicalls -fno-PIC -mabi=32 -Wall -Wextra -g -O2 -o $@ $< 91 101 92 # XXX: find a way to compile easily a test for each arch 93 test2: 94 @for arch in i386 arm armeb sparc ppc mips mipsel; do \ 95 ../$${arch}-user/qemu-$${arch} $${arch}/ls -l linux-test.c ; \ 96 done 102 # testsuite for the CRIS port. 103 test-cris: 104 $(MAKE) -C cris check 97 105 98 106 clean: -
trunk/src/recompiler/tests/qruncom.c
r33540 r36140 60 60 } 61 61 62 static void set_gate(void *ptr, unsigned int type, unsigned int dpl, 62 static void set_gate(void *ptr, unsigned int type, unsigned int dpl, 63 63 unsigned long addr, unsigned int sel) 64 64 { … … 142 142 } 143 143 144 static void host_segv_handler(int host_signum, siginfo_t *info, 144 static void host_segv_handler(int host_signum, siginfo_t *info, 145 145 void *puc) 146 146 { … … 161 161 usage(); 162 162 filename = argv[1]; 163 164 vm86_mem = mmap((void *)0x00000000, 0x110000, 165 PROT_WRITE | PROT_READ | PROT_EXEC, 163 164 vm86_mem = mmap((void *)0x00000000, 0x110000, 165 PROT_WRITE | PROT_READ | PROT_EXEC, 166 166 MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); 167 167 if (vm86_mem == MAP_FAILED) { … … 186 186 { 187 187 struct sigaction act; 188 188 189 189 sigfillset(&act.sa_mask); 190 190 act.sa_flags = SA_SIGINFO; … … 194 194 sigaction(SIGSEGV, &act, NULL); 195 195 sigaction(SIGBUS, &act, NULL); 196 #if defined (TARGET_I386) && defined(USE_CODE_COPY)197 sigaction(SIGFPE, &act, NULL);198 #endif199 196 } 200 197 201 198 // cpu_set_log(CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OUT_ASM | CPU_LOG_EXEC); 202 199 203 env = cpu_init(); 204 205 /* disable code copy to simplify debugging */ 206 code_copy_enabled = 0; 200 env = cpu_init("qemu32"); 207 201 208 202 /* set user mode state (XXX: should be done automatically by … … 219 213 mode. We also activate the VM86 flag to run DOS code */ 220 214 env->eflags |= IF_MASK | VM_MASK; 221 215 222 216 /* init basic registers */ 223 217 env->eip = 0x100; … … 225 219 seg = (COM_BASE_ADDR - 0x100) >> 4; 226 220 227 cpu_x86_load_seg_cache(env, R_CS, seg, 228 (seg << 4), 0xffff, 0); 229 cpu_x86_load_seg_cache(env, R_SS, seg, 230 (seg << 4), 0xffff, 0); 231 cpu_x86_load_seg_cache(env, R_DS, seg, 232 (seg << 4), 0xffff, 0); 233 cpu_x86_load_seg_cache(env, R_ES, seg, 234 (seg << 4), 0xffff, 0); 235 cpu_x86_load_seg_cache(env, R_FS, seg, 236 (seg << 4), 0xffff, 0); 237 cpu_x86_load_seg_cache(env, R_GS, seg, 221 cpu_x86_load_seg_cache(env, R_CS, seg, 222 (seg << 4), 0xffff, 0); 223 cpu_x86_load_seg_cache(env, R_SS, seg, 224 (seg << 4), 0xffff, 0); 225 cpu_x86_load_seg_cache(env, R_DS, seg, 226 (seg << 4), 0xffff, 0); 227 cpu_x86_load_seg_cache(env, R_ES, seg, 228 (seg << 4), 0xffff, 0); 229 cpu_x86_load_seg_cache(env, R_FS, seg, 230 (seg << 4), 0xffff, 0); 231 cpu_x86_load_seg_cache(env, R_GS, seg, 238 232 (seg << 4), 0xffff, 0); 239 233 … … 261 255 set_idt(18, 0); 262 256 set_idt(19, 0); 263 257 264 258 /* put return code */ 265 259 *seg_to_linear(env->segs[R_CS].selector, 0) = 0xb4; /* mov ah, $0 */ … … 275 269 env->regs[R_EDI] = 0xfffe; 276 270 277 /* inform the emulator of the m apped memory */278 page_set_flags(0x00000000, 0x110000, 271 /* inform the emulator of the mmaped memory */ 272 page_set_flags(0x00000000, 0x110000, 279 273 PAGE_WRITE | PAGE_READ | PAGE_EXEC | PAGE_VALID); 280 274 -
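qruncom.c above loads every segment register with base = selector << 4, i.e. classic real-mode address formation, and picks the selector so that CS:0x0100 lands on the .com image's entry point. The arithmetic, as a standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t real_mode_linear(uint16_t seg, uint16_t off)
    {
        return ((uint32_t)seg << 4) + off;  /* seg:off -> linear address */
    }

    int main(void)
    {
        /* the .com layout used above: CS = (COM_BASE_ADDR - 0x100) >> 4,
           so CS:0x0100 hits the load address; 0x10100 is an assumed base */
        uint16_t seg = (0x10100 - 0x100) >> 4;
        printf("entry = 0x%05x\n", real_mode_linear(seg, 0x100));
        return 0;
    }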
trunk/src/recompiler/tests/test-i386-code16.S
r2426 r36140 8 8 9 9 .globl code16_func1 10 10 11 11 /* basic test */ 12 12 code16_func1 = . - code16_start … … 25 25 data32 lret 26 26 27 /* test various jmp opcodes */ 27 /* test various jmp opcodes */ 28 28 .globl code16_func3 29 29 code16_func3 = . - code16_start … … 37 37 add $2, %ax 38 38 2: 39 39 40 40 call myfunc 41 41 42 42 lcall $CS_SEG, $(myfunc2 - code16_start) 43 43 … … 45 45 myjmp1_next: 46 46 47 cs lcall myfunc2_addr - code16_start47 cs lcall *myfunc2_addr - code16_start 48 48 49 cs ljmp myjmp2_addr - code16_start49 cs ljmp *myjmp2_addr - code16_start 50 50 myjmp2_next: 51 51 52 52 data32 lret 53 53 54 54 myfunc2_addr: 55 55 .short myfunc2 - code16_start -
trunk/src/recompiler/tests/test-i386-shift.h
r2426 r36140 146 146 exec_opl(s2, s0, s1, 0); 147 147 #ifdef OP_SHIFTD 148 if (s1 <= 15) 149 exec_opw(s2, s0, s1, 0); 148 exec_opw(s2, s0, s1, 0); 150 149 #else 151 150 exec_opw(s2, s0, s1, 0); -
trunk/src/recompiler/tests/test-i386-vm86.S
r1 r36140 15 15 es movw $GET_OFFSET(int90_test), 0x90 * 4 16 16 es movw %cs, 0x90 * 4 + 2 17 17 18 18 /* launch int 0x90 */ 19 19 … … 25 25 int $0x21 26 26 27 pushf 27 pushf 28 28 popw %dx 29 29 movb $0xff, %ah … … 31 31 32 32 cli 33 pushf 33 pushf 34 34 popw %dx 35 35 movb $0xff, %ah 36 36 int $0x21 37 37 38 sti 39 pushfl 38 sti 39 pushfl 40 40 popl %edx 41 41 movb $0xff, %ah 42 42 int $0x21 43 43 44 44 #if 0 45 45 movw $GET_OFFSET(IF_msg1), %dx … … 55 55 #endif 56 56 57 pushf 57 pushf 58 58 popw %dx 59 59 movb $0xff, %ah 60 60 int $0x21 61 61 62 62 pushfl 63 63 movw %sp, %bx … … 74 74 75 75 int90_test: 76 pushf 76 pushf 77 77 pop %dx 78 78 movb $0xff, %ah … … 83 83 movb $0xff, %ah 84 84 int $0x21 85 85 86 86 movw $GET_OFFSET(int90_msg), %dx 87 87 movb $0x09, %ah 88 88 int $0x21 89 89 iret 90 90 91 91 int90_msg: 92 92 .string "INT90 started\n$" 93 93 94 94 hello_world: 95 95 .string "Hello VM86 world\n$" … … 102 102 103 103 vm86_code_end: 104 -
trunk/src/recompiler/tests/test-i386.c
r33656 r36140 1 1 /* 2 2 * x86 CPU test 3 * 3 * 4 4 * Copyright (c) 2003 Fabrice Bellard 5 5 * … … 41 41 42 42 #if !defined(__x86_64__) 43 #define TEST_VM8643 //#define TEST_VM86 44 44 #define TEST_SEGS 45 45 #endif 46 46 //#define LINUX_VM86_IOPL_FIX 47 47 //#define TEST_P4_FLAGS 48 #if defined(__x86_64__)48 #ifdef __SSE__ 49 49 #define TEST_SSE 50 50 #define TEST_CMOV 1 51 51 #define TEST_FCOMI 1 52 52 #else 53 //#defineTEST_SSE54 #define TEST_CMOV 055 #define TEST_FCOMI 053 #undef TEST_SSE 54 #define TEST_CMOV 1 55 #define TEST_FCOMI 1 56 56 #endif 57 57 … … 468 468 } 469 469 470 #define TEST_LOOP(insn) \ 471 {\ 472 for(i = 0; i < sizeof(ecx_vals) / sizeof(long); i++) {\ 473 ecx = ecx_vals[i];\ 474 for(zf = 0; zf < 2; zf++) {\ 475 asm("test %2, %2\n\t"\ 476 "movl $1, %0\n\t"\ 477 insn " 1f\n\t" \ 478 "movl $0, %0\n\t"\ 479 "1:\n\t"\ 480 : "=a" (res)\ 481 : "c" (ecx), "b" (!zf)); \ 482 printf("%-10s ECX=" FMTLX " ZF=%ld r=%d\n", insn, ecx, zf, res); \ 483 }\ 484 }\ 485 } 486 487 void test_loop(void) 488 { 489 long ecx, zf; 490 const long ecx_vals[] = { 491 0, 492 1, 493 0x10000, 494 0x10001, 495 #if defined(__x86_64__) 496 0x100000000L, 497 0x100000001L, 498 #endif 499 }; 500 int i, res; 501 502 #if !defined(__x86_64__) 503 TEST_LOOP("jcxz"); 504 TEST_LOOP("loopw"); 505 TEST_LOOP("loopzw"); 506 TEST_LOOP("loopnzw"); 507 #endif 508 509 TEST_LOOP("jecxz"); 510 TEST_LOOP("loopl"); 511 TEST_LOOP("loopzl"); 512 TEST_LOOP("loopnzl"); 513 } 514 470 515 #undef CC_MASK 471 516 #ifdef TEST_P4_FLAGS … … 481 526 #include "test-i386-muldiv.h" 482 527 483 void test_imulw2(long op0, long op1) 528 void test_imulw2(long op0, long op1) 484 529 { 485 530 long res, s1, s0, flags; … … 490 535 asm volatile ("push %4\n\t" 491 536 "popf\n\t" 492 "imulw %w2, %w0\n\t" 537 "imulw %w2, %w0\n\t" 493 538 "pushf\n\t" 494 539 "pop %1\n\t" … … 499 544 } 500 545 501 void test_imull2(long op0, long op1) 546 void test_imull2(long op0, long op1) 502 547 { 503 548 long res, s1, s0, flags; … … 508 553 asm volatile ("push %4\n\t" 509 554 "popf\n\t" 510 "imull %k2, %k0\n\t" 555 "imull %k2, %k0\n\t" 511 556 "pushf\n\t" 512 557 "pop %1\n\t" … … 518 563 519 564 #if defined(__x86_64__) 520 void test_imulq2(long op0, long op1) 565 void test_imulq2(long op0, long op1) 521 566 { 522 567 long res, s1, s0, flags; … … 527 572 asm volatile ("push %4\n\t" 528 573 "popf\n\t" 529 "imulq %2, %0\n\t" 574 "imulq %2, %0\n\t" 530 575 "pushf\n\t" 531 576 "pop %1\n\t" … … 684 729 "mov $0x12345678, %0\n"\ 685 730 #op " %" size "2, %" size "0 ; setz %b1" \ 686 : "= r" (res), "=q" (resz)\687 : " g" (val));\731 : "=&r" (res), "=&q" (resz)\ 732 : "r" (val));\ 688 733 printf("%-10s A=" FMTLX " R=" FMTLX " %ld\n", #op, val, res, resz);\ 689 734 } … … 714 759 }; 715 760 716 union float64u q_nan = { .l = 0xFFF8000000000000 };717 union float64u s_nan = { .l = 0xFFF0000000000000 };761 union float64u q_nan = { .l = 0xFFF8000000000000LL }; 762 union float64u s_nan = { .l = 0xFFF0000000000000LL }; 718 763 719 764 void test_fops(double a, double b) … … 750 795 long double fpregs[8]; 751 796 } float_env32; 752 797 753 798 asm volatile ("fnstenv %0\n" : : "m" (float_env32)); 754 799 float_env32.fpus &= ~0x7f; … … 769 814 : "=a" (fpus) 770 815 : "t" (a), "u" (b)); 771 printf("fcom(%f %f)=%04lx \n", 816 printf("fcom(%f %f)=%04lx \n", 772 817 a, b, fpus & (0x4500 | FPUS_EMASK)); 773 818 fpu_clear_exceptions(); … … 776 821 : "=a" (fpus) 777 822 : "t" (a), "u" (b)); 778 printf("fucom(%f %f)=%04lx\n", 823 printf("fucom(%f %f)=%04lx\n", 779 824 a, b, fpus & (0x4500 | 
FPUS_EMASK)); 780 825 if (TEST_FCOMI) { … … 787 832 : "=r" (eflags), "=a" (fpus) 788 833 : "t" (a), "u" (b)); 789 printf("fcomi(%f %f)=%04lx %02lx\n", 834 printf("fcomi(%f %f)=%04lx %02lx\n", 790 835 a, b, fpus & FPUS_EMASK, eflags & (CC_Z | CC_P | CC_C)); 791 836 fpu_clear_exceptions(); … … 796 841 : "=r" (eflags), "=a" (fpus) 797 842 : "t" (a), "u" (b)); 798 printf("fucomi(%f %f)=%04lx %02lx\n", 843 printf("fucomi(%f %f)=%04lx %02lx\n", 799 844 a, b, fpus & FPUS_EMASK, eflags & (CC_Z | CC_P | CC_C)); 800 845 } … … 824 869 printf("(long double)%f = %Lf\n", a, la); 825 870 printf("a=" FMT64X "\n", *(uint64_t *)&a); 826 printf("la=" FMT64X " %04x\n", *(uint64_t *)&la, 871 printf("la=" FMT64X " %04x\n", *(uint64_t *)&la, 827 872 *(unsigned short *)((char *)(&la) + 8)); 828 873 … … 830 875 asm volatile ("fstcw %0" : "=m" (fpuc)); 831 876 for(i=0;i<4;i++) { 832 asm volatile ("fldcw %0" : : "m" ((fpuc & ~0x0c00) | (i << 10))); 877 uint16_t val16; 878 val16 = (fpuc & ~0x0c00) | (i << 10); 879 asm volatile ("fldcw %0" : : "m" (val16)); 833 880 asm volatile ("fist %0" : "=m" (wa) : "t" (a)); 834 881 asm volatile ("fistl %0" : "=m" (ia) : "t" (a)); … … 866 913 asm("fbstp %0" : "=m" (bcd[0]) : "t" (a) : "st"); 867 914 asm("fbld %1" : "=t" (b) : "m" (bcd[0])); 868 printf("a=%f bcd=%04x%04x%04x%04x%04x b=%f\n", 915 printf("a=%f bcd=%04x%04x%04x%04x%04x b=%f\n", 869 916 a, bcd[4], bcd[3], bcd[2], bcd[1], bcd[0], b); 870 917 } … … 987 1034 test_fcvt(q_nan.d); 988 1035 test_fconst(); 989 test_fbcd(1234567890123456 );990 test_fbcd(-123451234567890 );1036 test_fbcd(1234567890123456.0); 1037 test_fbcd(-123451234567890.0); 991 1038 test_fenv(); 992 1039 if (TEST_CMOV) { … … 1052 1099 TEST_BCD(aaa, 0x1234040a, 0, (CC_C | CC_A)); 1053 1100 TEST_BCD(aaa, 0x123405fa, 0, (CC_C | CC_A)); 1054 1101 1055 1102 TEST_BCD(aas, 0x12340205, CC_A, (CC_C | CC_A)); 1056 1103 TEST_BCD(aas, 0x12340306, CC_A, (CC_C | CC_A)); … … 1074 1121 asm(#op " %" size "0, %" size "1" \ 1075 1122 : "=q" (op0), opconst (op1) \ 1076 : "0" (op0) , "1" (op1));\1123 : "0" (op0));\ 1077 1124 printf("%-10s A=" FMTLX " B=" FMTLX "\n",\ 1078 1125 #op, op0, op1);\ … … 1087 1134 asm(#op " %" size "0, %" size "1" \ 1088 1135 : "=q" (op0), opconst (op1) \ 1089 : "0" (op0), " 1" (op1), "a" (op2));\1136 : "0" (op0), "a" (op2));\ 1090 1137 printf("%-10s EAX=" FMTLX " A=" FMTLX " C=" FMTLX "\n",\ 1091 1138 #op, op2, op0, op1);\ … … 1095 1142 { 1096 1143 #if defined(__x86_64__) 1097 TEST_XCHG(xchgq, "", " =q");1098 #endif 1099 TEST_XCHG(xchgl, "k", " =q");1100 TEST_XCHG(xchgw, "w", " =q");1101 TEST_XCHG(xchgb, "b", " =q");1144 TEST_XCHG(xchgq, "", "+q"); 1145 #endif 1146 TEST_XCHG(xchgl, "k", "+q"); 1147 TEST_XCHG(xchgw, "w", "+q"); 1148 TEST_XCHG(xchgb, "b", "+q"); 1102 1149 1103 1150 #if defined(__x86_64__) 1104 1151 TEST_XCHG(xchgq, "", "=m"); 1105 1152 #endif 1106 TEST_XCHG(xchgl, "k", " =m");1107 TEST_XCHG(xchgw, "w", " =m");1108 TEST_XCHG(xchgb, "b", " =m");1153 TEST_XCHG(xchgl, "k", "+m"); 1154 TEST_XCHG(xchgw, "w", "+m"); 1155 TEST_XCHG(xchgb, "b", "+m"); 1109 1156 1110 1157 #if defined(__x86_64__) 1111 TEST_XCHG(xaddq, "", " =q");1112 #endif 1113 TEST_XCHG(xaddl, "k", " =q");1114 TEST_XCHG(xaddw, "w", " =q");1115 TEST_XCHG(xaddb, "b", " =q");1158 TEST_XCHG(xaddq, "", "+q"); 1159 #endif 1160 TEST_XCHG(xaddl, "k", "+q"); 1161 TEST_XCHG(xaddw, "w", "+q"); 1162 TEST_XCHG(xaddb, "b", "+q"); 1116 1163 1117 1164 { … … 1123 1170 1124 1171 #if defined(__x86_64__) 1125 TEST_XCHG(xaddq, "", " =m");1126 #endif 1127 TEST_XCHG(xaddl, "k", " =m");1128 
TEST_XCHG(xaddw, "w", " =m");1129 TEST_XCHG(xaddb, "b", " =m");1172 TEST_XCHG(xaddq, "", "+m"); 1173 #endif 1174 TEST_XCHG(xaddl, "k", "+m"); 1175 TEST_XCHG(xaddw, "w", "+m"); 1176 TEST_XCHG(xaddb, "b", "+m"); 1130 1177 1131 1178 #if defined(__x86_64__) 1132 TEST_CMPXCHG(cmpxchgq, "", " =q", 0xfbca7654);1133 #endif 1134 TEST_CMPXCHG(cmpxchgl, "k", " =q", 0xfbca7654);1135 TEST_CMPXCHG(cmpxchgw, "w", " =q", 0xfbca7654);1136 TEST_CMPXCHG(cmpxchgb, "b", " =q", 0xfbca7654);1179 TEST_CMPXCHG(cmpxchgq, "", "+q", 0xfbca7654); 1180 #endif 1181 TEST_CMPXCHG(cmpxchgl, "k", "+q", 0xfbca7654); 1182 TEST_CMPXCHG(cmpxchgw, "w", "+q", 0xfbca7654); 1183 TEST_CMPXCHG(cmpxchgb, "b", "+q", 0xfbca7654); 1137 1184 1138 1185 #if defined(__x86_64__) 1139 TEST_CMPXCHG(cmpxchgq, "", " =q", 0xfffefdfc);1140 #endif 1141 TEST_CMPXCHG(cmpxchgl, "k", " =q", 0xfffefdfc);1142 TEST_CMPXCHG(cmpxchgw, "w", " =q", 0xfffefdfc);1143 TEST_CMPXCHG(cmpxchgb, "b", " =q", 0xfffefdfc);1186 TEST_CMPXCHG(cmpxchgq, "", "+q", 0xfffefdfc); 1187 #endif 1188 TEST_CMPXCHG(cmpxchgl, "k", "+q", 0xfffefdfc); 1189 TEST_CMPXCHG(cmpxchgw, "w", "+q", 0xfffefdfc); 1190 TEST_CMPXCHG(cmpxchgb, "b", "+q", 0xfffefdfc); 1144 1191 1145 1192 #if defined(__x86_64__) 1146 TEST_CMPXCHG(cmpxchgq, "", " =m", 0xfbca7654);1147 #endif 1148 TEST_CMPXCHG(cmpxchgl, "k", " =m", 0xfbca7654);1149 TEST_CMPXCHG(cmpxchgw, "w", " =m", 0xfbca7654);1150 TEST_CMPXCHG(cmpxchgb, "b", " =m", 0xfbca7654);1193 TEST_CMPXCHG(cmpxchgq, "", "+m", 0xfbca7654); 1194 #endif 1195 TEST_CMPXCHG(cmpxchgl, "k", "+m", 0xfbca7654); 1196 TEST_CMPXCHG(cmpxchgw, "w", "+m", 0xfbca7654); 1197 TEST_CMPXCHG(cmpxchgb, "b", "+m", 0xfbca7654); 1151 1198 1152 1199 #if defined(__x86_64__) 1153 TEST_CMPXCHG(cmpxchgq, "", " =m", 0xfffefdfc);1154 #endif 1155 TEST_CMPXCHG(cmpxchgl, "k", " =m", 0xfffefdfc);1156 TEST_CMPXCHG(cmpxchgw, "w", " =m", 0xfffefdfc);1157 TEST_CMPXCHG(cmpxchgb, "b", " =m", 0xfffefdfc);1200 TEST_CMPXCHG(cmpxchgq, "", "+m", 0xfffefdfc); 1201 #endif 1202 TEST_CMPXCHG(cmpxchgl, "k", "+m", 0xfffefdfc); 1203 TEST_CMPXCHG(cmpxchgw, "w", "+m", 0xfffefdfc); 1204 TEST_CMPXCHG(cmpxchgb, "b", "+m", 0xfffefdfc); 1158 1205 1159 1206 { 1160 1207 uint64_t op0, op1, op2; 1208 long eax, edx; 1161 1209 long i, eflags; 1162 1210 1163 1211 for(i = 0; i < 2; i++) { 1164 op0 = 0x123456789abcd; 1212 op0 = 0x123456789abcdLL; 1213 eax = i2l(op0 & 0xffffffff); 1214 edx = i2l(op0 >> 32); 1165 1215 if (i == 0) 1166 op1 = 0xfbca765423456 ;1216 op1 = 0xfbca765423456LL; 1167 1217 else 1168 1218 op1 = op0; 1169 op2 = 0x6532432432434 ;1170 asm("cmpxchg8b % 1\n"1219 op2 = 0x6532432432434LL; 1220 asm("cmpxchg8b %2\n" 1171 1221 "pushf\n" 1172 "pop % 2\n"1173 : "= A" (op0), "=m" (op1), "=g" (eflags)1174 : "0" ( op0), "m" (op1), "b" ((int)op2), "c" ((int)(op2 >> 32)));1175 printf("cmpxchg8b: op0=" FMT64X " op1=" FMT64X " CC=%02lx\n",1176 op0, op1, eflags & CC_Z);1222 "pop %3\n" 1223 : "=a" (eax), "=d" (edx), "=m" (op1), "=g" (eflags) 1224 : "0" (eax), "1" (edx), "m" (op1), "b" ((int)op2), "c" ((int)(op2 >> 32))); 1225 printf("cmpxchg8b: eax=" FMTLX " edx=" FMTLX " op1=" FMT64X " CC=%02lx\n", 1226 eax, edx, op1, eflags & CC_Z); 1177 1227 } 1178 1228 } … … 1183 1233 /* segmentation tests */ 1184 1234 1235 #include <sys/syscall.h> 1236 #include <unistd.h> 1185 1237 #include <asm/ldt.h> 1186 #include <linux/unistd.h>1187 1238 #include <linux/version.h> 1188 1239 1189 _syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount) 1240 static inline int modify_ldt(int func, void * ptr, unsigned long bytecount) 1241 { 
1242 return syscall(__NR_modify_ldt, func, ptr, bytecount); 1243 } 1190 1244 1191 1245 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66) … … 1201 1255 {\ 1202 1256 int res, res2;\ 1257 uint16_t mseg = seg;\ 1203 1258 res = 0x12345678;\ 1204 1259 asm (op " %" size "2, %" size "0\n" \ … … 1207 1262 "movl $1, %1\n"\ 1208 1263 "1:\n"\ 1209 : "=r" (res), "=r" (res2) : "m" ( seg), "0" (res));\1264 : "=r" (res), "=r" (res2) : "m" (mseg), "0" (res));\ 1210 1265 printf(op ": Z=%d %08x\n", res2, res & ~(mask));\ 1266 } 1267 1268 #define TEST_ARPL(op, size, op1, op2)\ 1269 {\ 1270 long a, b, c; \ 1271 a = (op1); \ 1272 b = (op2); \ 1273 asm volatile(op " %" size "3, %" size "0\n"\ 1274 "movl $0,%1\n"\ 1275 "jnz 1f\n"\ 1276 "movl $1,%1\n"\ 1277 "1:\n"\ 1278 : "=r" (a), "=r" (c) : "0" (a), "r" (b)); \ 1279 printf(op size " A=" FMTLX " B=" FMTLX " R=" FMTLX " z=%ld\n",\ 1280 (long)(op1), (long)(op2), a, c);\ 1211 1281 } 1212 1282 … … 1287 1357 segoff.seg = MK_SEL(2); 1288 1358 segoff.offset = 0xabcdef12; 1289 asm volatile("lfs %2, %0\n\t" 1359 asm volatile("lfs %2, %0\n\t" 1290 1360 "movl %%fs, %1\n\t" 1291 : "=r" (res), "=g" (res2) 1361 : "=r" (res), "=g" (res2) 1292 1362 : "m" (segoff)); 1293 1363 printf("FS:reg = %04x:%08x\n", res2, res); … … 1302 1372 TEST_LR("lslw", "w", 0xfff8, 0); 1303 1373 TEST_LR("lsll", "", 0xfff8, 0); 1374 1375 TEST_ARPL("arpl", "w", 0x12345678 | 3, 0x762123c | 1); 1376 TEST_ARPL("arpl", "w", 0x12345678 | 1, 0x762123c | 3); 1377 TEST_ARPL("arpl", "w", 0x12345678 | 1, 0x762123c | 1); 1304 1378 } 1305 1379 … … 1328 1402 1329 1403 /* call the first function */ 1330 asm volatile ("lcall %1, %2" 1404 asm volatile ("lcall %1, %2" 1331 1405 : "=a" (res) 1332 1406 : "i" (MK_SEL(1)), "i" (&code16_func1): "memory", "cc"); 1333 1407 printf("func1() = 0x%08x\n", res); 1334 asm volatile ("lcall %2, %3" 1408 asm volatile ("lcall %2, %3" 1335 1409 : "=a" (res), "=c" (res2) 1336 1410 : "i" (MK_SEL(1)), "i" (&code16_func2): "memory", "cc"); 1337 1411 printf("func2() = 0x%08x spdec=%d\n", res, res2); 1338 asm volatile ("lcall %1, %2" 1412 asm volatile ("lcall %1, %2" 1339 1413 : "=a" (res) 1340 1414 : "i" (MK_SEL(1)), "i" (&code16_func3): "memory", "cc"); … … 1374 1448 1375 1449 #if defined(__x86_64__) 1450 #if 0 1376 1451 { 1452 /* XXX: see if Intel Core2 and AMD64 behavior really 1453 differ. Here we implemented the Intel way which is not 1454 compatible yet with QEMU. 
*/ 1377 1455 static struct __attribute__((packed)) { 1378 uint 32_t offset;1456 uint64_t offset; 1379 1457 uint16_t seg; 1380 1458 } desc; … … 1384 1462 1385 1463 asm volatile ("push %1\n" 1386 "call func_lret\n" 1464 "call func_lret\n" 1387 1465 : "=a" (res) 1388 1466 : "r" (cs_sel) : "memory", "cc"); 1389 1467 printf("func_lret=" FMTLX "\n", res); 1390 1468 1391 /* NOTE: we assume that &func_lret < 4GB */1392 1469 desc.offset = (long)&func_lret; 1393 1470 desc.seg = cs_sel; 1394 1471 1395 1472 asm volatile ("xor %%rax, %%rax\n" 1396 "rex64 lcall %1\n"1473 "rex64 lcall *(%%rcx)\n" 1397 1474 : "=a" (res) 1398 : " m" (desc)1475 : "c" (&desc) 1399 1476 : "memory", "cc"); 1400 1477 printf("func_lret2=" FMTLX "\n", res); … … 1403 1480 "mov $ 1f, %%rax\n" 1404 1481 "push %%rax\n" 1405 " ljmp %1\n"1482 "rex64 ljmp *(%%rcx)\n" 1406 1483 "1:\n" 1407 1484 : "=a" (res) 1408 : " m" (desc), "b" (cs_sel)1485 : "c" (&desc), "b" (cs_sel) 1409 1486 : "memory", "cc"); 1410 1487 printf("func_lret3=" FMTLX "\n", res); 1411 1488 } 1489 #endif 1412 1490 #else 1413 asm volatile ("push %%cs ; call %1" 1491 asm volatile ("push %%cs ; call %1" 1414 1492 : "=a" (res) 1415 1493 : "m" (func_lret): "memory", "cc"); 1416 1494 printf("func_lret=" FMTLX "\n", res); 1417 1495 1418 asm volatile ("pushf ; push %%cs ; call %1" 1496 asm volatile ("pushf ; push %%cs ; call %1" 1419 1497 : "=a" (res) 1420 1498 : "m" (func_iret): "memory", "cc"); … … 1483 1561 TEST_STRING(stos, "rep "); 1484 1562 TEST_STRING(lods, ""); /* to verify stos */ 1485 TEST_STRING(lods, "rep "); 1563 TEST_STRING(lods, "rep "); 1486 1564 TEST_STRING(movs, ""); 1487 1565 TEST_STRING(movs, "rep "); … … 1516 1594 } 1517 1595 1518 #undef __syscall_return 1519 #define __syscall_return(type, res) \ 1520 do { \ 1521 return (type) (res); \ 1522 } while (0) 1523 1524 _syscall2(int, vm86, int, func, struct vm86plus_struct *, v86) 1596 static inline int vm86(int func, struct vm86plus_struct *v86) 1597 { 1598 return syscall(__NR_vm86, func, v86); 1599 } 1525 1600 1526 1601 extern char vm86_code_start; … … 1537 1612 int seg, ret; 1538 1613 1539 vm86_mem = mmap((void *)0x00000000, 0x110000, 1540 PROT_WRITE | PROT_READ | PROT_EXEC, 1614 vm86_mem = mmap((void *)0x00000000, 0x110000, 1615 PROT_WRITE | PROT_READ | PROT_EXEC, 1541 1616 MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); 1542 1617 if (vm86_mem == MAP_FAILED) { … … 1561 1636 /* move code to proper address. We use the same layout as a .com 1562 1637 dos program. 
*/ 1563 memcpy(vm86_mem + (VM86_CODE_CS << 4) + VM86_CODE_IP, 1638 memcpy(vm86_mem + (VM86_CODE_CS << 4) + VM86_CODE_IP, 1564 1639 &vm86_code_start, &vm86_code_end - &vm86_code_start); 1565 1640 … … 1573 1648 { 1574 1649 int int_num, ah, v; 1575 1650 1576 1651 int_num = VM86_ARG(ret); 1577 1652 if (int_num != 0x21) … … 1676 1751 struct sigaction act; 1677 1752 volatile int val; 1678 1753 1679 1754 act.sa_sigaction = sig_handler; 1680 1755 sigemptyset(&act.sa_mask); … … 1729 1804 ldt.useable = 1; 1730 1805 modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */ 1731 1806 1732 1807 if (setjmp(jmp_env) == 0) { 1733 1808 /* segment not present */ … … 1754 1829 v1 = *(char *)0x1234; 1755 1830 } 1756 1831 1757 1832 /* test illegal instruction reporting */ 1758 1833 printf("UD2 exception:\n"); … … 1766 1841 asm volatile("lock nop"); 1767 1842 } 1768 1843 1769 1844 printf("INT exception:\n"); 1770 1845 if (setjmp(jmp_env) == 0) { … … 1838 1913 "orl $0x00100, (%%esp)\n" 1839 1914 "popf\n" 1840 "movl $0xabcd, %0\n" 1915 "movl $0xabcd, %0\n" 1841 1916 "movl $0x0, %0\n" : "=m" (val) : : "cc", "memory"); 1842 1917 } … … 1869 1944 "orl $0x00100, (%%esp)\n" 1870 1945 "popf\n" 1871 "movl $0xabcd, %0\n" 1946 "movl $0xabcd, %0\n" 1872 1947 1873 1948 /* jmp test */ … … 1895 1970 "movl $4, %%ecx\n" 1896 1971 "rep cmpsb\n" 1897 1972 1898 1973 /* getpid() syscall: single step should skip one 1899 1974 instruction */ … … 1901 1976 "int $0x80\n" 1902 1977 "movl $0, %%eax\n" 1903 1978 1904 1979 /* when modifying SS, trace is not done on the next 1905 1980 instruction */ … … 1917 1992 "addl $1, %0\n" 1918 1993 "movl $1, %%eax\n" 1919 1994 1920 1995 "pushf\n" 1921 1996 "andl $~0x00100, (%%esp)\n" 1922 1997 "popf\n" 1923 : "=m" (val) 1924 : 1998 : "=m" (val) 1999 : 1925 2000 : "cc", "memory", "eax", "ecx", "esi", "edi"); 1926 2001 printf("val=%d\n", val); … … 1935 2010 }; 1936 2011 1937 asm("smc_code2:\n" 2012 asm(".section \".data\"\n" 2013 "smc_code2:\n" 1938 2014 "movl 4(%esp), %eax\n" 1939 2015 "movl %eax, smc_patch_addr2 + 1\n" … … 1948 2024 "smc_patch_addr2:\n" 1949 2025 "movl $1, %eax\n" 1950 "ret\n"); 2026 "ret\n" 2027 ".previous\n" 2028 ); 1951 2029 1952 2030 typedef int FuncType(void); … … 1955 2033 { 1956 2034 int i; 1957 1958 2035 printf("self modifying code:\n"); 1959 2036 printf("func1 = 0x%x\n", ((FuncType *)code)()); … … 2037 2114 2038 2115 typedef int __m64 __attribute__ ((__mode__ (__V2SI__))); 2039 typedef int __m128 __attribute__ ((__mode__(__V4SF__)));2116 typedef float __m128 __attribute__ ((__mode__(__V4SF__))); 2040 2117 2041 2118 typedef union { … … 2211 2288 2212 2289 /* Force %xmm0 usage to avoid the case where both register index are 0 2213 to test in struction decoding more extensively */2290 to test intruction decoding more extensively */ 2214 2291 #define CVT_OP_XMM2MMX(op)\ 2215 2292 {\ 2216 2293 asm volatile (#op " %1, %0" : "=y" (r.q[0]) : "x" (a.dq) \ 2217 : "%xmm0");\ 2294 : "%xmm0"); \ 2295 asm volatile("emms\n"); \ 2218 2296 printf("%-9s: a=" FMT64X "" FMT64X " r=" FMT64X "\n",\ 2219 2297 #op,\ … … 2225 2303 {\ 2226 2304 asm volatile (#op " %1, %0" : "=x" (r.dq) : "y" (a.q[0]));\ 2305 asm volatile("emms\n"); \ 2227 2306 printf("%-9s: a=" FMT64X " r=" FMT64X "" FMT64X "\n",\ 2228 2307 #op,\ … … 2293 2372 " fxsave %1\n" 2294 2373 " fninit\n" 2295 : "=m" (*(uint32_t *)fp2), "=m" (*(uint32_t *)fp) 2374 : "=m" (*(uint32_t *)fp2), "=m" (*(uint32_t *)fp) 2296 2375 : "m" (a), "m" (b)); 2297 2376 printf("fpuc=%04x\n", fp->fpuc); … … 2300 2379 for(i = 0; i < 3; i++) { 2301 2380 
printf("ST%d: " FMT64X " %04x\n", 2302 i, 2381 i, 2303 2382 *(uint64_t *)&fp->fpregs1[i * 16], 2304 2383 *(uint16_t *)&fp->fpregs1[i * 16 + 8]); … … 2312 2391 for(i = 0; i < nb_xmm; i++) { 2313 2392 printf("xmm%d: " FMT64X "" FMT64X "\n", 2314 i, 2393 i, 2315 2394 *(uint64_t *)&fp->xmm_regs[i * 16], 2316 2395 *(uint64_t *)&fp->xmm_regs[i * 16 + 8]); … … 2352 2431 MMX_OP2(pmulhuw); 2353 2432 MMX_OP2(pmulhw); 2354 2433 2355 2434 MMX_OP2(psubsb); 2356 2435 MMX_OP2(psubsw); … … 2391 2470 asm volatile ("pmovmskb %1, %0" : "=r" (r.l[0]) : "y" (a.q[0])); 2392 2471 printf("%-9s: r=%08x\n", "pmovmskb", r.l[0]); 2393 2472 2394 2473 asm volatile ("pmovmskb %1, %0" : "=r" (r.l[0]) : "x" (a.dq)); 2395 2474 printf("%-9s: r=%08x\n", "pmovmskb", r.l[0]); … … 2403 2482 b.q[0] = test_values[1][0]; 2404 2483 b.q[1] = test_values[1][1]; 2405 asm volatile("maskmovq %1, %0" : 2484 asm volatile("maskmovq %1, %0" : 2406 2485 : "y" (a.q[0]), "y" (b.q[0]), "D" (&r) 2407 : "memory"); 2408 printf("%-9s: r=" FMT64X " a=" FMT64X " b=" FMT64X "\n", 2409 "maskmov", 2410 r.q[0], 2411 a.q[0], 2486 : "memory"); 2487 printf("%-9s: r=" FMT64X " a=" FMT64X " b=" FMT64X "\n", 2488 "maskmov", 2489 r.q[0], 2490 a.q[0], 2412 2491 b.q[0]); 2413 asm volatile("maskmovdqu %1, %0" : 2492 asm volatile("maskmovdqu %1, %0" : 2414 2493 : "x" (a.dq), "x" (b.dq), "D" (&r) 2415 : "memory"); 2416 printf("%-9s: r=" FMT64X "" FMT64X " a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X "\n", 2417 "maskmov", 2418 r.q[1], r.q[0], 2419 a.q[1], a.q[0], 2494 : "memory"); 2495 printf("%-9s: r=" FMT64X "" FMT64X " a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X "\n", 2496 "maskmov", 2497 r.q[1], r.q[0], 2498 a.q[1], a.q[0], 2420 2499 b.q[1], b.q[0]); 2421 2500 } … … 2517 2596 SSE_OPS(cmpnle); 2518 2597 SSE_OPS(cmpord); 2519 2520 2598 2599 2521 2600 a.d[0] = 2.7; 2522 2601 a.d[1] = -3.4; … … 2594 2673 #endif 2595 2674 2675 #define TEST_CONV_RAX(op)\ 2676 {\ 2677 unsigned long a, r;\ 2678 a = i2l(0x8234a6f8);\ 2679 r = a;\ 2680 asm volatile(#op : "=a" (r) : "0" (r));\ 2681 printf("%-10s A=" FMTLX " R=" FMTLX "\n", #op, a, r);\ 2682 } 2683 2684 #define TEST_CONV_RAX_RDX(op)\ 2685 {\ 2686 unsigned long a, d, r, rh; \ 2687 a = i2l(0x8234a6f8);\ 2688 d = i2l(0x8345a1f2);\ 2689 r = a;\ 2690 rh = d;\ 2691 asm volatile(#op : "=a" (r), "=d" (rh) : "0" (r), "1" (rh)); \ 2692 printf("%-10s A=" FMTLX " R=" FMTLX ":" FMTLX "\n", #op, a, r, rh); \ 2693 } 2694 2695 void test_conv(void) 2696 { 2697 TEST_CONV_RAX(cbw); 2698 TEST_CONV_RAX(cwde); 2699 #if defined(__x86_64__) 2700 TEST_CONV_RAX(cdqe); 2701 #endif 2702 2703 TEST_CONV_RAX_RDX(cwd); 2704 TEST_CONV_RAX_RDX(cdq); 2705 #if defined(__x86_64__) 2706 TEST_CONV_RAX_RDX(cqo); 2707 #endif 2708 2709 { 2710 unsigned long a, r; 2711 a = i2l(0x12345678); 2712 asm volatile("bswapl %k0" : "=r" (r) : "0" (a)); 2713 printf("%-10s: A=" FMTLX " R=" FMTLX "\n", "bswapl", a, r); 2714 } 2715 #if defined(__x86_64__) 2716 { 2717 unsigned long a, r; 2718 a = i2l(0x12345678); 2719 asm volatile("bswapq %0" : "=r" (r) : "0" (a)); 2720 printf("%-10s: A=" FMTLX " R=" FMTLX "\n", "bswapq", a, r); 2721 } 2722 #endif 2723 } 2724 2596 2725 extern void *__start_initcall; 2597 2726 extern void *__stop_initcall; … … 2611 2740 test_mul(); 2612 2741 test_jcc(); 2742 test_loop(); 2613 2743 test_floats(); 2614 2744 #if !defined(__x86_64__) … … 2626 2756 test_vm86(); 2627 2757 #endif 2758 #if !defined(__x86_64__) 2628 2759 test_exceptions(); 2629 #if !defined(__x86_64__)2630 2760 test_self_modifying_code(); 2631 2761 test_single_step(); 2632 2762 #endif 2633 
2763 test_enter(); 2764 test_conv(); 2634 2765 #ifdef TEST_SSE 2635 2766 test_sse(); -
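A recurring fix in the test-i386.c hunks above is switching inline-asm output constraints from "=q"/"=m" to "+q"/"+m" (and splitting cmpxchg8b's "=A" into explicit "=a"/"=d" pairs): "=" tells GCC the operand is write-only, so reading the old value through it was undefined. A minimal standalone example of the read-write form:

    #include <stdio.h>

    int main(void)
    {
        unsigned long v = 5;
        /* "+r": v is both input and output, so the add sees the 5 */
        asm volatile("add $1, %0" : "+r" (v) : : "cc");
        printf("v = %lu\n", v);     /* prints 6 */
        return 0;
    }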
trunk/src/recompiler/tests/test_path.c
r1 r36140 150 150 return 0; 151 151 } 152 152 -
trunk/src/recompiler/translate-all.c
r33656 r36140 32 32 #include <stdio.h> 33 33 #include <string.h> 34 #include <inttypes.h> 34 35 35 36 #include "config.h" … … 55 56 target_ulong gen_opc_npc[OPC_BUF_SIZE]; 56 57 target_ulong gen_opc_jump_pc[2]; 57 #elif defined(TARGET_MIPS) 58 #elif defined(TARGET_MIPS) || defined(TARGET_SH4) 58 59 uint32_t gen_opc_hflags[OPC_BUF_SIZE]; 59 60 #endif … … 71 72 if (max == 0) { 72 73 max = TCG_MAX_OP_SIZE; 73 #define DEF(s, n, copy_size) max = (copy_size > max)? copy_size : max;74 #define DEF(s, n, copy_size) max = copy_size > max? copy_size : max; 74 75 #include "tcg-opc.h" 75 76 #undef DEF … … 80 81 } 81 82 82 void cpu_gen_init( )83 void cpu_gen_init(void) 83 84 { 84 85 tcg_context_init(&tcg_ctx); … … 93 94 code). 94 95 */ 95 int cpu_gen_code(CPUState *env, TranslationBlock *tb, 96 int *gen_code_size_ptr) 96 int cpu_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr) 97 97 { 98 98 TCGContext *s = &tcg_ctx; … … 111 111 #ifdef VBOX 112 112 RAWEx_ProfileStart(env, STATS_QEMU_COMPILATION); 113 #endif 114 113 115 tcg_func_start(s); 114 116 115 117 gen_intermediate_code(env, tb); 116 #else /* !VBOX */117 tcg_func_start(s);118 119 gen_intermediate_code(env, tb);120 #endif /* !VBOX */121 118 122 119 /* generate machine code */ … … 129 126 s->tb_next = NULL; 130 127 /* the following two entries are optional (only used for string ops) */ 128 /* XXX: not used ? */ 131 129 tb->tb_jmp_offset[2] = 0xffff; 132 130 tb->tb_jmp_offset[3] = 0xffff; … … 141 139 s->code_time -= profile_getclock(); 142 140 #endif 143 144 141 gen_code_size = dyngen_code(s, gen_code_buf); 145 142 *gen_code_size_ptr = gen_code_size;
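translate-all.c above computes the largest opcode size by re-including tcg-opc.h with a redefined DEF() — the "X macro" technique. A self-contained sketch of the pattern, with the op list inlined instead of kept in a header:

    #include <stdio.h>

    /* stand-in for tcg-opc.h: each entry is DEF(name, copy_size) */
    #define OP_LIST \
        DEF(mov, 2)  \
        DEF(add, 3)  \
        DEF(call, 7)

    int main(void)
    {
        int max = 0;
    #define DEF(name, copy_size) if ((copy_size) > max) max = (copy_size);
        OP_LIST
    #undef DEF
        printf("max copy_size = %d\n", max);
        return 0;
    }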