Changeset 42601 in vbox for trunk/src/recompiler
- Timestamp: Aug 5, 2012 4:26:25 PM
- svn:sync-xref-src-repo-rev: 79786
- Location: trunk/src/recompiler
- Files: 22 edited
trunk/src/recompiler/Makefile.kmk
r42595 → r42601

@@ -65 +65 @@
 #VBoxRemPrimary_DEFS += DEBUG_ALL_LOGGING DEBUG_DISAS DEBUG_PCALL CONFIG_DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging.
+#VBoxRemPrimary_DEFS += DEBUG_DISAS DEBUG_PCALL CONFIG_DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging.
 ifdef IEM_VERIFICATION_MODE
 VBoxRemPrimary_DEFS += IEM_VERIFICATION_MODE

@@ -111 +112 @@
 VBoxRemPrimary_SOURCES.win.x86 = $(VBoxREMImp_0_OUTDIR)/VBoxREMWin.def
 ifdef VBOX_USE_MINGWW64
-VBoxRemPrimary_SOURCES.win.amd64 = $(VBoxREMImp_0_OUTDIR)/VBoxREMWin.def
+if 0 # exporting all helps when windbg pops up on crashes
+ VBoxRemPrimary_SOURCES.win.amd64 = $(VBoxREMImp_0_OUTDIR)/VBoxREMWin.def
+else
+ VBoxRemPrimary_LDFLAGS.win.amd64 = --export-all
+endif
 endif
trunk/src/recompiler/VBoxRecompiler.c
r42420 → r42601

@@ -58 +58 @@
 /* Don't wanna include everything. */
-extern void cpu_exec_init_all(unsigned long tb_size);
+extern void cpu_exec_init_all(uintptr_t tb_size);

@@ -68 +68 @@
 #ifdef VBOX_STRICT
-unsigned long get_phys_page_offset(target_ulong addr);
+ram_addr_t get_phys_page_offset(target_ulong addr);
 #endif

@@ -268 +268 @@
 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
+#if 0 /* just an annoyance at the moment. */
 #if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
     Assert(!testmath());
+#endif
 #endif

@@ -3448 +3450 @@
 #ifdef VBOX_STRICT
-    unsigned long off;
+    ram_addr_t off;
     REMR3ReplayHandlerNotifications(pVM);
trunk/src/recompiler/cache-utils.h
r36170 → r42601

@@ -15 +15 @@
 /* mildly adjusted code from tcg-dyngen.c */
-static inline void flush_icache_range(unsigned long start, unsigned long stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
     unsigned long p, start1, stop1;
trunk/src/recompiler/cpu-all.h
r37702 → r42601

@@ -776 +776 @@
 #if defined(CONFIG_USE_GUEST_BASE)
-extern unsigned long guest_base;
+extern uintptr_t guest_base;
 extern int have_guest_base;
-extern unsigned long reserved_va;
+extern uintptr_t reserved_va;
 #define GUEST_BASE guest_base
 #define RESERVED_VA reserved_va

@@ -787 +787 @@
 /* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
-#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
+#define g2h(x) ((void *)((uintptr_t)(x) + GUEST_BASE))

@@ -793 +793 @@
 #define h2g_valid(x) ({ \
-    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+    uintptr_t __guest = (uintptr_t)(x) - GUEST_BASE; \
     __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS); \
 })

@@ -799 +799 @@
 #define h2g(x) ({ \
-    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
+    uintptr_t __ret = (uintptr_t)(x) - GUEST_BASE; \
     /* Check if given address fits target address space */ \
     assert(h2g_valid(x)); \

@@ -811 +811 @@
 /* NOTE: we use double casts if pointers and target_ulong have
    different sizes */
-#define saddr(x) (uint8_t *)(long)(x)
-#define laddr(x) (uint8_t *)(long)(x)
+#define saddr(x) (uint8_t *)(intptr_t)(x)
+#define laddr(x) (uint8_t *)(intptr_t)(x)
 #endif

@@ -879 +879 @@
-/* ??? These should be the larger of unsigned long and target_ulong.  */
-extern unsigned long qemu_real_host_page_size;
-extern unsigned long qemu_host_page_bits;
-extern unsigned long qemu_host_page_size;
-extern unsigned long qemu_host_page_mask;
+/* ??? These should be the larger of uintptr_t and target_ulong.  */
+extern size_t qemu_real_host_page_size;
+extern size_t qemu_host_page_bits;
+extern size_t qemu_host_page_size;
+extern uintptr_t qemu_host_page_mask;

@@ -905 +905 @@
 typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
-                                      abi_ulong, unsigned long);
+                                      abi_ulong, uintptr_t);
 int walk_memory_regions(void *, walk_memory_regions_fn);
trunk/src/recompiler/cpu-common.h
r37689 → r42601

@@ -24 +24 @@
 /* address in the RAM (different from a physical address) */
-typedef unsigned long ram_addr_t;
+typedef uintptr_t ram_addr_t;

 /* memory API */
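This typedef is the heart of the changeset: on LP64 hosts (Linux/macOS x64) `unsigned long` happens to be pointer-sized, but on the LLP64 model used by Win64/MinGW-w64 it stays 32 bits while pointers are 64 bits, so every address or pointer-difference stored in an `unsigned long` silently truncates. A minimal standalone check of the three sizes (illustrative only, not part of the tree):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* LP64:  long == 8, void* == 8  -> unsigned long happens to work.
           LLP64: long == 4, void* == 8  -> unsigned long truncates pointers.
           uintptr_t is defined to round-trip a pointer on both models. */
        printf("sizeof(long)=%zu sizeof(void*)=%zu sizeof(uintptr_t)=%zu\n",
               sizeof(long), sizeof(void *), sizeof(uintptr_t));
        return 0;
    }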
trunk/src/recompiler/cpu-defs.h
r37689 → r42601

@@ -117 +117 @@
     /* Addend to virtual address to get host address.  IO accesses
        use the corresponding iotlb value.  */
-    unsigned long addend;
+    uintptr_t addend;
     /* padding to get a power of two size */
     uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
                   (sizeof(target_ulong) * 3 +
-                   ((-sizeof(target_ulong) * 3) & (sizeof(unsigned long) - 1)) +
-                   sizeof(unsigned long))];
+                   ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
+                   sizeof(uintptr_t))];
 } CPUTLBEntry;

@@ -177 +177 @@
-    unsigned long mem_io_pc; /* host pc at which the memory was          \
+    uintptr_t mem_io_pc;     /* host pc at which the memory was          \
                                 accessed */                              \
     target_ulong mem_io_vaddr; /* target virtual addr at which the       \
trunk/src/recompiler/cpu-exec.c
r37702 → r42601

@@ -113 +113 @@ (cpu_exec_nocache)
-    unsigned long next_tb;
+    uintptr_t next_tb;
     TranslationBlock *tb;

@@ -239 +239 @@ (cpu_exec)
     TranslationBlock *tb;
     uint8_t *tc_ptr;
-#ifndef VBOX
     uintptr_t next_tb;
-#else /* VBOX */
-    unsigned long next_tb;
-#endif /* VBOX */

@@ -741 +737 @@
 #ifdef CONFIG_DEBUG_EXEC
-        qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
-                      (long)tb->tc_ptr, tb->pc,
+        qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
+                      (void *)tb->tc_ptr, tb->pc,
                       lookup_symbol(tb->pc));
 #endif

@@ -776 +772 @@
 #define env cpu_single_env
 #endif
+                Log5(("REM: tb=%p tc_ptr=%p %04x:%08RGv\n", tb, tc_ptr, env->segs[R_CS].selector, (RTGCPTR)env->eip));
 #if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                 tcg_qemu_tb_exec(tc_ptr, next_tb);
 #else
                 next_tb = tcg_qemu_tb_exec(tc_ptr);
 #endif
+                if (next_tb)
+                    Log5(("REM: next_tb=%p %04x:%08RGv\n", next_tb, env->segs[R_CS].selector, (RTGCPTR)env->eip));
 #ifdef VBOX
                 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);

@@ -788 +787 @@
                 /* Instruction counter expired.  */
-                tb = (TranslationBlock *)(long)(next_tb & ~3);
+                tb = (TranslationBlock *)(uintptr_t)(next_tb & ~3);
                 /* Restore PC.  */
                 cpu_pc_from_tb(env, tb);

@@ -940 +939 @@
    write caused the exception and otherwise 0'. 'old_set' is the
    signal set which should be restored */
-static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
+static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                     int is_write, sigset_t *old_set,
                                     void *puc)

@@ -1027 +1026 @@ (likewise at -1074, -1164, -1275, -1297, -1349)
-    unsigned long pc;
+    uintptr_t pc;

@@ -1038 +1037 @@ (likewise at -1084, -1177, -1208, -1264, -1285, -1303, -1391, -1407, -1449)
-    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
+    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,

@@ -1223 +1222 @@ (sparc)
     /* XXX: is there a standard glibc define ? */
-    unsigned long pc = regs[1];
+    uintptr_t pc = regs[1];

@@ -1227 +1226 @@
-    unsigned long pc = sc->sigc_regs.tpc;
+    uintptr_t pc = sc->sigc_regs.tpc;
     void *sigmask = (void *)sc->sigc_mask;

@@ -1231 +1230 @@ (OpenBSD)
-    unsigned long pc = uc->sc_pc;
-    void *sigmask = (void *)(long)uc->sc_mask;
+    uintptr_t pc = uc->sc_pc;
+    void *sigmask = (void *)(uintptr_t)uc->sc_mask;

@@ -1319 +1318 @@ (ia64)
-    unsigned long ip;
+    uintptr_t ip;
     int is_write = 0;

@@ -1337 +1336 @@
-    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
+    return handle_cpu_signal(ip, (uintptr_t)info->si_addr,
                              is_write,
                              (sigset_t *)&uc->uc_sigmask, puc);

@@ -1418 +1417 @@ (hppa)
-    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
+    uintptr_t pc = uc->uc_mcontext.sc_iaoq[0];
     uint32_t insn = *(uint32_t *)pc;
trunk/src/recompiler/dyngen-exec.h
r37689 → r42601

@@ -122 +122 @@
    Subtracting one gets us the call instruction itself.  */
 #if defined(__s390__) && !defined(__s390x__)
-# define GETPC() ((void*)(((unsigned long)__builtin_return_address(0) & 0x7fffffffUL) - 1))
+# define GETPC() ((void*)(((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1))
 #elif defined(__arm__)
 /* Thumb return addresses have the low bit set, so we need to subtract two.
    This is still safe in ARM mode because instructions are 4 bytes.  */
-# define GETPC() ((void *)((unsigned long)__builtin_return_address(0) - 2))
+# define GETPC() ((void *)((uintptr_t)__builtin_return_address(0) - 2))
 #else
-# define GETPC() ((void *)((unsigned long)__builtin_return_address(0) - 1))
+# define GETPC() ((void *)((uintptr_t)__builtin_return_address(0) - 1))
 #endif
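A toy illustration of the GETPC() idea (GCC-specific, not VBox code): the return address delivered by __builtin_return_address(0) points at the instruction after the call, so subtracting 1 yields an address inside the call instruction itself, which is what tb_find_pc() needs to map a host return site back into a translation block. The arithmetic must go through uintptr_t for the same Win64 reason as above.

    #include <stdint.h>
    #include <stdio.h>

    /* noinline so the builtin sees a real call frame */
    static __attribute__((noinline)) void *toy_getpc(void)
    {
        return (void *)((uintptr_t)__builtin_return_address(0) - 1);
    }

    int main(void)
    {
        printf("called from near %p\n", toy_getpc());
        return 0;
    }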
trunk/src/recompiler/exec-all.h
r41436 → r42601

@@ -103 +103 @@
 void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
-                 unsigned long searched_pc, int pc_pos, void *puc);
+                 uintptr_t searched_pc, int pc_pos, void *puc);

@@ -109 +109 @@
 int cpu_restore_state(struct TranslationBlock *tb,
-                      CPUState *env, unsigned long searched_pc,
+                      CPUState *env, uintptr_t searched_pc,
                       void *puc);

@@ -118 +118 @@
-int page_unprotect(target_ulong address, unsigned long pc, void *puc);
+int page_unprotect(target_ulong address, uintptr_t pc, void *puc);

@@ -181 +181 @@
-    unsigned long tb_next[2]; /* address of jump generated code */
+    uintptr_t tb_next[2]; /* address of jump generated code */

@@ -224 +224 @@
-extern void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
+extern void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);

@@ -227 +227 @@ (i386/x86_64; likewise at -234 for arm)
-static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)

@@ -262 +262 @@
 static inline void tb_set_jmp_target(TranslationBlock *tb,
-                                     int n, unsigned long addr)
+                                     int n, uintptr_t addr)
 {
-    unsigned long offset;
+    uintptr_t offset;

     offset = tb->tb_jmp_offset[n];
-    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
+    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
 }

@@ -274 +274 @@
 static inline void tb_set_jmp_target(TranslationBlock *tb,
-                                     int n, unsigned long addr)
+                                     int n, uintptr_t addr)

@@ -287 +287 @@
         /* patch the native jump address */
-        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);
+        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

         /* add in TB jmp circular list */
         tb->jmp_next[n] = tb_next->jmp_first;
-        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
+        tb_next->jmp_first = (TranslationBlock *)((intptr_t)(tb) | (n));

@@ -295 +295 @@
-TranslationBlock *tb_find_pc(unsigned long pc_ptr);
+TranslationBlock *tb_find_pc(uintptr_t pc_ptr);

@@ -380 +380 @@
-    p = (void *)(unsigned long)addr
+    p = (void *)(uintptr_t)addr
         + env1->tlb_table[mmu_idx][page_index].addend;
     return qemu_ram_addr_from_host(p);
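The (intptr_t)tb | n expressions above are pointer tagging: TranslationBlock pointers are at least 4-byte aligned, so the low two bits are free to record which jump slot a circular-list link came from (0/1, with 2 marking the list head). A self-contained toy version of the trick (hypothetical names, not VBox code), showing why the casts need a pointer-sized integer:

    #include <assert.h>
    #include <stdint.h>

    typedef struct TB TB;
    struct TB { TB *next; };

    static TB *tag(TB *tb, int n)  { return (TB *)((uintptr_t)tb | (unsigned)n); }
    static int tag_of(TB *p)       { return (int)((uintptr_t)p & 3); }
    static TB *untag(TB *p)        { return (TB *)((uintptr_t)p & ~(uintptr_t)3); }

    int main(void)
    {
        static TB tb;                       /* static storage => aligned */
        TB *link = tag(&tb, 1);             /* stash slot number in low bits */
        assert(tag_of(link) == 1 && untag(link) == &tb);
        return 0;
    }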
trunk/src/recompiler/exec.c
r37702 → r42601

@@ -132 +132 @@
 static uint8_t *code_gen_buffer;
-static unsigned long code_gen_buffer_size;
+static size_t code_gen_buffer_size;
 /* threshold to flush the translated code buffer */
-static unsigned long code_gen_buffer_max_size;
+static size_t code_gen_buffer_max_size;
 static uint8_t *code_gen_ptr;

@@ -211 +211 @@
-unsigned long qemu_real_host_page_size;
-unsigned long qemu_host_page_bits;
-unsigned long qemu_host_page_size;
-unsigned long qemu_host_page_mask;
+size_t qemu_real_host_page_size;
+size_t qemu_host_page_bits;
+size_t qemu_host_page_size;
+uintptr_t qemu_host_page_mask;

@@ -270 +270 @@ (_WIN32)
-static void map_exec(void *addr, long size)
+static void map_exec(void *addr, size_t size)

@@ -278 +278 @@
-static void map_exec(void *addr, long size)
+static void map_exec(void *addr, size_t size)
 {
-    unsigned long start, end, page_size;
+    uintptr_t start, end, page_size;

     page_size = getpagesize();
-    start = (unsigned long)addr;
+    start = (uintptr_t)addr;
     start &= ~(page_size - 1);

-    end = (unsigned long)addr + size;
+    end = (uintptr_t)addr + size;
     end += page_size - 1;
     end &= ~(page_size - 1);

@@ -295 +295 @@ (VBOX)
-static void map_exec(void *addr, long size)
+static void map_exec(void *addr, size_t size)
 {
     RTMemProtect(addr, size,

@@ -342 +342 @@
     for (i = 0; i < cnt; i++) {
-        unsigned long startaddr, endaddr;
+        uintptr_t startaddr, endaddr;

@@ -366 +366 @@
-    last_brk = (unsigned long)sbrk(0);
+    last_brk = (uintptr_t)sbrk(0);

@@ -373 +373 @@
     do {
-        unsigned long startaddr, endaddr;
+        uintptr_t startaddr, endaddr;
         int n;

@@ -529 +529 @@
-static void code_gen_alloc(unsigned long tb_size)
+static void code_gen_alloc(uintptr_t tb_size)

@@ -550 +550 @@
         /* XXX: needs adjustments */
-        code_gen_buffer_size = (unsigned long)(ram_size / 4);
+        code_gen_buffer_size = (uintptr_t)(ram_size / 4);

@@ -648 +648 @@
    (in bytes) allocated to the translation buffer. Zero means default
    size. */
-void cpu_exec_init_all(unsigned long tb_size)
+void cpu_exec_init_all(uintptr_t tb_size)
 {
     cpu_gen_init();

@@ -794 +794 @@
-    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
+    if ((uintptr_t)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
         cpu_abort(env1, "Internal error: code buffer overflow\n");

@@ -876 +876 @@ (likewise at -897, -1478, -1491)
-        n1 = (long)tb1 & 3;
-        tb1 = (TranslationBlock *)((long)tb1 & ~3);
+        n1 = (intptr_t)tb1 & 3;
+        tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);

@@ -918 +918 @@
 static inline void tb_reset_jump(TranslationBlock *tb, int n)
 {
-    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
+    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
 }

@@ -963 +963 @@
-        n1 = (long)tb1 & 3;
+        n1 = (intptr_t)tb1 & 3;
         if (n1 == 2)
             break;
-        tb1 = (TranslationBlock *)((long)tb1 & ~3);
+        tb1 = (TranslationBlock *)((intptr_t)tb1 & ~3);

@@ -972 +972 @@
-    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
+    tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2); /* fail safe */

@@ -993 +993 @@
-    tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
+    tb = tb_find(&ptb, (uintptr_t)pc, (uintptr_t)cs_base,
                  flags);

@@ -1015 +1015 @@
  * Gets the page offset.
  */
-unsigned long get_phys_page_offset(target_ulong addr)
+ram_addr_t get_phys_page_offset(target_ulong addr)

@@ -1060 +1060 @@ (likewise at -1153, -1282)
-        n = (long)tb & 3;
-        tb = (TranslationBlock *)((long)tb & ~3);
+        n = (intptr_t)tb & 3;
+        tb = (TranslationBlock *)((intptr_t)tb & ~3);

@@ -1105 +1105 @@
     cpu_gen_code(env, tb, &code_gen_size);
-    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
+    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

@@ -1238 +1238 @@
-               cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
+               cpu_single_env->eip + (intptr_t)cpu_single_env->segs[R_CS].base);

@@ -1257 +1257 @@
 static void tb_invalidate_phys_page(tb_page_addr_t addr,
-                                    unsigned long pc, void *puc)
+                                    uintptr_t pc, void *puc)

@@ -1327 +1327 @@
-    p->first_tb = (TranslationBlock *)((long)tb | n);
+    p->first_tb = (TranslationBlock *)((intptr_t)tb | n);
     invalidate_page_bitmap(p);

@@ -1377 +1377 @@
     if (nb_tbs >= code_gen_max_blocks ||
-        (code_gen_ptr - code_gen_buffer) >= VBOX_ONLY((unsigned long))code_gen_buffer_max_size)
+        (code_gen_ptr - code_gen_buffer) >= VBOX_ONLY((uintptr_t))code_gen_buffer_max_size)
         return NULL;

@@ -1420 +1420 @@
-    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
+    tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);

@@ -1438 +1438 @@
 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
    tb[1].tc_ptr. Return NULL if not found */
-TranslationBlock *tb_find_pc(unsigned long tc_ptr)
+TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
 {
     int m_min, m_max, m;
-    unsigned long v;
+    uintptr_t v;

@@ -1446 +1446 @@
-    if (tc_ptr < (unsigned long)code_gen_buffer ||
-        tc_ptr >= (unsigned long)code_gen_ptr)
+    if (tc_ptr < (uintptr_t)code_gen_buffer ||
+        tc_ptr >= (uintptr_t)code_gen_ptr)
         return NULL;

@@ -1455 +1455 @@
-        v = (unsigned long)tb->tc_ptr;
+        v = (uintptr_t)tb->tc_ptr;

@@ -2206 +2206 @@
 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
-                                         unsigned long start, unsigned long length)
+                                         uintptr_t start, uintptr_t length)
 {
-    unsigned long addr;
+    uintptr_t addr;

@@ -2227 +2227 @@
     CPUState *env;
-    unsigned long length, start1;
+    uintptr_t length, start1;
     int i;

@@ -2243 +2243 @@
-    start1 = (unsigned long)qemu_get_ram_ptr(start);
+    start1 = (uintptr_t)qemu_get_ram_ptr(start);
     /* Chek that we don't span multiple blocks - this breaks the
        address comparisons below.  */
-    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
+    if ((uintptr_t)qemu_get_ram_ptr(end - 1) - start1
         != (end - 1) - start) {
         abort();
     }

@@ -2251 +2251 @@
-    start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
+    start1 = (uintptr_t)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */

@@ -2300 +2300 @@
     ram_addr_t ram_addr;
+#ifndef VBOX
     void *p;
+#endif

@@ -2307 +2309 @@
-        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
+        p = (void *)(uintptr_t)((tlb_entry->addr_write & TARGET_PAGE_MASK)
             + tlb_entry->addend);
         ram_addr = qemu_ram_addr_from_host(p);

@@ -2385 +2387 @@ (tlb_set_page)
     PhysPageDesc *p;
-    unsigned long pd;
+    ram_addr_t pd;
     unsigned int index;
     target_ulong address;
     target_ulong code_address;
-    unsigned long addend;
+    uintptr_t addend;
     CPUTLBEntry *te;

@@ -2409 +2411 @@
     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d size=" TARGET_FMT_lx " pd=0x%08lx\n",
-           vaddr, (int)paddr, prot, mmu_idx, size, pd);
+           vaddr, (int)paddr, prot, mmu_idx, size, (long)pd);

@@ -2420 +2422 @@
-    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
+    addend = (uintptr_t)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
 #else
     /** @todo this is racing the phys_page_find call above since it may register
      *        a new chunk of memory... */
-    addend = (unsigned long)remR3TlbGCPhys2Ptr(env, pd & TARGET_PAGE_MASK, !!(prot & PAGE_WRITE));
+    addend = (uintptr_t)remR3TlbGCPhys2Ptr(env, pd & TARGET_PAGE_MASK, !!(prot & PAGE_WRITE));
 #endif

@@ -2555 +2557 @@
     walk_memory_regions_fn fn;
     void *priv;
-    unsigned long start;
+    uintptr_t start;
     int prot;

@@ -2616 +2618 @@
     struct walk_memory_regions_data data;
-    unsigned long i;
+    target_ulong i;

@@ -2760 +2762 @@
 /* called from signal handler: invalidate the code and unprotect the
    page. Return TRUE if the fault was successfully handled. */
-int page_unprotect(target_ulong address, unsigned long pc, void *puc)
+int page_unprotect(target_ulong address, uintptr_t pc, void *puc)

@@ -2807 +2809 @@
 static inline void tlb_set_dirty(CPUState *env,
-                                 unsigned long addr, target_ulong vaddr)
+                                 uintptr_t addr, target_ulong vaddr)
 {
 }

@@ -2985 +2987 @@
 #define HUGETLBFS_MAGIC       0x958458f6

-static long gethugepagesize(const char *path)
+static size_t gethugepagesize(const char *path)

@@ -3002 +3004 @@
-    return fs.f_bsize;
+    return (size_t)fs.f_bsize;

@@ -3015 +3017 @@
-    unsigned long hpagesize;
+    size_t hpagesize;

     hpagesize = gethugepagesize(path);

@@ -3808 +3810 @@ (likewise at -3939, -4034, -4123, -4159, -4246, -4288, -4325, -4372)
-    unsigned long pd;
+    ram_addr_t pd;

@@ -3860 +3862 @@ (likewise at -3959, -4036, -4341, -4388)
-        unsigned long addr1;
+        ram_addr_t addr1;

@@ -4206 +4208 @@ (ldq_phys)
     int io_index;
+#ifndef VBOX
     uint8_t *ptr;
+#endif
     uint64_t val;

@@ -4263 +4267 @@
-        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+        ram_addr_t addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
         ptr = qemu_get_ram_ptr(addr1);
         stl_p(ptr, val);

@@ -4455 +4459 @@ (cpu_io_recompile)
-    tb = tb_find_pc((unsigned long)retaddr);
+    tb = tb_find_pc((uintptr_t)retaddr);
     if (!tb) {
         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",

@@ -4461 +4465 @@
     n = env->icount_decr.u16.low + tb->icount;
-    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
+    cpu_restore_state(tb, env, (uintptr_t)retaddr, NULL);
     /* Calculate how many instructions had been executed before the fault
        occurred. */
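The code_gen_ptr rounding above is the standard align-up idiom, done through uintptr_t so the arithmetic keeps all pointer bits on Win64. A self-contained toy version (hypothetical macro name, not VBox code):

    #include <assert.h>
    #include <stdint.h>

    /* Round a pointer up to the next 'a' boundary, a being a power of two,
       as the CODE_GEN_ALIGN rounding of code_gen_ptr does in exec.c. */
    #define ALIGN_UP(p, a) \
        ((void *)(((uintptr_t)(p) + ((a) - 1)) & ~(uintptr_t)((a) - 1)))

    int main(void)
    {
        char buf[64];
        void *p = ALIGN_UP(buf + 1, 16);
        assert(((uintptr_t)p & 15) == 0);   /* now 16-byte aligned */
        return 0;
    }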
trunk/src/recompiler/gen-icount.h
r37689 → r42601

@@ -32 +32 @@
         *icount_arg = num_insns;
         gen_set_label(icount_label);
-        tcg_gen_exit_tb((long)tb + 2);
+        tcg_gen_exit_tb((uintptr_t)(tb + 2));
     }
 }
trunk/src/recompiler/qemu-lock.h
r37675 → r42601

@@ -103 +103 @@
 static inline int testandset (int *p)
 {
-    long int readval = 0;
+    int64_t readval = 0;

     __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
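For reference, a toy equivalent of what testandset() does, written with the GCC __sync builtin instead of inline assembly (same semantics as the lock; cmpxchgl sequence, not the VBox code itself): atomically set *p to 1 and report whether the lock was already taken.

    static inline int toy_testandset(int *p)
    {
        /* compare-and-swap: if *p == 0, store 1; return the old value,
           so nonzero means somebody else already holds the lock */
        return __sync_val_compare_and_swap(p, 0, 1) != 0;
    }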
trunk/src/recompiler/softmmu_header.h
r38320 → r42601

@@ -89 +89 @@ (likewise at -110 and -135)
     target_ulong addr;
-    unsigned long physaddr;
+    uintptr_t physaddr;
     int mmu_idx;
trunk/src/recompiler/softmmu_template.h
r37829 → r42601

@@ -84 +84 @@ (likewise at -242)
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
-    env->mem_io_pc = (unsigned long)retaddr;
+    env->mem_io_pc = (uintptr_t)retaddr;

@@ -122 +122 @@ (likewise at -179, -261, -315)
     target_phys_addr_t ioaddr;
-    unsigned long addend;
+    uintptr_t addend;

@@ -156 +156 @@ (likewise at -211)
             addend = env->tlb_table[mmu_idx][index].addend;
-            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
+            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(uintptr_t)(addr+addend));

@@ -294 +294 @@ (likewise at -346)
             addend = env->tlb_table[mmu_idx][index].addend;
-            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
+            glue(glue(st, SUFFIX), _raw)((uint8_t *)(uintptr_t)(addr+addend), val);
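A sketch of the softmmu fast path these hunks touch (simplified structure, not the real CPUTLBEntry): each TLB entry caches an addend such that host_address = guest_vaddr + addend. The addend is a host-pointer difference, so it only fits in a pointer-sized integer; storing it in an unsigned long is exactly what broke on Win64.

    #include <stdint.h>

    typedef struct {
        uint32_t  addr_read;   /* tagged guest page address */
        uintptr_t addend;      /* host_ptr - guest_vaddr for this page */
    } ToyTLBEntry;

    /* fast-path byte load once the TLB entry is known to match */
    static inline uint8_t toy_ldub(const ToyTLBEntry *e, uint32_t vaddr)
    {
        return *(const uint8_t *)(uintptr_t)(vaddr + e->addend);
    }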
trunk/src/recompiler/target-i386/op_helper.c
r42488 → r42601

@@ -5645 +5645 @@
     TranslationBlock *tb;
     int ret;
-    unsigned long pc;
+    uintptr_t pc;
     CPUX86State *saved_env;

@@ -5657 +5657 @@
     if (retaddr) {
         /* now we have a real cpu fault */
-        pc = (unsigned long)retaddr;
+        pc = (uintptr_t)retaddr;
         tb = tb_find_pc(pc);

@@ -5847 +5847 @@
 DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
 {
-    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
+    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
 }
trunk/src/recompiler/target-i386/translate.c
r40360 → r42601

@@ -2482 +2482 @@
         tcg_gen_goto_tb(tb_num);
         gen_jmp_im(eip);
-        tcg_gen_exit_tb((long)tb + tb_num);
+        tcg_gen_exit_tb((intptr_t)tb + tb_num);
     } else {
         /* jump to another page: currently not optimized */

@@ -8366 +8366 @@
 void gen_pc_load(CPUState *env, TranslationBlock *tb,
-                 unsigned long searched_pc, int pc_pos, void *puc)
+                 uintptr_t searched_pc, int pc_pos, void *puc)
 {
     int cc_op;
trunk/src/recompiler/tcg/i386/tcg-target.c
r38064 → r42601

@@ -45 +45 @@ (register allocation order, x86-64)
     TCG_REG_R10,
     TCG_REG_R11,
+# if !defined(VBOX) || !defined(__MINGW64__)
     TCG_REG_R9,
     TCG_REG_R8,
     TCG_REG_RCX,
     TCG_REG_RDX,
+# endif
     TCG_REG_RSI,
     TCG_REG_RDI,
+# if defined(VBOX) && defined(__MINGW64__)
+    TCG_REG_R9,
+    TCG_REG_R8,
+    TCG_REG_RDX,
+    TCG_REG_RCX,
+# endif
     TCG_REG_RAX,

@@ -65 +73 @@ (call argument registers)
 static const int tcg_target_call_iarg_regs[] = {
 #if TCG_TARGET_REG_BITS == 64
+# if defined(VBOX) && defined(__MINGW64__)
+    TCG_REG_RCX,
+    TCG_REG_RDX,
+# else
     TCG_REG_RDI,
     TCG_REG_RSI,
     TCG_REG_RDX,
     TCG_REG_RCX,
+# endif
     TCG_REG_R8,
     TCG_REG_R9,

@@ -197 +210 @@
     if (TCG_TARGET_REG_BITS == 64) {
         tcg_regset_set32(ct->u.regs, 0, 0xffff);
+#if defined(VBOX) && defined(__MINGW64__)
+        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[2]);
+        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[1]);
+        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[0]);
+#else
+        /** @todo figure why RDX isn't mentioned here. */
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_RSI);
         tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDI);
+#endif
     } else {
         tcg_regset_set32(ct->u.regs, 0, 0xff);

@@ -761 +781 @@
-#ifdef VBOX
+#ifdef VBOX_16_BYTE_STACK_ALIGN
 static void tcg_out_subi(TCGContext *s, int reg, tcg_target_long val)

@@ -1391 +1411 @@ (qemu_ld, !CONFIG_SOFTMMU branch; likewise at -1598 +1627 for qemu_st)
 #else
+# if defined(VBOX) && defined(__MINGW64__)
+#  error port me
+# endif
 {
     int32_t offset = GUEST_BASE;

@@ -1527 +1550 @@ (qemu_st helper call)
     /* XXX: move that code at the end of the TB */
     if (TCG_TARGET_REG_BITS == 64) {
+# if defined(VBOX) && defined(__MINGW64__)
+        tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+                    tcg_target_call_iarg_regs[1], data_reg);
+        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], mem_index);
+# else
         tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                     TCG_REG_RSI, data_reg);
         tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
+# endif
         stack_adjust = 0;
     } else if (TARGET_LONG_BITS == 32) {

@@ -2093 +2125 @@ (callee-saved registers)
     TCG_REG_RBP,
     TCG_REG_RBX,
+# if defined(VBOX) && defined(__MINGW64__)
+    TCG_REG_RSI,
+    TCG_REG_RDI,
+# endif
     TCG_REG_R12,
     TCG_REG_R13,

@@ -2133 +2169 @@
     frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
+#if defined(VBOX) && defined(__MINGW64__)
+    frame_size += TCG_TARGET_CALL_STACK_OFFSET;
+#endif
     frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
                  ~(TCG_TARGET_STACK_ALIGN - 1);

@@ -2177 +2216 @@ (call-clobbered registers)
     tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
     if (TCG_TARGET_REG_BITS == 64) {
+# if !defined(VBOX) || !defined(__MINGW64__)
         tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
         tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
+# endif
         tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
         tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
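For reference, the two x86-64 calling conventions these #ifdef blocks switch between (a summary as C data, not VBox code):

    /* Integer argument registers, in order: */
    static const char *sysv_iargs[]  = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
    static const char *win64_iargs[] = { "rcx", "rdx", "r8",  "r9" };
    /* Win64 additionally treats RSI/RDI as callee-saved (hence their
       addition to the prologue push list above) and gives a callee only
       four register arguments plus a 32-byte spill ("home") area on the
       caller's stack. */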
trunk/src/recompiler/tcg/i386/tcg-target.h
r37689 → r42601

@@ -74 +74 @@
 #define TCG_REG_CALL_STACK TCG_REG_ESP
 #define TCG_TARGET_STACK_ALIGN 16
+#if defined(VBOX) && defined(__MINGW64__)
+# define TCG_TARGET_CALL_STACK_OFFSET 32 /* 4 qword argument/register spill zone */
+#else
 #define TCG_TARGET_CALL_STACK_OFFSET 0
+#endif

@@ -128 +132 @@
-static inline void flush_icache_range(unsigned long start, unsigned long stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
 }
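A sketch of what TCG_TARGET_CALL_STACK_OFFSET = 32 buys on Win64: every caller must reserve 4 * 8 bytes below its outgoing arguments so the callee can spill RCX/RDX/R8/R9. The prologue frame computation then looks roughly like the following (hypothetical constants for illustration; the real sizes live in tcg-target.h/tcg.c):

    enum { STACK_ALIGN = 16, CALL_STACK_OFFSET = 32, STATIC_CALL_ARGS = 128 };

    /* add the shadow space before rounding the frame to the ABI alignment */
    static inline unsigned frame_size(unsigned push_size)
    {
        unsigned size = push_size + STATIC_CALL_ARGS + CALL_STACK_OFFSET;
        return (size + STACK_ALIGN - 1) & ~(STACK_ALIGN - 1u);
    }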
trunk/src/recompiler/tcg/tcg-dyngen.c
r36140 → r42601

@@ -54 +54 @@ (all inside an #if 0 block; likewise at -58, -70, -88, -93, -104, -114, -121)
 #if 0
 #if defined(__s390__)
-static inline void flush_icache_range(unsigned long start, unsigned long stop)
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)

@@ -72 +72 @@ (likewise at -95)
-    unsigned long p;
+    uintptr_t p;

@@ -106 +106 @@ (arm)
-    register unsigned long _beg __asm ("a1") = start;
-    register unsigned long _end __asm ("a2") = stop;
-    register unsigned long _flg __asm ("a3") = 0;
+    register uintptr_t _beg __asm ("a1") = start;
+    register uintptr_t _end __asm ("a2") = stop;
+    register uintptr_t _flg __asm ("a3") = 0;
     __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));

@@ -159 +159 @@ (ia64)
 # define insn_mask ((1UL << 41) - 1)
-    unsigned long shift;
+    uintptr_t shift;

@@ -304 +304 @@
                               struct ia64_fixup *plt_fixes,
                               int num_plts,
-                              unsigned long *plt_target,
+                              uintptr_t *plt_target,
                               unsigned int *plt_offset)

@@ -397 +397 @@ (hppa)
     for (; stub != NULL; stub = stub->next) {
-        unsigned long l = (unsigned long)p;
+        uintptr_t l = (uintptr_t)p;
         /* stub:
          * ldil L'target, %r1
trunk/src/recompiler/tcg/tcg.c
r37693 → r42601

@@ -120 +120 @@
 static void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
-                          int label_index, long addend)
+                          int label_index, tcg_target_long addend)
 {
     TCGLabel *l;

@@ -267 +267 @@
     tcg_target_qemu_prologue(s);
-    flush_icache_range((unsigned long)s->code_buf,
-                       (unsigned long)s->code_ptr);
+    flush_icache_range((uintptr_t)s->code_buf,
+                       (uintptr_t)s->code_ptr);
 }

@@ -2006 +2006 @@
 static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
-                                      long search_pc)
+                                      intptr_t search_pc)
 {
     TCGOpcode opc;

@@ -2098 +2098 @@
     case INDEX_op_set_label:
         tcg_reg_alloc_bb_end(s, s->reserved_regs);
-        tcg_out_label(s, args[0], (long)s->code_ptr);
+        tcg_out_label(s, args[0], (intptr_t)s->code_ptr);
         break;

@@ -2147 +2147 @@
     /* flush instruction cache */
-    flush_icache_range((unsigned long)gen_code_buf,
-                       (unsigned long)s->code_ptr);
+    flush_icache_range((uintptr_t)gen_code_buf,
+                       (uintptr_t)s->code_ptr);
     return s->code_ptr - gen_code_buf;

@@ -2156 +2156 @@
    not be changed, though writing the same values is ok.
    Return -1 if not found. */
-int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)
+int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, intptr_t offset)
 {
     return tcg_gen_code_common(s, gen_code_buf, offset);
trunk/src/recompiler/tcg/tcg.h
r37689 → r42601

@@ -284 +284 @@
     /* goto_tb support */
     uint8_t *code_buf;
-    unsigned long *tb_next;
+    uintptr_t *tb_next;
     uint16_t *tb_next_offset;
     uint16_t *tb_jmp_offset; /* != NULL if USE_DIRECT_JUMP */

@@ -344 +344 @@
     uint8_t *ptr, *ptr_end;
-    size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
+    size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
     ptr = s->pool_cur;

@@ -360 +360 @@
 int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf);
-int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset);
+int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, intptr_t offset);

@@ -504 +504 @@
 #if defined(_ARCH_PPC) && !defined(_ARCH_PPC64)
 #define tcg_qemu_tb_exec(tb_ptr) \
-    ((long REGPARM __attribute__ ((longcall)) (*)(void *))code_gen_prologue)(tb_ptr)
+    ((intptr_t REGPARM __attribute__ ((longcall)) (*)(void *))code_gen_prologue)(tb_ptr)
 #else
-# if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
+# if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM) && !defined(__MINGW64__)
 #  define tcg_qemu_tb_exec(tb_ptr, ret) \
       __asm__ __volatile__("call *%%ecx" : "=a"(ret) : "a"(tb_ptr), "c" (&code_gen_prologue[0]) : "memory", "%edx", "cc")
-# else /* !VBOX || !GCC_WITH_BUGGY_REG_PARAM */
-#define tcg_qemu_tb_exec(tb_ptr) ((long REGPARM (*)(void *))code_gen_prologue)(tb_ptr)
-# endif /* !VBOX || !GCC_WITH_BUGGY_REG_PARAM */
+# else
+#define tcg_qemu_tb_exec(tb_ptr) ((intptr_t REGPARM (*)(void *))code_gen_prologue)(tb_ptr)
+# endif
 #endif
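A toy model of what the tcg_qemu_tb_exec() macro does (not the real definition): generated code lives in a plain byte buffer and is entered by casting the buffer address to a function pointer. The return value packs a TranslationBlock pointer plus tag bits in the low two bits, so it must be a pointer-sized intptr_t; plain long truncates it on Win64.

    #include <stdint.h>

    typedef intptr_t (*tb_entry_fn)(void *);

    /* Cast an executable code buffer to a function pointer and call it.
       Data-to-function pointer casts are implementation-defined, but this
       is exactly the mechanism TCG relies on. */
    static intptr_t toy_tb_exec(uint8_t *prologue, void *tb_ptr)
    {
        return ((tb_entry_fn)prologue)(tb_ptr);
    }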
trunk/src/recompiler/translate-all.c
r37689 → r42601

@@ -58 +58 @@
     tcg_context_init(&tcg_ctx);
     tcg_set_frame(&tcg_ctx, TCG_AREG0, offsetof(CPUState, temp_buf),
-                  CPU_TEMP_BUF_NLONGS * sizeof(long));
+                  sizeof(((CPUState *)0)->temp_buf));
 }

@@ -134 +134 @@
 int cpu_restore_state(TranslationBlock *tb,
-                      CPUState *env, unsigned long searched_pc,
+                      CPUState *env, uintptr_t searched_pc,
                       void *puc)
 {
     TCGContext *s = &tcg_ctx;
     int j;
-    unsigned long tc_ptr;
+    uintptr_t tc_ptr;
 #ifdef CONFIG_PROFILER
     int64_t ti;

@@ -159 +159 @@
     /* find opc index corresponding to search_pc */
-    tc_ptr = (unsigned long)tb->tc_ptr;
+    tc_ptr = (uintptr_t)tb->tc_ptr;
     if (searched_pc < tc_ptr)
         return -1;
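The sizeof(((CPUState *)0)->temp_buf) expression above is a standard C idiom: sizeof is an unevaluated context, so "dereferencing" a null pointer there is legal and yields the size of a struct member without needing an instance, which sidesteps the old size computation in units of sizeof(long). A minimal demonstration (hypothetical struct, not VBox code):

    #include <stdio.h>

    struct Demo { long temp_buf[128]; };

    int main(void)
    {
        /* prints 128 * sizeof(long); nothing is ever dereferenced */
        printf("%zu\n", sizeof(((struct Demo *)0)->temp_buf));
        return 0;
    }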